/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
static DEFINE_MUTEX(cdev_list_lock);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
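/* Default TX page order for 16KB (2^14 byte) allocations: order
 * 14 - PAGE_SHIFT is 2 on 4KB-page systems, clamped to 0 should
 * PAGE_SHIFT ever exceed 14.
 */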
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

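/* Listen-state changes fan out through a notifier chain: when a TCP
 * socket enters or leaves LISTEN, every chtls device on cdev_list is
 * asked to start or stop offloading that listener.
 */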
static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_dev *cdev;
	struct sock *sk;
	int ret;

	sk = data;
	ret = NOTIFY_DONE;

	switch (event) {
	case CHTLS_LISTEN_START:
	case CHTLS_LISTEN_STOP:
		mutex_lock(&cdev_list_lock);
		list_for_each_entry(cdev, &cdev_list, list) {
			if (event == CHTLS_LISTEN_START)
				ret = chtls_listen_start(cdev, sk);
			else
				chtls_listen_stop(cdev, sk);
		}
		mutex_unlock(&cdev_list_lock);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};

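/* CPL messages queued to a listening socket's backlog carry their own
 * handler in the skb control block; they are told apart from ordinary
 * TCP segments by their transport and network headers pointing at the
 * same offset.
 */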
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

static int chtls_start_listen(struct sock *sk)
{
	int err;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	mutex_lock(&notify_mutex);
	err = raw_notifier_call_chain(&listen_notify_list,
				      CHTLS_LISTEN_START, sk);
	mutex_unlock(&notify_mutex);
	return err;
}

static void chtls_stop_listen(struct sock *sk)
{
	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, sk);
	mutex_unlock(&notify_mutex);
}

static int chtls_inline_feature(struct tls_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(sk);
}

static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tls_register_device(&cdev->tlsdev);
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
{
	tls_unregister_device(&cdev->tlsdev);
}

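/* Deferred-work processing: the deferq lock is dropped around each
 * handler call so handlers run unlocked (and may queue further work);
 * only the dequeue itself happens under the lock.
 */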
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
				struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

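/* Preallocate a zeroed, TCP-header-sized skb at device-add time,
 * presumably so a control message (such as a reset) can still be built
 * when a fresh atomic allocation might fail.
 */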
static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}

static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev) + info->nports *
		       (sizeof(struct net_device *)), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	chtls_unregister_dev(cdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	if (cdev->askb)
		kfree_skb(cdev->askb);
	kfree(cdev);
}

static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
		chtls_free_uld(cdev);
	mutex_unlock(&cdev_mutex);
}

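/* The cxgb4 base driver reports adapter lifecycle events here: register
 * with the TLS core only once the adapter is up, and tear the device
 * down when the adapter detaches.
 */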
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		chtls_free_uld(cdev);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for cpl_pass_accept_req which will be synthesized
	 * by the driver. Once the driver synthesizes cpl_pass_accept_req,
	 * the skb will go through the regular cpl_pass_accept_req processing
	 * in TOM.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);
	/* For now we will copy cpl_rx_pkt in the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}

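/* Small CPL responses that arrive without a gather list are copied into
 * a per-bin cached skb. The refcount_inc/refcount_read dance below
 * claims the cached skb only if we are its sole owner (the count became
 * exactly 2); otherwise we back off and allocate a fresh skb.
 */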
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}

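/* Top-level ULD receive hook: CPL_RX_PKT (an incoming SYN) is rebuilt
 * into a synthesized cpl_pass_accept_req, inline responses (no gather
 * list) take the cached-skb path above, and everything else is pulled
 * into a fresh skb before dispatch.
 */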
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);

	return 0;
nomem:
	return -ENOMEM;
}

static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}

static int do_chtls_setsockopt(struct sock *sk, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (!optval || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version: only TLS 1.2 is supported */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* Obtain version and type from previous copy */
		crypto_info[0] = tmp_crypto_info;
		/* Now copy the following data */
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_128)
				    - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto out;
		}
		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		rc = chtls_setkey(csk, keylen, optname);
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

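/* Offloaded sockets get their proto ops swapped for chtls_cpl_prot, a
 * copy of tcp_prot with the data-path and sockopt entry points replaced
 * by the TLS-aware versions installed below.
 */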
void chtls_install_cpl_ops(struct sock *sk)
{
	sk->sk_prot = &chtls_cpl_prot;
}

static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);