/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
static DEFINE_MUTEX(cdev_list_lock);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_register(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_unregister(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

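/*
 * Notifier callback for listen start/stop events posted by
 * chtls_start_listen()/chtls_stop_listen().  The chtls_listen
 * descriptor passed in @data is consumed (freed) in both cases.
 */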
static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
{
        struct chtls_listen *clisten;
        int ret = NOTIFY_DONE;

        clisten = (struct chtls_listen *)data;

        switch (event) {
        case CHTLS_LISTEN_START:
                ret = chtls_listen_start(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        case CHTLS_LISTEN_STOP:
                chtls_listen_stop(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        }
        return ret;
}

static struct notifier_block listen_notifier = {
        .notifier_call = listen_notify_handler
};

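/*
 * Backlog receive for sockets whose listen path is handled by chtls.
 * skbs queued by the driver have coincident network and transport
 * headers and carry their own handler in the skb control block;
 * anything else is passed back to the normal TCP receive path.
 */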
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb_transport_header(skb) != skb_network_header(skb)))
                return tcp_v4_do_rcv(sk, skb);
        BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
        return 0;
}

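/*
 * Ask the hardware to take over a listening socket.  Non-TCP and
 * loopback listeners are rejected; otherwise the request is posted to
 * the listen notifier chain under notify_mutex.
 */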
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;
        int err;

        if (sk->sk_protocol != IPPROTO_TCP)
                return -EPROTONOSUPPORT;

        if (sk->sk_family == PF_INET &&
            LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
                return -EADDRNOTAVAIL;

        sk->sk_backlog_rcv = listen_backlog_rcv;
        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return -ENOMEM;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        err = raw_notifier_call_chain(&listen_notify_list,
                                      CHTLS_LISTEN_START, clisten);
        mutex_unlock(&notify_mutex);
        return err;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;

        if (sk->sk_protocol != IPPROTO_TCP)
                return;

        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
                                CHTLS_LISTEN_STOP, clisten);
        mutex_unlock(&notify_mutex);
}

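/*
 * tls_device ->feature() hook: report whether any port on this
 * adapter advertises inline TLS record offload (NETIF_F_HW_TLS_RECORD).
 */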
static int chtls_inline_feature(struct tls_device *dev)
{
        struct net_device *netdev;
        struct chtls_dev *cdev;
        int i;

        cdev = to_chtls_dev(dev);

        for (i = 0; i < cdev->lldi->nports; i++) {
                netdev = cdev->ports[i];
                if (netdev->features & NETIF_F_HW_TLS_RECORD)
                        return 1;
        }
        return 0;
}

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                return chtls_start_listen(cdev, sk);
        return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                chtls_stop_listen(cdev, sk);
}

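/*
 * Register this adapter with the TLS core as "chtls<port0 name>",
 * wire up the tls_device callbacks and mark the chtls device UP.
 */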
static void chtls_register_dev(struct chtls_dev *cdev)
{
        struct tls_device *tlsdev = &cdev->tlsdev;

        strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
        strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
                TLS_DEVICE_NAME_MAX);
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tls_register_device(&cdev->tlsdev);
        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
{
        tls_unregister_device(&cdev->tlsdev);
}

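/*
 * Work handler that drains the deferred-skb queue; the queue lock is
 * released while each deferred handler runs.
 */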
static void process_deferq(struct work_struct *task_param)
{
        struct chtls_dev *cdev = container_of(task_param,
                                struct chtls_dev, deferq_task);
        struct sk_buff *skb;

        spin_lock_bh(&cdev->deferq.lock);
        while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
                spin_unlock_bh(&cdev->deferq.lock);
                DEFERRED_SKB_CB(skb)->handler(cdev, skb);
                spin_lock_bh(&cdev->deferq.lock);
        }
        spin_unlock_bh(&cdev->deferq.lock);
}

static int chtls_get_skb(struct chtls_dev *cdev)
{
        cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
        if (!cdev->askb)
                return -ENOMEM;

        skb_put(cdev->askb, sizeof(struct tcphdr));
        skb_reset_transport_header(cdev->askb);
        memset(cdev->askb->data, 0, cdev->askb->len);
        return 0;
}

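/*
 * cxgb4 ULD "add" callback: allocate a chtls_dev for a newly probed
 * adapter, take a private copy of the LLD info, pre-allocate the
 * per-bin response-queue skb cache and, if the adapter exposes key
 * memory, set up the TLS key map.  The device is added to cdev_list
 * here but is only registered with the TLS core once the adapter
 * reports CXGB4_STATE_UP.
 */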
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
        struct cxgb4_lld_info *lldi;
        struct chtls_dev *cdev;
        int i, j;

        cdev = kzalloc(sizeof(*cdev) + info->nports *
                      (sizeof(struct net_device *)), GFP_KERNEL);
        if (!cdev)
                goto out;

        lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
        if (!lldi)
                goto out_lldi;

        if (chtls_get_skb(cdev))
                goto out_skb;

        *lldi = *info;
        cdev->lldi = lldi;
        cdev->pdev = lldi->pdev;
        cdev->tids = lldi->tids;
        cdev->ports = lldi->ports;
        cdev->mtus = lldi->mtus;
        cdev->tids = lldi->tids;
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;

        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
                unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

                cdev->rspq_skb_cache[i] = __alloc_skb(size,
                                                      gfp_any(), 0,
                                                      lldi->nodeid);
                if (unlikely(!cdev->rspq_skb_cache[i]))
                        goto out_rspq_skb;
        }

        idr_init(&cdev->hwtid_idr);
        INIT_WORK(&cdev->deferq_task, process_deferq);
        spin_lock_init(&cdev->listen_lock);
        spin_lock_init(&cdev->idr_lock);
        cdev->send_page_order = min_t(uint, get_order(32768),
                                      send_page_order);
        cdev->max_host_sndbuf = 48 * 1024;

        if (lldi->vr->key.size)
                if (chtls_init_kmap(cdev, lldi))
                        goto out_rspq_skb;

        mutex_lock(&cdev_mutex);
        list_add_tail(&cdev->list, &cdev_list);
        mutex_unlock(&cdev_mutex);

        return cdev;
out_rspq_skb:
        for (j = 0; j < i; j++)
                kfree_skb(cdev->rspq_skb_cache[j]);
        kfree_skb(cdev->askb);
out_skb:
        kfree(lldi);
out_lldi:
        kfree(cdev);
out:
        return NULL;
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
        int i;

        chtls_unregister_dev(cdev);
        kvfree(cdev->kmap.addr);
        idr_destroy(&cdev->hwtid_idr);
        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
                kfree_skb(cdev->rspq_skb_cache[i]);
        kfree(cdev->lldi);
        if (cdev->askb)
                kfree_skb(cdev->askb);
        kfree(cdev);
}

static void chtls_free_all_uld(void)
{
        struct chtls_dev *cdev, *tmp;

        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
                if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
                        chtls_free_uld(cdev);
        }
        mutex_unlock(&cdev_mutex);
}

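/*
 * cxgb4 ULD state-change callback.  The chtls device is registered
 * with the TLS core when the adapter comes up and torn down when the
 * adapter detaches; the remaining states need no action here.
 */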
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct chtls_dev *cdev = handle;

        switch (new_state) {
        case CXGB4_STATE_UP:
                chtls_register_dev(cdev);
                break;
        case CXGB4_STATE_DOWN:
                break;
        case CXGB4_STATE_START_RECOVERY:
                break;
        case CXGB4_STATE_DETACH:
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
                chtls_free_uld(cdev);
                break;
        default:
                break;
        }
        return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
                                          const __be64 *rsp,
                                          u32 pktshift)
{
        struct sk_buff *skb;

        /* Allocate space for cpl_pass_accept_req which will be synthesized by
         * the driver. Once the driver synthesizes cpl_pass_accept_req, the skb
         * will go through the regular cpl_pass_accept_req processing in TOM.
         */
        skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
                        - pktshift, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
        __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
                   - pktshift);
        /* For now we will copy cpl_rx_pkt in the skb */
        skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
        skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
                                       gl->va + pktshift,
                                       gl->tot_len - pktshift);

        return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
                             const struct pkt_gl *gl, const __be64 *rsp)
{
        unsigned int opcode = *(u8 *)rsp;
        struct sk_buff *skb;
        int ret;

        skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
        if (!skb)
                return -ENOMEM;

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);

        return 0;
}

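/*
 * Deliver a bare CPL response (one that arrived without a gather
 * list).  A preallocated skb from the response-queue hash bin is
 * reused when it is linear and not currently in use elsewhere;
 * otherwise a fresh skb is allocated with GFP_ATOMIC.
 */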
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
        unsigned long rspq_bin;
        unsigned int opcode;
        struct sk_buff *skb;
        unsigned int len;
        int ret;

        len = 64 - sizeof(struct rsp_ctrl) - 8;
        opcode = *(u8 *)rsp;

        rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
        skb = cdev->rspq_skb_cache[rspq_bin];
        if (skb && !skb_is_nonlinear(skb) &&
            !skb_shared(skb) && !skb_cloned(skb)) {
                refcount_inc(&skb->users);
                if (refcount_read(&skb->users) == 2) {
                        __skb_trim(skb, 0);
                        if (skb_tailroom(skb) >= len)
                                goto copy_out;
                }
                refcount_dec(&skb->users);
        }
        skb = alloc_skb(len, GFP_ATOMIC);
        if (unlikely(!skb))
                return -ENOMEM;

copy_out:
        __skb_put(skb, len);
        skb_copy_to_linear_data(skb, rsp, len);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        ret = chtls_handlers[opcode](cdev, skb);

        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
        return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
                       struct sk_buff **skbs, const __be64 *rsp)
{
        struct sk_buff *skb = *skbs;
        unsigned int opcode;
        int ret;

        opcode = *(u8 *)rsp;

        __skb_push(skb, sizeof(struct rss_header));
        skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
}

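/*
 * Main receive hook from cxgb4.  CPL_RX_PKT messages are turned into
 * synthesized cpl_pass_accept_req skbs by chtls_recv_packet(),
 * responses that arrive without a gather list go through
 * chtls_recv_rsp(), and everything else is converted to an skb and
 * dispatched to its CPL handler.
 */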
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
                                const struct pkt_gl *gl)
{
        struct chtls_dev *cdev = handle;
        unsigned int opcode;
        struct sk_buff *skb;

        opcode = *(u8 *)rsp;

        if (unlikely(opcode == CPL_RX_PKT)) {
                if (chtls_recv_packet(cdev, gl, rsp) < 0)
                        goto nomem;
                return 0;
        }

        if (!gl)
                return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
        skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
        if (unlikely(!skb))
                goto nomem;
        chtls_recv(cdev, &skb, rsp);
        return 0;

nomem:
        return -ENOMEM;
}

static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
                               int __user *optlen)
{
        struct tls_crypto_info crypto_info = { 0 };

        crypto_info.version = TLS_1_2_VERSION;
        if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
                return -EFAULT;
        return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->getsockopt(sk, level, optname, optval, optlen);

        return do_chtls_getsockopt(sk, optval, optlen);
}

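/*
 * Handle a SOL_TLS setsockopt: copy the tls_crypto_info from user
 * space, accept only TLS 1.2 with AES-GCM-128, and hand the key
 * material to the hardware via chtls_setkey().
 */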
static int do_chtls_setsockopt(struct sock *sk, int optname,
                               char __user *optval, unsigned int optlen)
{
        struct tls_crypto_info *crypto_info, tmp_crypto_info;
        struct chtls_sock *csk;
        int keylen;
        int rc = 0;

        csk = rcu_dereference_sk_user_data(sk);

        if (!optval || optlen < sizeof(*crypto_info)) {
                rc = -EINVAL;
                goto out;
        }

        rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        /* check version */
        if (tmp_crypto_info.version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto out;
        }

        crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

        switch (tmp_crypto_info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                /* Obtain version and type from previous copy */
                crypto_info[0] = tmp_crypto_info;
                /* Now copy the following data */
                rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
                                optval + sizeof(*crypto_info),
                                sizeof(struct tls12_crypto_info_aes_gcm_128)
                                - sizeof(*crypto_info));

                if (rc) {
                        rc = -EFAULT;
                        goto out;
                }

                keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
                rc = chtls_setkey(csk, keylen, optname);
                break;
        }
        default:
                rc = -EINVAL;
                goto out;
        }
out:
        return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->setsockopt(sk, level, optname, optval, optlen);

        return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
        .name = DRV_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chtls_uld_add,
        .state_change = chtls_uld_state_change,
        .rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
        sk->sk_prot = &chtls_cpl_prot;
}

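/*
 * Build chtls_cpl_prot as a copy of tcp_prot with the operations that
 * must go through the inline-TLS path overridden.  Sockets are
 * switched to it by chtls_install_cpl_ops().
 */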
static void __init chtls_init_ulp_ops(void)
{
        chtls_cpl_prot                  = tcp_prot;
        chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
                           &tcp_prot, PF_INET);
        chtls_cpl_prot.close            = chtls_close;
        chtls_cpl_prot.disconnect       = chtls_disconnect;
        chtls_cpl_prot.destroy          = chtls_destroy_sock;
        chtls_cpl_prot.shutdown         = chtls_shutdown;
        chtls_cpl_prot.sendmsg          = chtls_sendmsg;
        chtls_cpl_prot.sendpage         = chtls_sendpage;
        chtls_cpl_prot.recvmsg          = chtls_recvmsg;
        chtls_cpl_prot.setsockopt       = chtls_setsockopt;
        chtls_cpl_prot.getsockopt       = chtls_getsockopt;
}

static int __init chtls_register(void)
{
        chtls_init_ulp_ops();
        register_listen_notifier(&listen_notifier);
        cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
        return 0;
}

static void __exit chtls_unregister(void)
{
        unregister_listen_notifier(&listen_notifier);
        chtls_free_all_uld();
        cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);