drivers/crypto/n2_core.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
3  *
4  * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
5  */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_device.h>
13 #include <linux/cpumask.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/crypto.h>
17 #include <crypto/md5.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 #include <crypto/aes.h>
21 #include <crypto/internal/des.h>
22 #include <linux/mutex.h>
23 #include <linux/delay.h>
24 #include <linux/sched.h>
25
26 #include <crypto/internal/hash.h>
27 #include <crypto/internal/skcipher.h>
28 #include <crypto/scatterwalk.h>
29 #include <crypto/algapi.h>
30
31 #include <asm/hypervisor.h>
32 #include <asm/mdesc.h>
33
34 #include "n2_core.h"
35
36 #define DRV_MODULE_NAME         "n2_crypto"
37 #define DRV_MODULE_VERSION      "0.2"
38 #define DRV_MODULE_RELDATE      "July 28, 2011"
39
40 static const char version[] =
41         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42
43 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
44 MODULE_DESCRIPTION("Niagara2 Crypto driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(DRV_MODULE_VERSION);
47
48 #define N2_CRA_PRIORITY         200
49
50 static DEFINE_MUTEX(spu_lock);
51
52 struct spu_queue {
53         cpumask_t               sharing;
54         unsigned long           qhandle;
55
56         spinlock_t              lock;
57         u8                      q_type;
58         void                    *q;
59         unsigned long           head;
60         unsigned long           tail;
61         struct list_head        jobs;
62
63         unsigned long           devino;
64
65         char                    irq_name[32];
66         unsigned int            irq;
67
68         struct list_head        list;
69 };
70
71 struct spu_qreg {
72         struct spu_queue        *queue;
73         unsigned long           type;
74 };
75
76 static struct spu_queue **cpu_to_cwq;
77 static struct spu_queue **cpu_to_mau;
78
79 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
80 {
81         if (q->q_type == HV_NCS_QTYPE_MAU) {
82                 off += MAU_ENTRY_SIZE;
83                 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
84                         off = 0;
85         } else {
86                 off += CWQ_ENTRY_SIZE;
87                 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
88                         off = 0;
89         }
90         return off;
91 }
92
93 struct n2_request_common {
94         struct list_head        entry;
95         unsigned int            offset;
96 };
97 #define OFFSET_NOT_RUNNING      (~(unsigned int)0)
98
99 /* An async job request records the final tail value it used in
100  * n2_request_common->offset; test whether that offset lies in
101  * the half-open range (old_head, new_head].
102  */
103 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
104                                 unsigned long old_head, unsigned long new_head)
105 {
106         if (old_head <= new_head) {
107                 if (offset > old_head && offset <= new_head)
108                         return true;
109         } else {
110                 if (offset > old_head || offset <= new_head)
111                         return true;
112         }
113         return false;
114 }
115
116 /* When the HEAD marker is unequal to the actual HEAD, we get
117  * a virtual device INO interrupt.  We should process the
118  * completed CWQ entries and adjust the HEAD marker to clear
119  * the IRQ.
120  */
121 static irqreturn_t cwq_intr(int irq, void *dev_id)
122 {
123         unsigned long off, new_head, hv_ret;
124         struct spu_queue *q = dev_id;
125
126         pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
127                smp_processor_id(), q->qhandle);
128
129         spin_lock(&q->lock);
130
131         hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
132
133         pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
134                smp_processor_id(), new_head, hv_ret);
135
136         for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
137                 /* XXX ... XXX */
138         }
139
140         hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
141         if (hv_ret == HV_EOK)
142                 q->head = new_head;
143
144         spin_unlock(&q->lock);
145
146         return IRQ_HANDLED;
147 }
148
149 static irqreturn_t mau_intr(int irq, void *dev_id)
150 {
151         struct spu_queue *q = dev_id;
152         unsigned long head, hv_ret;
153
154         spin_lock(&q->lock);
155
156         pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
157                smp_processor_id(), q->qhandle);
158
159         hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
160
161         pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
162                smp_processor_id(), head, hv_ret);
163
164         sun4v_ncs_sethead_marker(q->qhandle, head);
165
166         spin_unlock(&q->lock);
167
168         return IRQ_HANDLED;
169 }
170
171 static void *spu_queue_next(struct spu_queue *q, void *cur)
172 {
173         return q->q + spu_next_offset(q, cur - q->q);
174 }
175
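/* Number of free CWQ entries between the consumer pointer (head) and
 * the producer pointer (tail) of the ring, computed in bytes and then
 * converted to entries.  One slot is always left unused so that
 * head == tail unambiguously means "queue empty" rather than "full".
 */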
176 static int spu_queue_num_free(struct spu_queue *q)
177 {
178         unsigned long head = q->head;
179         unsigned long tail = q->tail;
180         unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
181         unsigned long diff;
182
183         if (head > tail)
184                 diff = head - tail;
185         else
186                 diff = (end - tail) + head;
187
188         return (diff / CWQ_ENTRY_SIZE) - 1;
189 }
190
191 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
192 {
193         int avail = spu_queue_num_free(q);
194
195         if (avail >= num_entries)
196                 return q->q + q->tail;
197
198         return NULL;
199 }
200
201 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
202 {
203         unsigned long hv_ret, new_tail;
204
205         new_tail = spu_next_offset(q, last - q->q);
206
207         hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
208         if (hv_ret == HV_EOK)
209                 q->tail = new_tail;
210         return hv_ret;
211 }
212
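/* Build the 64-bit control word for the initial CWQ entry of an
 * operation: the (length - 1) field plus the opcode, encryption and
 * authentication types, HMAC key and hash lengths, and the
 * start/end-of-block, store-final-auth-state and encrypt flags.
 */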
213 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
214                              int enc_type, int auth_type,
215                              unsigned int hash_len,
216                              bool sfas, bool sob, bool eob, bool encrypt,
217                              int opcode)
218 {
219         u64 word = (len - 1) & CONTROL_LEN;
220
221         word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
222         word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
223         word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
224         if (sfas)
225                 word |= CONTROL_STORE_FINAL_AUTH_STATE;
226         if (sob)
227                 word |= CONTROL_START_OF_BLOCK;
228         if (eob)
229                 word |= CONTROL_END_OF_BLOCK;
230         if (encrypt)
231                 word |= CONTROL_ENCRYPT;
232         if (hmac_key_len)
233                 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
234         if (hash_len)
235                 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
236
237         return word;
238 }
239
240 #if 0
241 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
242 {
243         if (this_len >= 64 ||
244             qp->head != qp->tail)
245                 return true;
246         return false;
247 }
248 #endif
249
250 struct n2_ahash_alg {
251         struct list_head        entry;
252         const u8                *hash_zero;
253         const u8                *hash_init;
254         u8                      hw_op_hashsz;
255         u8                      digest_size;
256         u8                      auth_type;
257         u8                      hmac_type;
258         struct ahash_alg        alg;
259 };
260
261 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
262 {
263         struct crypto_alg *alg = tfm->__crt_alg;
264         struct ahash_alg *ahash_alg;
265
266         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
267
268         return container_of(ahash_alg, struct n2_ahash_alg, alg);
269 }
270
271 struct n2_hmac_alg {
272         const char              *child_alg;
273         struct n2_ahash_alg     derived;
274 };
275
276 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
277 {
278         struct crypto_alg *alg = tfm->__crt_alg;
279         struct ahash_alg *ahash_alg;
280
281         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
282
283         return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
284 }
285
286 struct n2_hash_ctx {
287         struct crypto_ahash             *fallback_tfm;
288 };
289
290 #define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
291
292 struct n2_hmac_ctx {
293         struct n2_hash_ctx              base;
294
295         struct crypto_shash             *child_shash;
296
297         int                             hash_key_len;
298         unsigned char                   hash_key[N2_HASH_KEY_MAX];
299 };
300
301 struct n2_hash_req_ctx {
302         union {
303                 struct md5_state        md5;
304                 struct sha1_state       sha1;
305                 struct sha256_state     sha256;
306         } u;
307
308         struct ahash_request            fallback_req;
309 };
310
311 static int n2_hash_async_init(struct ahash_request *req)
312 {
313         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
314         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
315         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
316
317         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
318         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
319
320         return crypto_ahash_init(&rctx->fallback_req);
321 }
322
323 static int n2_hash_async_update(struct ahash_request *req)
324 {
325         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
326         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
327         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
328
329         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
330         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
331         rctx->fallback_req.nbytes = req->nbytes;
332         rctx->fallback_req.src = req->src;
333
334         return crypto_ahash_update(&rctx->fallback_req);
335 }
336
337 static int n2_hash_async_final(struct ahash_request *req)
338 {
339         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
340         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
341         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
342
343         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
344         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
345         rctx->fallback_req.result = req->result;
346
347         return crypto_ahash_final(&rctx->fallback_req);
348 }
349
350 static int n2_hash_async_finup(struct ahash_request *req)
351 {
352         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
353         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
354         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
355
356         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
357         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
358         rctx->fallback_req.nbytes = req->nbytes;
359         rctx->fallback_req.src = req->src;
360         rctx->fallback_req.result = req->result;
361
362         return crypto_ahash_finup(&rctx->fallback_req);
363 }
364
365 static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
366 {
367         return -ENOSYS;
368 }
369
370 static int n2_hash_async_noexport(struct ahash_request *req, void *out)
371 {
372         return -ENOSYS;
373 }
374
375 static int n2_hash_cra_init(struct crypto_tfm *tfm)
376 {
377         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
378         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
379         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
380         struct crypto_ahash *fallback_tfm;
381         int err;
382
383         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
384                                           CRYPTO_ALG_NEED_FALLBACK);
385         if (IS_ERR(fallback_tfm)) {
386                 pr_warn("Fallback driver '%s' could not be loaded!\n",
387                         fallback_driver_name);
388                 err = PTR_ERR(fallback_tfm);
389                 goto out;
390         }
391
392         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
393                                          crypto_ahash_reqsize(fallback_tfm)));
394
395         ctx->fallback_tfm = fallback_tfm;
396         return 0;
397
398 out:
399         return err;
400 }
401
402 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
403 {
404         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
405         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
406
407         crypto_free_ahash(ctx->fallback_tfm);
408 }
409
410 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
411 {
412         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
413         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
414         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
415         struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
416         struct crypto_ahash *fallback_tfm;
417         struct crypto_shash *child_shash;
418         int err;
419
420         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
421                                           CRYPTO_ALG_NEED_FALLBACK);
422         if (IS_ERR(fallback_tfm)) {
423                 pr_warn("Fallback driver '%s' could not be loaded!\n",
424                         fallback_driver_name);
425                 err = PTR_ERR(fallback_tfm);
426                 goto out;
427         }
428
429         child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
430         if (IS_ERR(child_shash)) {
431                 pr_warn("Child shash '%s' could not be loaded!\n",
432                         n2alg->child_alg);
433                 err = PTR_ERR(child_shash);
434                 goto out_free_fallback;
435         }
436
437         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
438                                          crypto_ahash_reqsize(fallback_tfm)));
439
440         ctx->child_shash = child_shash;
441         ctx->base.fallback_tfm = fallback_tfm;
442         return 0;
443
444 out_free_fallback:
445         crypto_free_ahash(fallback_tfm);
446
447 out:
448         return err;
449 }
450
451 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
452 {
453         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
454         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
455
456         crypto_free_ahash(ctx->base.fallback_tfm);
457         crypto_free_shash(ctx->child_shash);
458 }
459
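/* Keys longer than the child hash's block size are first digested down
 * to its digest size.  A key that is within the block size but still
 * larger than N2_HASH_KEY_MAX is left uncopied; the oversized
 * hash_key_len then steers n2_hmac_async_digest() onto the fallback
 * tfm instead of the hardware path.
 */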
460 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
461                                 unsigned int keylen)
462 {
463         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
464         struct crypto_shash *child_shash = ctx->child_shash;
465         struct crypto_ahash *fallback_tfm;
466         int err, bs, ds;
467
468         fallback_tfm = ctx->base.fallback_tfm;
469         err = crypto_ahash_setkey(fallback_tfm, key, keylen);
470         if (err)
471                 return err;
472
473         bs = crypto_shash_blocksize(child_shash);
474         ds = crypto_shash_digestsize(child_shash);
475         BUG_ON(ds > N2_HASH_KEY_MAX);
476         if (keylen > bs) {
477                 err = crypto_shash_tfm_digest(child_shash, key, keylen,
478                                               ctx->hash_key);
479                 if (err)
480                         return err;
481                 keylen = ds;
482         } else if (keylen <= N2_HASH_KEY_MAX)
483                 memcpy(ctx->hash_key, key, keylen);
484
485         ctx->hash_key_len = keylen;
486
487         return err;
488 }
489
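/* Spin until the hardware's head pointer catches up with our recorded
 * tail, i.e. every entry we have submitted has been consumed, or until
 * the hypervisor reports an error.
 */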
490 static unsigned long wait_for_tail(struct spu_queue *qp)
491 {
492         unsigned long head, hv_ret;
493
494         do {
495                 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
496                 if (hv_ret != HV_EOK) {
497                         pr_err("Hypervisor error on gethead\n");
498                         break;
499                 }
500                 if (head == qp->tail) {
501                         qp->head = head;
502                         break;
503                 }
504         } while (1);
505         return hv_ret;
506 }
507
508 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
509                                               struct cwq_initial_entry *ent)
510 {
511         unsigned long hv_ret = spu_queue_submit(qp, ent);
512
513         if (hv_ret == HV_EOK)
514                 hv_ret = wait_for_tail(qp);
515
516         return hv_ret;
517 }
518
519 static int n2_do_async_digest(struct ahash_request *req,
520                               unsigned int auth_type, unsigned int digest_size,
521                               unsigned int result_size, void *hash_loc,
522                               unsigned long auth_key, unsigned int auth_key_len)
523 {
524         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
525         struct cwq_initial_entry *ent;
526         struct crypto_hash_walk walk;
527         struct spu_queue *qp;
528         unsigned long flags;
529         int err = -ENODEV;
530         int nbytes, cpu;
531
532         /* The total effective length of the operation may not
533          * exceed 2^16 bytes.
534          */
535         if (unlikely(req->nbytes > (1 << 16))) {
536                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
537                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
538
539                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
540                 rctx->fallback_req.base.flags =
541                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
542                 rctx->fallback_req.nbytes = req->nbytes;
543                 rctx->fallback_req.src = req->src;
544                 rctx->fallback_req.result = req->result;
545
546                 return crypto_ahash_digest(&rctx->fallback_req);
547         }
548
549         nbytes = crypto_hash_walk_first(req, &walk);
550
551         cpu = get_cpu();
552         qp = cpu_to_cwq[cpu];
553         if (!qp)
554                 goto out;
555
556         spin_lock_irqsave(&qp->lock, flags);
557
558         /* XXX can do better, improve this later by doing a by-hand scatterlist
559          * XXX walk, etc.
560          */
561         ent = qp->q + qp->tail;
562
563         ent->control = control_word_base(nbytes, auth_key_len, 0,
564                                          auth_type, digest_size,
565                                          false, true, false, false,
566                                          OPCODE_INPLACE_BIT |
567                                          OPCODE_AUTH_MAC);
568         ent->src_addr = __pa(walk.data);
569         ent->auth_key_addr = auth_key;
570         ent->auth_iv_addr = __pa(hash_loc);
571         ent->final_auth_state_addr = 0UL;
572         ent->enc_key_addr = 0UL;
573         ent->enc_iv_addr = 0UL;
574         ent->dest_addr = __pa(hash_loc);
575
576         nbytes = crypto_hash_walk_done(&walk, 0);
577         while (nbytes > 0) {
578                 ent = spu_queue_next(qp, ent);
579
580                 ent->control = (nbytes - 1);
581                 ent->src_addr = __pa(walk.data);
582                 ent->auth_key_addr = 0UL;
583                 ent->auth_iv_addr = 0UL;
584                 ent->final_auth_state_addr = 0UL;
585                 ent->enc_key_addr = 0UL;
586                 ent->enc_iv_addr = 0UL;
587                 ent->dest_addr = 0UL;
588
589                 nbytes = crypto_hash_walk_done(&walk, 0);
590         }
591         ent->control |= CONTROL_END_OF_BLOCK;
592
593         if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
594                 err = -EINVAL;
595         else
596                 err = 0;
597
598         spin_unlock_irqrestore(&qp->lock, flags);
599
600         if (!err)
601                 memcpy(req->result, hash_loc, result_size);
602 out:
603         put_cpu();
604
605         return err;
606 }
607
608 static int n2_hash_async_digest(struct ahash_request *req)
609 {
610         struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
611         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
612         int ds;
613
614         ds = n2alg->digest_size;
615         if (unlikely(req->nbytes == 0)) {
616                 memcpy(req->result, n2alg->hash_zero, ds);
617                 return 0;
618         }
619         memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
620
621         return n2_do_async_digest(req, n2alg->auth_type,
622                                   n2alg->hw_op_hashsz, ds,
623                                   &rctx->u, 0UL, 0);
624 }
625
626 static int n2_hmac_async_digest(struct ahash_request *req)
627 {
628         struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
629         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
630         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
631         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
632         int ds;
633
634         ds = n2alg->derived.digest_size;
635         if (unlikely(req->nbytes == 0) ||
636             unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
637                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
638                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
639
640                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
641                 rctx->fallback_req.base.flags =
642                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
643                 rctx->fallback_req.nbytes = req->nbytes;
644                 rctx->fallback_req.src = req->src;
645                 rctx->fallback_req.result = req->result;
646
647                 return crypto_ahash_digest(&rctx->fallback_req);
648         }
649         memcpy(&rctx->u, n2alg->derived.hash_init,
650                n2alg->derived.hw_op_hashsz);
651
652         return n2_do_async_digest(req, n2alg->derived.hmac_type,
653                                   n2alg->derived.hw_op_hashsz, ds,
654                                   &rctx->u,
655                                   __pa(&ctx->hash_key),
656                                   ctx->hash_key_len);
657 }
658
659 struct n2_skcipher_context {
660         int                     key_len;
661         int                     enc_type;
662         union {
663                 u8              aes[AES_MAX_KEY_SIZE];
664                 u8              des[DES_KEY_SIZE];
665                 u8              des3[3 * DES_KEY_SIZE];
666         } key;
667 };
668
669 #define N2_CHUNK_ARR_LEN        16
670
671 struct n2_crypto_chunk {
672         struct list_head        entry;
673         unsigned long           iv_paddr : 44;
674         unsigned long           arr_len : 20;
675         unsigned long           dest_paddr;
676         unsigned long           dest_final;
677         struct {
678                 unsigned long   src_paddr : 44;
679                 unsigned long   src_len : 20;
680         } arr[N2_CHUNK_ARR_LEN];
681 };
682
683 struct n2_request_context {
684         struct skcipher_walk    walk;
685         struct list_head        chunk_list;
686         struct n2_crypto_chunk  chunk;
687         u8                      temp_iv[16];
688 };
689
690 /* The SPU allows some level of flexibility for partial cipher blocks
691  * being specified in a descriptor.
692  *
693  * It merely requires that every descriptor's length field is at least
694  * as large as the cipher block size.  This means that a cipher block
695  * can span at most 2 descriptors.  However, this does not allow a
696  * partial block to span into the final descriptor as that would
697  * violate the rule (since every descriptor's length must be at least
698  * the block size).  So, for example, assuming an 8 byte block size:
699  *
700  *      0xe --> 0xa --> 0x8
701  *
702  * is a valid length sequence, whereas:
703  *
704  *      0xe --> 0xb --> 0x7
705  *
706  * is not a valid sequence.
707  */
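/* A minimal sketch (not part of the driver) of the rule above: every
 * descriptor in a sequence must be at least one cipher block long; the
 * total is also checked to be a whole number of blocks, which holds for
 * any block-cipher request here.  The helper name and signature are
 * made up purely for illustration.
 */
static inline bool n2_example_desc_lens_valid(const unsigned int *lens,
					      int count,
					      unsigned int block_size)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* e.g. the trailing 0x7 in the invalid sequence above */
		if (lens[i] < block_size)
			return false;
		total += lens[i];
	}
	return (total % block_size) == 0;
}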
708
709 struct n2_skcipher_alg {
710         struct list_head        entry;
711         u8                      enc_type;
712         struct skcipher_alg     skcipher;
713 };
714
715 static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
716 {
717         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
718
719         return container_of(alg, struct n2_skcipher_alg, skcipher);
720 }
721
722 struct n2_skcipher_request_context {
723         struct skcipher_walk    walk;
724 };
725
726 static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
727                          unsigned int keylen)
728 {
729         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
730         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
731         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
732
733         ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
734
735         switch (keylen) {
736         case AES_KEYSIZE_128:
737                 ctx->enc_type |= ENC_TYPE_ALG_AES128;
738                 break;
739         case AES_KEYSIZE_192:
740                 ctx->enc_type |= ENC_TYPE_ALG_AES192;
741                 break;
742         case AES_KEYSIZE_256:
743                 ctx->enc_type |= ENC_TYPE_ALG_AES256;
744                 break;
745         default:
746                 return -EINVAL;
747         }
748
749         ctx->key_len = keylen;
750         memcpy(ctx->key.aes, key, keylen);
751         return 0;
752 }
753
754 static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
755                          unsigned int keylen)
756 {
757         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
758         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
759         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
760         int err;
761
762         err = verify_skcipher_des_key(skcipher, key);
763         if (err)
764                 return err;
765
766         ctx->enc_type = n2alg->enc_type;
767
768         ctx->key_len = keylen;
769         memcpy(ctx->key.des, key, keylen);
770         return 0;
771 }
772
773 static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
774                           unsigned int keylen)
775 {
776         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
777         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
778         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
779         int err;
780
781         err = verify_skcipher_des3_key(skcipher, key);
782         if (err)
783                 return err;
784
785         ctx->enc_type = n2alg->enc_type;
786
787         ctx->key_len = keylen;
788         memcpy(ctx->key.des3, key, keylen);
789         return 0;
790 }
791
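/* Round nbytes down to a whole number of cipher blocks and cap the
 * result at the 2^16 byte limit used throughout this driver for a
 * single chunk of work.
 */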
792 static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
793 {
794         int this_len = nbytes;
795
796         this_len -= (nbytes & (block_size - 1));
797         return this_len > (1 << 16) ? (1 << 16) : this_len;
798 }
799
800 static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
801                             struct n2_crypto_chunk *cp,
802                             struct spu_queue *qp, bool encrypt)
803 {
804         struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
805         struct cwq_initial_entry *ent;
806         bool in_place;
807         int i;
808
809         ent = spu_queue_alloc(qp, cp->arr_len);
810         if (!ent) {
811                 pr_info("queue_alloc() of %d fails\n",
812                         cp->arr_len);
813                 return -EBUSY;
814         }
815
816         in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
817
818         ent->control = control_word_base(cp->arr[0].src_len,
819                                          0, ctx->enc_type, 0, 0,
820                                          false, true, false, encrypt,
821                                          OPCODE_ENCRYPT |
822                                          (in_place ? OPCODE_INPLACE_BIT : 0));
823         ent->src_addr = cp->arr[0].src_paddr;
824         ent->auth_key_addr = 0UL;
825         ent->auth_iv_addr = 0UL;
826         ent->final_auth_state_addr = 0UL;
827         ent->enc_key_addr = __pa(&ctx->key);
828         ent->enc_iv_addr = cp->iv_paddr;
829         ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
830
831         for (i = 1; i < cp->arr_len; i++) {
832                 ent = spu_queue_next(qp, ent);
833
834                 ent->control = cp->arr[i].src_len - 1;
835                 ent->src_addr = cp->arr[i].src_paddr;
836                 ent->auth_key_addr = 0UL;
837                 ent->auth_iv_addr = 0UL;
838                 ent->final_auth_state_addr = 0UL;
839                 ent->enc_key_addr = 0UL;
840                 ent->enc_iv_addr = 0UL;
841                 ent->dest_addr = 0UL;
842         }
843         ent->control |= CONTROL_END_OF_BLOCK;
844
845         return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
846 }
847
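/* Walk the request's scatterlists and coalesce them into a list of
 * n2_crypto_chunk's.  A chunk holds at most N2_CHUNK_ARR_LEN source
 * descriptors and at most 2^16 bytes, and is split whenever the
 * in-place property changes or the destination stops being contiguous
 * with the previous piece.
 */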
848 static int n2_compute_chunks(struct skcipher_request *req)
849 {
850         struct n2_request_context *rctx = skcipher_request_ctx(req);
851         struct skcipher_walk *walk = &rctx->walk;
852         struct n2_crypto_chunk *chunk;
853         unsigned long dest_prev;
854         unsigned int tot_len;
855         bool prev_in_place;
856         int err, nbytes;
857
858         err = skcipher_walk_async(walk, req);
859         if (err)
860                 return err;
861
862         INIT_LIST_HEAD(&rctx->chunk_list);
863
864         chunk = &rctx->chunk;
865         INIT_LIST_HEAD(&chunk->entry);
866
867         chunk->iv_paddr = 0UL;
868         chunk->arr_len = 0;
869         chunk->dest_paddr = 0UL;
870
871         prev_in_place = false;
872         dest_prev = ~0UL;
873         tot_len = 0;
874
875         while ((nbytes = walk->nbytes) != 0) {
876                 unsigned long dest_paddr, src_paddr;
877                 bool in_place;
878                 int this_len;
879
880                 src_paddr = (page_to_phys(walk->src.phys.page) +
881                              walk->src.phys.offset);
882                 dest_paddr = (page_to_phys(walk->dst.phys.page) +
883                               walk->dst.phys.offset);
884                 in_place = (src_paddr == dest_paddr);
885                 this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
886
887                 if (chunk->arr_len != 0) {
888                         if (in_place != prev_in_place ||
889                             (!prev_in_place &&
890                              dest_paddr != dest_prev) ||
891                             chunk->arr_len == N2_CHUNK_ARR_LEN ||
892                             tot_len + this_len > (1 << 16)) {
893                                 chunk->dest_final = dest_prev;
894                                 list_add_tail(&chunk->entry,
895                                               &rctx->chunk_list);
896                                 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
897                                 if (!chunk) {
898                                         err = -ENOMEM;
899                                         break;
900                                 }
901                                 INIT_LIST_HEAD(&chunk->entry);
902                         }
903                 }
904                 if (chunk->arr_len == 0) {
905                         chunk->dest_paddr = dest_paddr;
906                         tot_len = 0;
907                 }
908                 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
909                 chunk->arr[chunk->arr_len].src_len = this_len;
910                 chunk->arr_len++;
911
912                 dest_prev = dest_paddr + this_len;
913                 prev_in_place = in_place;
914                 tot_len += this_len;
915
916                 err = skcipher_walk_done(walk, nbytes - this_len);
917                 if (err)
918                         break;
919         }
920         if (!err && chunk->arr_len != 0) {
921                 chunk->dest_final = dest_prev;
922                 list_add_tail(&chunk->entry, &rctx->chunk_list);
923         }
924
925         return err;
926 }
927
928 static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
929 {
930         struct n2_request_context *rctx = skcipher_request_ctx(req);
931         struct n2_crypto_chunk *c, *tmp;
932
933         if (final_iv)
934                 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
935
936         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
937                 list_del(&c->entry);
938                 if (unlikely(c != &rctx->chunk))
939                         kfree(c);
940         }
941
942 }
943
944 static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
945 {
946         struct n2_request_context *rctx = skcipher_request_ctx(req);
947         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
948         int err = n2_compute_chunks(req);
949         struct n2_crypto_chunk *c, *tmp;
950         unsigned long flags, hv_ret;
951         struct spu_queue *qp;
952
953         if (err)
954                 return err;
955
956         qp = cpu_to_cwq[get_cpu()];
957         err = -ENODEV;
958         if (!qp)
959                 goto out;
960
961         spin_lock_irqsave(&qp->lock, flags);
962
963         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
964                 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
965                 if (err)
966                         break;
967                 list_del(&c->entry);
968                 if (unlikely(c != &rctx->chunk))
969                         kfree(c);
970         }
971         if (!err) {
972                 hv_ret = wait_for_tail(qp);
973                 if (hv_ret != HV_EOK)
974                         err = -EINVAL;
975         }
976
977         spin_unlock_irqrestore(&qp->lock, flags);
978
979 out:
980         put_cpu();
981
982         n2_chunk_complete(req, NULL);
983         return err;
984 }
985
986 static int n2_encrypt_ecb(struct skcipher_request *req)
987 {
988         return n2_do_ecb(req, true);
989 }
990
991 static int n2_decrypt_ecb(struct skcipher_request *req)
992 {
993         return n2_do_ecb(req, false);
994 }
995
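/* Chained (CBC/CFB-style) operation.  For encryption the IV of each
 * chunk is the last ciphertext block produced by the previous chunk,
 * so chunks are processed in order.  For decryption the chunks are
 * processed in reverse so that each chunk's IV (the preceding
 * ciphertext block) is still intact, and the final IV is saved to
 * temp_iv up front in case an in-place operation overwrites it.
 */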
996 static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
997 {
998         struct n2_request_context *rctx = skcipher_request_ctx(req);
999         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1000         unsigned long flags, hv_ret, iv_paddr;
1001         int err = n2_compute_chunks(req);
1002         struct n2_crypto_chunk *c, *tmp;
1003         struct spu_queue *qp;
1004         void *final_iv_addr;
1005
1006         final_iv_addr = NULL;
1007
1008         if (err)
1009                 return err;
1010
1011         qp = cpu_to_cwq[get_cpu()];
1012         err = -ENODEV;
1013         if (!qp)
1014                 goto out;
1015
1016         spin_lock_irqsave(&qp->lock, flags);
1017
1018         if (encrypt) {
1019                 iv_paddr = __pa(rctx->walk.iv);
1020                 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1021                                          entry) {
1022                         c->iv_paddr = iv_paddr;
1023                         err = __n2_crypt_chunk(tfm, c, qp, true);
1024                         if (err)
1025                                 break;
1026                         iv_paddr = c->dest_final - rctx->walk.blocksize;
1027                         list_del(&c->entry);
1028                         if (unlikely(c != &rctx->chunk))
1029                                 kfree(c);
1030                 }
1031                 final_iv_addr = __va(iv_paddr);
1032         } else {
1033                 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1034                                                  entry) {
1035                         if (c == &rctx->chunk) {
1036                                 iv_paddr = __pa(rctx->walk.iv);
1037                         } else {
1038                                 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1039                                             tmp->arr[tmp->arr_len-1].src_len -
1040                                             rctx->walk.blocksize);
1041                         }
1042                         if (!final_iv_addr) {
1043                                 unsigned long pa;
1044
1045                                 pa = (c->arr[c->arr_len-1].src_paddr +
1046                                       c->arr[c->arr_len-1].src_len -
1047                                       rctx->walk.blocksize);
1048                                 final_iv_addr = rctx->temp_iv;
1049                                 memcpy(rctx->temp_iv, __va(pa),
1050                                        rctx->walk.blocksize);
1051                         }
1052                         c->iv_paddr = iv_paddr;
1053                         err = __n2_crypt_chunk(tfm, c, qp, false);
1054                         if (err)
1055                                 break;
1056                         list_del(&c->entry);
1057                         if (unlikely(c != &rctx->chunk))
1058                                 kfree(c);
1059                 }
1060         }
1061         if (!err) {
1062                 hv_ret = wait_for_tail(qp);
1063                 if (hv_ret != HV_EOK)
1064                         err = -EINVAL;
1065         }
1066
1067         spin_unlock_irqrestore(&qp->lock, flags);
1068
1069 out:
1070         put_cpu();
1071
1072         n2_chunk_complete(req, err ? NULL : final_iv_addr);
1073         return err;
1074 }
1075
1076 static int n2_encrypt_chaining(struct skcipher_request *req)
1077 {
1078         return n2_do_chaining(req, true);
1079 }
1080
1081 static int n2_decrypt_chaining(struct skcipher_request *req)
1082 {
1083         return n2_do_chaining(req, false);
1084 }
1085
1086 struct n2_skcipher_tmpl {
1087         const char              *name;
1088         const char              *drv_name;
1089         u8                      block_size;
1090         u8                      enc_type;
1091         struct skcipher_alg     skcipher;
1092 };
1093
1094 static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1095         /* DES: ECB, CBC and CFB are supported */
1096         {       .name           = "ecb(des)",
1097                 .drv_name       = "ecb-des",
1098                 .block_size     = DES_BLOCK_SIZE,
1099                 .enc_type       = (ENC_TYPE_ALG_DES |
1100                                    ENC_TYPE_CHAINING_ECB),
1101                 .skcipher       = {
1102                         .min_keysize    = DES_KEY_SIZE,
1103                         .max_keysize    = DES_KEY_SIZE,
1104                         .setkey         = n2_des_setkey,
1105                         .encrypt        = n2_encrypt_ecb,
1106                         .decrypt        = n2_decrypt_ecb,
1107                 },
1108         },
1109         {       .name           = "cbc(des)",
1110                 .drv_name       = "cbc-des",
1111                 .block_size     = DES_BLOCK_SIZE,
1112                 .enc_type       = (ENC_TYPE_ALG_DES |
1113                                    ENC_TYPE_CHAINING_CBC),
1114                 .skcipher       = {
1115                         .ivsize         = DES_BLOCK_SIZE,
1116                         .min_keysize    = DES_KEY_SIZE,
1117                         .max_keysize    = DES_KEY_SIZE,
1118                         .setkey         = n2_des_setkey,
1119                         .encrypt        = n2_encrypt_chaining,
1120                         .decrypt        = n2_decrypt_chaining,
1121                 },
1122         },
1123         {       .name           = "cfb(des)",
1124                 .drv_name       = "cfb-des",
1125                 .block_size     = DES_BLOCK_SIZE,
1126                 .enc_type       = (ENC_TYPE_ALG_DES |
1127                                    ENC_TYPE_CHAINING_CFB),
1128                 .skcipher       = {
1129                         .min_keysize    = DES_KEY_SIZE,
1130                         .max_keysize    = DES_KEY_SIZE,
1131                         .setkey         = n2_des_setkey,
1132                         .encrypt        = n2_encrypt_chaining,
1133                         .decrypt        = n2_decrypt_chaining,
1134                 },
1135         },
1136
1137         /* 3DES: ECB, CBC and CFB are supported */
1138         {       .name           = "ecb(des3_ede)",
1139                 .drv_name       = "ecb-3des",
1140                 .block_size     = DES_BLOCK_SIZE,
1141                 .enc_type       = (ENC_TYPE_ALG_3DES |
1142                                    ENC_TYPE_CHAINING_ECB),
1143                 .skcipher       = {
1144                         .min_keysize    = 3 * DES_KEY_SIZE,
1145                         .max_keysize    = 3 * DES_KEY_SIZE,
1146                         .setkey         = n2_3des_setkey,
1147                         .encrypt        = n2_encrypt_ecb,
1148                         .decrypt        = n2_decrypt_ecb,
1149                 },
1150         },
1151         {       .name           = "cbc(des3_ede)",
1152                 .drv_name       = "cbc-3des",
1153                 .block_size     = DES_BLOCK_SIZE,
1154                 .enc_type       = (ENC_TYPE_ALG_3DES |
1155                                    ENC_TYPE_CHAINING_CBC),
1156                 .skcipher       = {
1157                         .ivsize         = DES_BLOCK_SIZE,
1158                         .min_keysize    = 3 * DES_KEY_SIZE,
1159                         .max_keysize    = 3 * DES_KEY_SIZE,
1160                         .setkey         = n2_3des_setkey,
1161                         .encrypt        = n2_encrypt_chaining,
1162                         .decrypt        = n2_decrypt_chaining,
1163                 },
1164         },
1165         {       .name           = "cfb(des3_ede)",
1166                 .drv_name       = "cfb-3des",
1167                 .block_size     = DES_BLOCK_SIZE,
1168                 .enc_type       = (ENC_TYPE_ALG_3DES |
1169                                    ENC_TYPE_CHAINING_CFB),
1170                 .skcipher       = {
1171                         .min_keysize    = 3 * DES_KEY_SIZE,
1172                         .max_keysize    = 3 * DES_KEY_SIZE,
1173                         .setkey         = n2_3des_setkey,
1174                         .encrypt        = n2_encrypt_chaining,
1175                         .decrypt        = n2_decrypt_chaining,
1176                 },
1177         },
1178         /* AES: ECB, CBC and CTR are supported */
1179         {       .name           = "ecb(aes)",
1180                 .drv_name       = "ecb-aes",
1181                 .block_size     = AES_BLOCK_SIZE,
1182                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1183                                    ENC_TYPE_CHAINING_ECB),
1184                 .skcipher       = {
1185                         .min_keysize    = AES_MIN_KEY_SIZE,
1186                         .max_keysize    = AES_MAX_KEY_SIZE,
1187                         .setkey         = n2_aes_setkey,
1188                         .encrypt        = n2_encrypt_ecb,
1189                         .decrypt        = n2_decrypt_ecb,
1190                 },
1191         },
1192         {       .name           = "cbc(aes)",
1193                 .drv_name       = "cbc-aes",
1194                 .block_size     = AES_BLOCK_SIZE,
1195                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1196                                    ENC_TYPE_CHAINING_CBC),
1197                 .skcipher       = {
1198                         .ivsize         = AES_BLOCK_SIZE,
1199                         .min_keysize    = AES_MIN_KEY_SIZE,
1200                         .max_keysize    = AES_MAX_KEY_SIZE,
1201                         .setkey         = n2_aes_setkey,
1202                         .encrypt        = n2_encrypt_chaining,
1203                         .decrypt        = n2_decrypt_chaining,
1204                 },
1205         },
1206         {       .name           = "ctr(aes)",
1207                 .drv_name       = "ctr-aes",
1208                 .block_size     = AES_BLOCK_SIZE,
1209                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1210                                    ENC_TYPE_CHAINING_COUNTER),
1211                 .skcipher       = {
1212                         .ivsize         = AES_BLOCK_SIZE,
1213                         .min_keysize    = AES_MIN_KEY_SIZE,
1214                         .max_keysize    = AES_MAX_KEY_SIZE,
1215                         .setkey         = n2_aes_setkey,
1216                         .encrypt        = n2_encrypt_chaining,
1217                         .decrypt        = n2_encrypt_chaining,
1218                 },
1219         },
1220
1221 };
1222 #define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1223
1224 static LIST_HEAD(skcipher_algs);
1225
1226 struct n2_hash_tmpl {
1227         const char      *name;
1228         const u8        *hash_zero;
1229         const u8        *hash_init;
1230         u8              hw_op_hashsz;
1231         u8              digest_size;
1232         u8              block_size;
1233         u8              auth_type;
1234         u8              hmac_type;
1235 };
1236
1237 static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
1238         cpu_to_le32(MD5_H0),
1239         cpu_to_le32(MD5_H1),
1240         cpu_to_le32(MD5_H2),
1241         cpu_to_le32(MD5_H3),
1242 };
1243 static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1244         SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1245 };
1246 static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1247         SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1248         SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1249 };
1250 static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1251         SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1252         SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1253 };
1254
1255 static const struct n2_hash_tmpl hash_tmpls[] = {
1256         { .name         = "md5",
1257           .hash_zero    = md5_zero_message_hash,
1258           .hash_init    = (u8 *)n2_md5_init,
1259           .auth_type    = AUTH_TYPE_MD5,
1260           .hmac_type    = AUTH_TYPE_HMAC_MD5,
1261           .hw_op_hashsz = MD5_DIGEST_SIZE,
1262           .digest_size  = MD5_DIGEST_SIZE,
1263           .block_size   = MD5_HMAC_BLOCK_SIZE },
1264         { .name         = "sha1",
1265           .hash_zero    = sha1_zero_message_hash,
1266           .hash_init    = (u8 *)n2_sha1_init,
1267           .auth_type    = AUTH_TYPE_SHA1,
1268           .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1269           .hw_op_hashsz = SHA1_DIGEST_SIZE,
1270           .digest_size  = SHA1_DIGEST_SIZE,
1271           .block_size   = SHA1_BLOCK_SIZE },
1272         { .name         = "sha256",
1273           .hash_zero    = sha256_zero_message_hash,
1274           .hash_init    = (u8 *)n2_sha256_init,
1275           .auth_type    = AUTH_TYPE_SHA256,
1276           .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1277           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1278           .digest_size  = SHA256_DIGEST_SIZE,
1279           .block_size   = SHA256_BLOCK_SIZE },
1280         { .name         = "sha224",
1281           .hash_zero    = sha224_zero_message_hash,
1282           .hash_init    = (u8 *)n2_sha224_init,
1283           .auth_type    = AUTH_TYPE_SHA256,
1284           .hmac_type    = AUTH_TYPE_RESERVED,
1285           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1286           .digest_size  = SHA224_DIGEST_SIZE,
1287           .block_size   = SHA224_BLOCK_SIZE },
1288 };
1289 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1290
1291 static LIST_HEAD(ahash_algs);
1292 static LIST_HEAD(hmac_algs);
1293
1294 static int algs_registered;
1295
1296 static void __n2_unregister_algs(void)
1297 {
1298         struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1299         struct n2_ahash_alg *alg, *alg_tmp;
1300         struct n2_hmac_alg *hmac, *hmac_tmp;
1301
1302         list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1303                 crypto_unregister_skcipher(&skcipher->skcipher);
1304                 list_del(&skcipher->entry);
1305                 kfree(skcipher);
1306         }
1307         list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1308                 crypto_unregister_ahash(&hmac->derived.alg);
1309                 list_del(&hmac->derived.entry);
1310                 kfree(hmac);
1311         }
1312         list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1313                 crypto_unregister_ahash(&alg->alg);
1314                 list_del(&alg->entry);
1315                 kfree(alg);
1316         }
1317 }
1318
1319 static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1320 {
1321         crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1322         return 0;
1323 }
1324
1325 static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1326 {
1327         struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1328         struct skcipher_alg *alg;
1329         int err;
1330
1331         if (!p)
1332                 return -ENOMEM;
1333
1334         alg = &p->skcipher;
1335         *alg = tmpl->skcipher;
1336
1337         snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1338         snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1339         alg->base.cra_priority = N2_CRA_PRIORITY;
1340         alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
1341                               CRYPTO_ALG_ALLOCATES_MEMORY;
1342         alg->base.cra_blocksize = tmpl->block_size;
1343         p->enc_type = tmpl->enc_type;
1344         alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1345         alg->base.cra_module = THIS_MODULE;
1346         alg->init = n2_skcipher_init_tfm;
1347
1348         list_add(&p->entry, &skcipher_algs);
1349         err = crypto_register_skcipher(alg);
1350         if (err) {
1351                 pr_err("%s alg registration failed\n", alg->base.cra_name);
1352                 list_del(&p->entry);
1353                 kfree(p);
1354         } else {
1355                 pr_info("%s alg registered\n", alg->base.cra_name);
1356         }
1357         return err;
1358 }
1359
1360 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1361 {
1362         struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1363         struct ahash_alg *ahash;
1364         struct crypto_alg *base;
1365         int err;
1366
1367         if (!p)
1368                 return -ENOMEM;
1369
1370         p->child_alg = n2ahash->alg.halg.base.cra_name;
1371         memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1372         INIT_LIST_HEAD(&p->derived.entry);
1373
1374         ahash = &p->derived.alg;
1375         ahash->digest = n2_hmac_async_digest;
1376         ahash->setkey = n2_hmac_async_setkey;
1377
1378         base = &ahash->halg.base;
1379         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1380         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1381
1382         base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1383         base->cra_init = n2_hmac_cra_init;
1384         base->cra_exit = n2_hmac_cra_exit;
1385
1386         list_add(&p->derived.entry, &hmac_algs);
1387         err = crypto_register_ahash(ahash);
1388         if (err) {
1389                 pr_err("%s alg registration failed\n", base->cra_name);
1390                 list_del(&p->derived.entry);
1391                 kfree(p);
1392         } else {
1393                 pr_info("%s alg registered\n", base->cra_name);
1394         }
1395         return err;
1396 }
1397
1398 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1399 {
1400         struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1401         struct hash_alg_common *halg;
1402         struct crypto_alg *base;
1403         struct ahash_alg *ahash;
1404         int err;
1405
1406         if (!p)
1407                 return -ENOMEM;
1408
1409         p->hash_zero = tmpl->hash_zero;
1410         p->hash_init = tmpl->hash_init;
1411         p->auth_type = tmpl->auth_type;
1412         p->hmac_type = tmpl->hmac_type;
1413         p->hw_op_hashsz = tmpl->hw_op_hashsz;
1414         p->digest_size = tmpl->digest_size;
1415
1416         ahash = &p->alg;
1417         ahash->init = n2_hash_async_init;
1418         ahash->update = n2_hash_async_update;
1419         ahash->final = n2_hash_async_final;
1420         ahash->finup = n2_hash_async_finup;
1421         ahash->digest = n2_hash_async_digest;
1422         ahash->export = n2_hash_async_noexport;
1423         ahash->import = n2_hash_async_noimport;
1424
1425         halg = &ahash->halg;
1426         halg->digestsize = tmpl->digest_size;
1427
1428         base = &halg->base;
1429         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1430         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1431         base->cra_priority = N2_CRA_PRIORITY;
1432         base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1433                           CRYPTO_ALG_NEED_FALLBACK;
1434         base->cra_blocksize = tmpl->block_size;
1435         base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1436         base->cra_module = THIS_MODULE;
1437         base->cra_init = n2_hash_cra_init;
1438         base->cra_exit = n2_hash_cra_exit;
1439
1440         list_add(&p->entry, &ahash_algs);
1441         err = crypto_register_ahash(ahash);
1442         if (err) {
1443                 pr_err("%s alg registration failed\n", base->cra_name);
1444                 list_del(&p->entry);
1445                 kfree(p);
1446         } else {
1447                 pr_info("%s alg registered\n", base->cra_name);
1448         }
1449         if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1450                 err = __n2_register_one_hmac(p);
1451         return err;
1452 }
1453
1454 static int n2_register_algs(void)
1455 {
1456         int i, err = 0;
1457
1458         mutex_lock(&spu_lock);
1459         if (algs_registered++)
1460                 goto out;
1461
1462         for (i = 0; i < NUM_HASH_TMPLS; i++) {
1463                 err = __n2_register_one_ahash(&hash_tmpls[i]);
1464                 if (err) {
1465                         __n2_unregister_algs();
1466                         goto out;
1467                 }
1468         }
1469         for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1470                 err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1471                 if (err) {
1472                         __n2_unregister_algs();
1473                         goto out;
1474                 }
1475         }
1476
1477 out:
1478         mutex_unlock(&spu_lock);
1479         return err;
1480 }
1481
1482 static void n2_unregister_algs(void)
1483 {
1484         mutex_lock(&spu_lock);
1485         if (!--algs_registered)
1486                 __n2_unregister_algs();
1487         mutex_unlock(&spu_lock);
1488 }
1489
1490 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1491  * a devino.  This isn't very useful to us because all of the
1492  * interrupts listed in the device_node have been translated to
1493  * Linux virtual IRQ cookie numbers.
1494  *
1495  * So we have to back-translate, going through the 'intr' and 'ino'
1496  * property tables of the n2cp MDESC node, matching it with the OF
1497  * 'interrupts' property entries, in order to figure out which
1498  * devino goes to which already-translated IRQ.
1499  */
1500 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1501                              unsigned long dev_ino)
1502 {
1503         const unsigned int *dev_intrs;
1504         unsigned int intr;
1505         int i;
1506
1507         for (i = 0; i < ip->num_intrs; i++) {
1508                 if (ip->ino_table[i].ino == dev_ino)
1509                         break;
1510         }
1511         if (i == ip->num_intrs)
1512                 return -ENODEV;
1513
1514         intr = ip->ino_table[i].intr;
1515
1516         dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1517         if (!dev_intrs)
1518                 return -ENODEV;
1519
1520         for (i = 0; i < dev->archdata.num_irqs; i++) {
1521                 if (dev_intrs[i] == intr)
1522                         return i;
1523         }
1524
1525         return -ENODEV;
1526 }
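
/* A worked example of find_devino_index() with hypothetical values: if
 * the MDESC 'ino' property were { 0x2e, 0x2f }, get_irq_props() below
 * builds ino_table[] = { { .intr = 1, .ino = 0x2e },
 *                        { .intr = 2, .ino = 0x2f } }.
 * A lookup of dev_ino == 0x2f then yields intr == 2; if the OF
 * 'interrupts' property were { 1, 2 }, the match lands on index 1, so
 * the caller ends up requesting dev->archdata.irqs[1] for that devino.
 */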
1527
1528 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1529                        const char *irq_name, struct spu_queue *p,
1530                        irq_handler_t handler)
1531 {
1532         unsigned long herr;
1533         int index;
1534
1535         herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1536         if (herr)
1537                 return -EINVAL;
1538
1539         index = find_devino_index(dev, ip, p->devino);
1540         if (index < 0)
1541                 return index;
1542
1543         p->irq = dev->archdata.irqs[index];
1544
1545         snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
1546
1547         return request_irq(p->irq, handler, 0, p->irq_name, p);
1548 }
1549
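/* Each object in these caches is a whole hardware queue: the caches are
 * sized to NUM_ENTRIES * ENTRY_SIZE bytes, aligned to the entry size,
 * and indexed by (hypervisor queue type - 1).
 */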
1550 static struct kmem_cache *queue_cache[2];
1551
1552 static void *new_queue(unsigned long q_type)
1553 {
1554         return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1555 }
1556
1557 static void free_queue(void *p, unsigned long q_type)
1558 {
1559         kmem_cache_free(queue_cache[q_type - 1], p);
1560 }
1561
1562 static int queue_cache_init(void)
1563 {
1564         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1565                 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1566                         kmem_cache_create("mau_queue",
1567                                           (MAU_NUM_ENTRIES *
1568                                            MAU_ENTRY_SIZE),
1569                                           MAU_ENTRY_SIZE, 0, NULL);
1570         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1571                 return -ENOMEM;
1572
1573         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1574                 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1575                         kmem_cache_create("cwq_queue",
1576                                           (CWQ_NUM_ENTRIES *
1577                                            CWQ_ENTRY_SIZE),
1578                                           CWQ_ENTRY_SIZE, 0, NULL);
1579         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1580                 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1581                 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1582                 return -ENOMEM;
1583         }
1584         return 0;
1585 }
1586
1587 static void queue_cache_destroy(void)
1588 {
1589         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1590         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1591         queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1592         queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1593 }
1594
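/* Queue registration is funneled through work_on_cpu_safe() onto one of
 * the CPUs in the queue's sharing mask, presumably because the NCS
 * queue-configure hypercall must be issued from a CPU that the crypto
 * unit actually serves.  The work function configures the queue with
 * the hypervisor and zeroes its head marker on success.
 */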
1595 static long spu_queue_register_workfn(void *arg)
1596 {
1597         struct spu_qreg *qr = arg;
1598         struct spu_queue *p = qr->queue;
1599         unsigned long q_type = qr->type;
1600         unsigned long hv_ret;
1601
1602         hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1603                                  CWQ_NUM_ENTRIES, &p->qhandle);
1604         if (!hv_ret)
1605                 sun4v_ncs_sethead_marker(p->qhandle, 0);
1606
1607         return hv_ret ? -EINVAL : 0;
1608 }
1609
1610 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1611 {
1612         int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1613         struct spu_qreg qr = { .queue = p, .type = q_type };
1614
1615         return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1616 }
1617
1618 static int spu_queue_setup(struct spu_queue *p)
1619 {
1620         int err;
1621
1622         p->q = new_queue(p->q_type);
1623         if (!p->q)
1624                 return -ENOMEM;
1625
1626         err = spu_queue_register(p, p->q_type);
1627         if (err) {
1628                 free_queue(p->q, p->q_type);
1629                 p->q = NULL;
1630         }
1631
1632         return err;
1633 }
1634
1635 static void spu_queue_destroy(struct spu_queue *p)
1636 {
1637         unsigned long hv_ret;
1638
1639         if (!p->q)
1640                 return;
1641
1642         hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1643
1644         if (!hv_ret)
1645                 free_queue(p->q, p->q_type);
1646 }
1647
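/* Tear down every queue on a list, either at remove time or on a probe
 * error path: drop cpu_to_cwq[] references to the queue, release its
 * IRQ, unconfigure it with the hypervisor, and free it.  Only
 * cpu_to_cwq[] is scrubbed here; stale cpu_to_mau[] entries are dropped
 * only when release_global_resources() frees the table itself.
 */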
1648 static void spu_list_destroy(struct list_head *list)
1649 {
1650         struct spu_queue *p, *n;
1651
1652         list_for_each_entry_safe(p, n, list, list) {
1653                 int i;
1654
1655                 for (i = 0; i < NR_CPUS; i++) {
1656                         if (cpu_to_cwq[i] == p)
1657                                 cpu_to_cwq[i] = NULL;
1658                 }
1659
1660                 if (p->irq) {
1661                         free_irq(p->irq, p);
1662                         p->irq = 0;
1663                 }
1664                 spu_queue_destroy(p);
1665                 list_del(&p->list);
1666                 kfree(p);
1667         }
1668 }
1669
1670 /* Walk the backward arcs of a CWQ or MAU 'exec-unit' node,
1671  * gathering cpu membership information.
1672  */
1673 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1674                                struct platform_device *dev,
1675                                u64 node, struct spu_queue *p,
1676                                struct spu_queue **table)
1677 {
1678         u64 arc;
1679
1680         mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1681                 u64 tgt = mdesc_arc_target(mdesc, arc);
1682                 const char *name = mdesc_node_name(mdesc, tgt);
1683                 const u64 *id;
1684
1685                 if (strcmp(name, "cpu"))
1686                         continue;
1687                 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1688                 if (table[*id] != NULL) {
1689                         dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1690                                 dev->dev.of_node);
1691                         return -EINVAL;
1692                 }
1693                 cpumask_set_cpu(*id, &p->sharing);
1694                 table[*id] = p;
1695         }
1696         return 0;
1697 }
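
/* For example (hypothetical machine description): a 'cwq' exec-unit
 * node with back arcs to cpu nodes whose 'id' properties are 8..15
 * would end up with those eight bits set in p->sharing and with
 * cpu_to_cwq[8..15] all pointing at the same spu_queue.  The walk
 * assumes every 'cpu' node carries an 'id' property; 'id' is
 * dereferenced without a NULL check.
 */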
1698
1699 /* Process an 'exec-unit' MDESC node of type 'cwq' or 'mau'.  */
1700 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1701                             struct platform_device *dev, struct mdesc_handle *mdesc,
1702                             u64 node, const char *iname, unsigned long q_type,
1703                             irq_handler_t handler, struct spu_queue **table)
1704 {
1705         struct spu_queue *p;
1706         int err;
1707
1708         p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1709         if (!p) {
1710                 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1711                         dev->dev.of_node);
1712                 return -ENOMEM;
1713         }
1714
1715         cpumask_clear(&p->sharing);
1716         spin_lock_init(&p->lock);
1717         p->q_type = q_type;
1718         INIT_LIST_HEAD(&p->jobs);
1719         list_add(&p->list, list);
1720
1721         err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1722         if (err)
1723                 return err;
1724
1725         err = spu_queue_setup(p);
1726         if (err)
1727                 return err;
1728
1729         return spu_map_ino(dev, ip, iname, p, handler);
1730 }
1731
1732 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1733                           struct spu_mdesc_info *ip, struct list_head *list,
1734                           const char *exec_name, unsigned long q_type,
1735                           irq_handler_t handler, struct spu_queue **table)
1736 {
1737         int err = 0;
1738         u64 node;
1739
1740         mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1741                 const char *type;
1742
1743                 type = mdesc_get_property(mdesc, node, "type", NULL);
1744                 if (!type || strcmp(type, exec_name))
1745                         continue;
1746
1747                 err = handle_exec_unit(ip, list, dev, mdesc, node,
1748                                        exec_name, q_type, handler, table);
1749                 if (err) {
1750                         spu_list_destroy(list);
1751                         break;
1752                 }
1753         }
1754
1755         return err;
1756 }
1757
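/* Pull the 'ino' property out of the unit's MDESC node and build the
 * ino_table consumed by find_devino_index().  Each entry's 'intr'
 * value is simply its 1-based position in the 'ino' list, which is
 * what gets matched against the OF 'interrupts' property entries.
 */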
1758 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1759                          struct spu_mdesc_info *ip)
1760 {
1761         const u64 *ino;
1762         int ino_len;
1763         int i;
1764
1765         ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1766         if (!ino) {
1767                 pr_err("NO 'ino'\n");
1768                 return -ENODEV;
1769         }
1770
1771         ip->num_intrs = ino_len / sizeof(u64);
1772         ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1773                                  ip->num_intrs),
1774                                 GFP_KERNEL);
1775         if (!ip->ino_table)
1776                 return -ENOMEM;
1777
1778         for (i = 0; i < ip->num_intrs; i++) {
1779                 struct ino_blob *b = &ip->ino_table[i];
1780                 b->intr = i + 1;
1781                 b->ino = ino[i];
1782         }
1783
1784         return 0;
1785 }
1786
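/* Locate the MDESC 'virtual-device' node corresponding to this platform
 * device by matching the node's 'cfg-handle' against the first cell of
 * the OF 'reg' property, then harvest that node's interrupt
 * properties.
 */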
1787 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1788                                 struct platform_device *dev,
1789                                 struct spu_mdesc_info *ip,
1790                                 const char *node_name)
1791 {
1792         const unsigned int *reg;
1793         u64 node;
1794
1795         reg = of_get_property(dev->dev.of_node, "reg", NULL);
1796         if (!reg)
1797                 return -ENODEV;
1798
1799         mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1800                 const char *name;
1801                 const u64 *chdl;
1802
1803                 name = mdesc_get_property(mdesc, node, "name", NULL);
1804                 if (!name || strcmp(name, node_name))
1805                         continue;
1806                 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1807                 if (!chdl || (*chdl != *reg))
1808                         continue;
1809                 ip->cfg_handle = *chdl;
1810                 return get_irq_props(mdesc, node, ip);
1811         }
1812
1813         return -ENODEV;
1814 }
1815
1816 static unsigned long n2_spu_hvapi_major;
1817 static unsigned long n2_spu_hvapi_minor;
1818
1819 static int n2_spu_hvapi_register(void)
1820 {
1821         int err;
1822
1823         n2_spu_hvapi_major = 2;
1824         n2_spu_hvapi_minor = 0;
1825
1826         err = sun4v_hvapi_register(HV_GRP_NCS,
1827                                    n2_spu_hvapi_major,
1828                                    &n2_spu_hvapi_minor);
1829
1830         if (!err)
1831                 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1832                         n2_spu_hvapi_major,
1833                         n2_spu_hvapi_minor);
1834
1835         return err;
1836 }
1837
1838 static void n2_spu_hvapi_unregister(void)
1839 {
1840         sun4v_hvapi_unregister(HV_GRP_NCS);
1841 }
1842
1843 static int global_ref;
1844
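/* The HVAPI registration, the queue caches and the per-cpu queue tables
 * are shared by the n2cp (CWQ) and ncp (MAU) platform drivers, so they
 * are set up once and reference counted with 'global_ref' under
 * 'spu_lock'; the final release_global_resources() call frees them.
 */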
1845 static int grab_global_resources(void)
1846 {
1847         int err = 0;
1848
1849         mutex_lock(&spu_lock);
1850
1851         if (global_ref++)
1852                 goto out;
1853
1854         err = n2_spu_hvapi_register();
1855         if (err)
1856                 goto out;
1857
1858         err = queue_cache_init();
1859         if (err)
1860                 goto out_hvapi_release;
1861
1862         err = -ENOMEM;
1863         cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1864                              GFP_KERNEL);
1865         if (!cpu_to_cwq)
1866                 goto out_queue_cache_destroy;
1867
1868         cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1869                              GFP_KERNEL);
1870         if (!cpu_to_mau)
1871                 goto out_free_cwq_table;
1872
1873         err = 0;
1874
1875 out:
1876         if (err)
1877                 global_ref--;
1878         mutex_unlock(&spu_lock);
1879         return err;
1880
1881 out_free_cwq_table:
1882         kfree(cpu_to_cwq);
1883         cpu_to_cwq = NULL;
1884
1885 out_queue_cache_destroy:
1886         queue_cache_destroy();
1887
1888 out_hvapi_release:
1889         n2_spu_hvapi_unregister();
1890         goto out;
1891 }
1892
1893 static void release_global_resources(void)
1894 {
1895         mutex_lock(&spu_lock);
1896         if (!--global_ref) {
1897                 kfree(cpu_to_cwq);
1898                 cpu_to_cwq = NULL;
1899
1900                 kfree(cpu_to_mau);
1901                 cpu_to_mau = NULL;
1902
1903                 queue_cache_destroy();
1904                 n2_spu_hvapi_unregister();
1905         }
1906         mutex_unlock(&spu_lock);
1907 }
1908
1909 static struct n2_crypto *alloc_n2cp(void)
1910 {
1911         struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1912
1913         if (np)
1914                 INIT_LIST_HEAD(&np->cwq_list);
1915
1916         return np;
1917 }
1918
1919 static void free_n2cp(struct n2_crypto *np)
1920 {
1921         kfree(np->cwq_info.ino_table);
1922         np->cwq_info.ino_table = NULL;
1923
1924         kfree(np);
1925 }
1926
1927 static void n2_spu_driver_version(void)
1928 {
1929         static int n2_spu_version_printed;
1930
1931         if (n2_spu_version_printed++ == 0)
1932                 pr_info("%s", version);
1933 }
1934
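/* Probe sequence for an n2cp device: allocate the driver state, take a
 * reference on the shared global resources, grab the machine
 * description, pull the unit's interrupt properties from the MDESC,
 * scan it for 'cwq' exec-units (setting up one spu_queue per unit),
 * and finally register the crypto algorithms.  Each step unwinds the
 * previous ones on failure.
 */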
1935 static int n2_crypto_probe(struct platform_device *dev)
1936 {
1937         struct mdesc_handle *mdesc;
1938         struct n2_crypto *np;
1939         int err;
1940
1941         n2_spu_driver_version();
1942
1943         pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1944
1945         np = alloc_n2cp();
1946         if (!np) {
1947                 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1948                         dev->dev.of_node);
1949                 return -ENOMEM;
1950         }
1951
1952         err = grab_global_resources();
1953         if (err) {
1954                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
1955                         dev->dev.of_node);
1956                 goto out_free_n2cp;
1957         }
1958
1959         mdesc = mdesc_grab();
1960
1961         if (!mdesc) {
1962                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
1963                         dev->dev.of_node);
1964                 err = -ENODEV;
1965                 goto out_free_global;
1966         }
1967         err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1968         if (err) {
1969                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
1970                         dev->dev.of_node);
1971                 mdesc_release(mdesc);
1972                 goto out_free_global;
1973         }
1974
1975         err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1976                              "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1977                              cpu_to_cwq);
1978         mdesc_release(mdesc);
1979
1980         if (err) {
1981                 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
1982                         dev->dev.of_node);
1983                 goto out_free_global;
1984         }
1985
1986         err = n2_register_algs();
1987         if (err) {
1988                 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
1989                         dev->dev.of_node);
1990                 goto out_free_spu_list;
1991         }
1992
1993         dev_set_drvdata(&dev->dev, np);
1994
1995         return 0;
1996
1997 out_free_spu_list:
1998         spu_list_destroy(&np->cwq_list);
1999
2000 out_free_global:
2001         release_global_resources();
2002
2003 out_free_n2cp:
2004         free_n2cp(np);
2005
2006         return err;
2007 }
2008
2009 static int n2_crypto_remove(struct platform_device *dev)
2010 {
2011         struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2012
2013         n2_unregister_algs();
2014
2015         spu_list_destroy(&np->cwq_list);
2016
2017         release_global_resources();
2018
2019         free_n2cp(np);
2020
2021         return 0;
2022 }
2023
2024 static struct n2_mau *alloc_ncp(void)
2025 {
2026         struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2027
2028         if (mp)
2029                 INIT_LIST_HEAD(&mp->mau_list);
2030
2031         return mp;
2032 }
2033
2034 static void free_ncp(struct n2_mau *mp)
2035 {
2036         kfree(mp->mau_info.ino_table);
2037         mp->mau_info.ino_table = NULL;
2038
2039         kfree(mp);
2040 }
2041
2042 static int n2_mau_probe(struct platform_device *dev)
2043 {
2044         struct mdesc_handle *mdesc;
2045         struct n2_mau *mp;
2046         int err;
2047
2048         n2_spu_driver_version();
2049
2050         pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2051
2052         mp = alloc_ncp();
2053         if (!mp) {
2054                 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2055                         dev->dev.of_node);
2056                 return -ENOMEM;
2057         }
2058
2059         err = grab_global_resources();
2060         if (err) {
2061                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2062                         dev->dev.of_node);
2063                 goto out_free_ncp;
2064         }
2065
2066         mdesc = mdesc_grab();
2067
2068         if (!mdesc) {
2069                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2070                         dev->dev.of_node);
2071                 err = -ENODEV;
2072                 goto out_free_global;
2073         }
2074
2075         err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2076         if (err) {
2077                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2078                         dev->dev.of_node);
2079                 mdesc_release(mdesc);
2080                 goto out_free_global;
2081         }
2082
2083         err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2084                              "mau", HV_NCS_QTYPE_MAU, mau_intr,
2085                              cpu_to_mau);
2086         mdesc_release(mdesc);
2087
2088         if (err) {
2089                 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2090                         dev->dev.of_node);
2091                 goto out_free_global;
2092         }
2093
2094         dev_set_drvdata(&dev->dev, mp);
2095
2096         return 0;
2097
2098 out_free_global:
2099         release_global_resources();
2100
2101 out_free_ncp:
2102         free_ncp(mp);
2103
2104         return err;
2105 }
2106
2107 static int n2_mau_remove(struct platform_device *dev)
2108 {
2109         struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2110
2111         spu_list_destroy(&mp->mau_list);
2112
2113         release_global_resources();
2114
2115         free_ncp(mp);
2116
2117         return 0;
2118 }
2119
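/* The two match tables below list the same units under the compatible
 * strings of successive chips; "n2", "vf" and "kt" most likely refer to
 * Niagara-2 (UltraSPARC T2), Victoria Falls (T2 Plus) and KT/Rainbow
 * Falls (SPARC T3) respectively.
 */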
2120 static const struct of_device_id n2_crypto_match[] = {
2121         {
2122                 .name = "n2cp",
2123                 .compatible = "SUNW,n2-cwq",
2124         },
2125         {
2126                 .name = "n2cp",
2127                 .compatible = "SUNW,vf-cwq",
2128         },
2129         {
2130                 .name = "n2cp",
2131                 .compatible = "SUNW,kt-cwq",
2132         },
2133         {},
2134 };
2135
2136 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2137
2138 static struct platform_driver n2_crypto_driver = {
2139         .driver = {
2140                 .name           =       "n2cp",
2141                 .of_match_table =       n2_crypto_match,
2142         },
2143         .probe          =       n2_crypto_probe,
2144         .remove         =       n2_crypto_remove,
2145 };
2146
2147 static const struct of_device_id n2_mau_match[] = {
2148         {
2149                 .name = "ncp",
2150                 .compatible = "SUNW,n2-mau",
2151         },
2152         {
2153                 .name = "ncp",
2154                 .compatible = "SUNW,vf-mau",
2155         },
2156         {
2157                 .name = "ncp",
2158                 .compatible = "SUNW,kt-mau",
2159         },
2160         {},
2161 };
2162
2163 MODULE_DEVICE_TABLE(of, n2_mau_match);
2164
2165 static struct platform_driver n2_mau_driver = {
2166         .driver = {
2167                 .name           =       "ncp",
2168                 .of_match_table =       n2_mau_match,
2169         },
2170         .probe          =       n2_mau_probe,
2171         .remove         =       n2_mau_remove,
2172 };
2173
2174 static struct platform_driver * const drivers[] = {
2175         &n2_crypto_driver,
2176         &n2_mau_driver,
2177 };
2178
2179 static int __init n2_init(void)
2180 {
2181         return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2182 }
2183
2184 static void __exit n2_exit(void)
2185 {
2186         platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2187 }
2188
2189 module_init(n2_init);
2190 module_exit(n2_exit);