1 // SPDX-License-Identifier: GPL-2.0-only
2 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
4 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
12 #include <linux/of_device.h>
13 #include <linux/cpumask.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/crypto.h>
17 #include <crypto/md5.h>
18 #include <crypto/sha.h>
19 #include <crypto/aes.h>
20 #include <crypto/internal/des.h>
21 #include <linux/mutex.h>
22 #include <linux/delay.h>
23 #include <linux/sched.h>
25 #include <crypto/internal/hash.h>
26 #include <crypto/internal/skcipher.h>
27 #include <crypto/scatterwalk.h>
28 #include <crypto/algapi.h>
30 #include <asm/hypervisor.h>
31 #include <asm/mdesc.h>
35 #define DRV_MODULE_NAME "n2_crypto"
36 #define DRV_MODULE_VERSION "0.2"
37 #define DRV_MODULE_RELDATE "July 28, 2011"
39 static const char version[] =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
43 MODULE_DESCRIPTION("Niagara2 Crypto driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_MODULE_VERSION);
47 #define N2_CRA_PRIORITY 200
49 static DEFINE_MUTEX(spu_lock);
53 unsigned long qhandle;
60 struct list_head jobs;
67 struct list_head list;
71 struct spu_queue *queue;
75 static struct spu_queue **cpu_to_cwq;
76 static struct spu_queue **cpu_to_mau;
78 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
80 if (q->q_type == HV_NCS_QTYPE_MAU) {
81 off += MAU_ENTRY_SIZE;
82 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
85 off += CWQ_ENTRY_SIZE;
86 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
92 struct n2_request_common {
93 struct list_head entry;
96 #define OFFSET_NOT_RUNNING (~(unsigned int)0)
98 /* An async job request records the final tail value it used in
99 * n2_request_common->offset.  job_finished() tests whether that offset
100 * lies in the range (old_head, new_head], accounting for wrap-around.
102 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
103 unsigned long old_head, unsigned long new_head)
105 if (old_head <= new_head) {
106 if (offset > old_head && offset <= new_head)
109 if (offset > old_head || offset <= new_head)
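/* Worked example (illustrative): with old_head = 0x40 and new_head = 0xc0,
 * a job whose recorded offset is 0x80 matches the first test above and is
 * finished.  If the hardware head has wrapped, e.g. old_head = 0xc0 and
 * new_head = 0x40, then any offset above 0xc0 *or* at/below 0x40 is
 * finished, which is what the second test covers.
 */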
115 /* When the HEAD marker is unequal to the actual HEAD, we get
116 * a virtual device INO interrupt. We should process the
117 * completed CWQ entries and adjust the HEAD marker to clear
120 static irqreturn_t cwq_intr(int irq, void *dev_id)
122 unsigned long off, new_head, hv_ret;
123 struct spu_queue *q = dev_id;
125 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
126 smp_processor_id(), q->qhandle);
130 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
132 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
133 smp_processor_id(), new_head, hv_ret);
135 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
139 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
140 if (hv_ret == HV_EOK)
143 spin_unlock(&q->lock);
148 static irqreturn_t mau_intr(int irq, void *dev_id)
150 struct spu_queue *q = dev_id;
151 unsigned long head, hv_ret;
155 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
156 smp_processor_id(), q->qhandle);
158 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
160 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
161 smp_processor_id(), head, hv_ret);
163 sun4v_ncs_sethead_marker(q->qhandle, head);
165 spin_unlock(&q->lock);
170 static void *spu_queue_next(struct spu_queue *q, void *cur)
172 return q->q + spu_next_offset(q, cur - q->q);
175 static int spu_queue_num_free(struct spu_queue *q)
177 unsigned long head = q->head;
178 unsigned long tail = q->tail;
179 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
185 diff = (end - tail) + head;
187 return (diff / CWQ_ENTRY_SIZE) - 1;
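/* Note (illustrative): the "- 1" keeps one queue entry permanently unused
 * so that head == tail can only mean "empty", never "full".  For an empty
 * queue the wrapped computation above gives diff == end, so num_free is
 * reported as CWQ_NUM_ENTRIES - 1.
 */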
190 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
192 int avail = spu_queue_num_free(q);
194 if (avail >= num_entries)
195 return q->q + q->tail;
200 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
202 unsigned long hv_ret, new_tail;
204 new_tail = spu_next_offset(q, last - q->q);
206 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
207 if (hv_ret == HV_EOK)
212 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
213 int enc_type, int auth_type,
214 unsigned int hash_len,
215 bool sfas, bool sob, bool eob, bool encrypt,
218 u64 word = (len - 1) & CONTROL_LEN;
220 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
221 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
222 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
224 word |= CONTROL_STORE_FINAL_AUTH_STATE;
226 word |= CONTROL_START_OF_BLOCK;
228 word |= CONTROL_END_OF_BLOCK;
230 word |= CONTROL_ENCRYPT;
232 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
234 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
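/* Note (illustrative): the four booleans map, in order, to
 * CONTROL_STORE_FINAL_AUTH_STATE, CONTROL_START_OF_BLOCK,
 * CONTROL_END_OF_BLOCK and CONTROL_ENCRYPT.  The hash path in
 * n2_do_async_digest() passes (false, true, false, false), i.e.
 * start-of-block only; CONTROL_END_OF_BLOCK is OR'ed into the last
 * descriptor of the chain afterwards, and CONTROL_ENCRYPT stays clear
 * for pure authentication work.
 */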
240 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
242 if (this_len >= 64 ||
243 qp->head != qp->tail)
249 struct n2_ahash_alg {
250 struct list_head entry;
257 struct ahash_alg alg;
260 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
262 struct crypto_alg *alg = tfm->__crt_alg;
263 struct ahash_alg *ahash_alg;
265 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
267 return container_of(ahash_alg, struct n2_ahash_alg, alg);
271 const char *child_alg;
272 struct n2_ahash_alg derived;
275 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
277 struct crypto_alg *alg = tfm->__crt_alg;
278 struct ahash_alg *ahash_alg;
280 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
282 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
286 struct crypto_ahash *fallback_tfm;
289 #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
292 struct n2_hash_ctx base;
294 struct crypto_shash *child_shash;
297 unsigned char hash_key[N2_HASH_KEY_MAX];
300 struct n2_hash_req_ctx {
302 struct md5_state md5;
303 struct sha1_state sha1;
304 struct sha256_state sha256;
307 struct ahash_request fallback_req;
310 static int n2_hash_async_init(struct ahash_request *req)
312 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
313 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
314 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
316 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
317 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
319 return crypto_ahash_init(&rctx->fallback_req);
322 static int n2_hash_async_update(struct ahash_request *req)
324 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
325 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
326 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
328 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
329 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
330 rctx->fallback_req.nbytes = req->nbytes;
331 rctx->fallback_req.src = req->src;
333 return crypto_ahash_update(&rctx->fallback_req);
336 static int n2_hash_async_final(struct ahash_request *req)
338 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
339 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
340 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
342 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
343 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
344 rctx->fallback_req.result = req->result;
346 return crypto_ahash_final(&rctx->fallback_req);
349 static int n2_hash_async_finup(struct ahash_request *req)
351 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
352 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
353 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
355 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
356 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
357 rctx->fallback_req.nbytes = req->nbytes;
358 rctx->fallback_req.src = req->src;
359 rctx->fallback_req.result = req->result;
361 return crypto_ahash_finup(&rctx->fallback_req);
364 static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
369 static int n2_hash_async_noexport(struct ahash_request *req, void *out)
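/* Note (illustrative summary): this driver only offloads one-shot digests
 * to the SPU.  The incremental init/update/final/finup entry points above
 * simply forward to the software fallback ahash allocated in
 * n2_hash_cra_init(), and import/export of partial state are not
 * supported; only n2_hash_async_digest()/n2_hmac_async_digest() below
 * reach the hardware.
 */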
374 static int n2_hash_cra_init(struct crypto_tfm *tfm)
376 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
377 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
378 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
379 struct crypto_ahash *fallback_tfm;
382 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
383 CRYPTO_ALG_NEED_FALLBACK);
384 if (IS_ERR(fallback_tfm)) {
385 pr_warn("Fallback driver '%s' could not be loaded!\n",
386 fallback_driver_name);
387 err = PTR_ERR(fallback_tfm);
391 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
392 crypto_ahash_reqsize(fallback_tfm)));
394 ctx->fallback_tfm = fallback_tfm;
401 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
403 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
404 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
406 crypto_free_ahash(ctx->fallback_tfm);
409 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
411 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
412 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
413 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
414 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
415 struct crypto_ahash *fallback_tfm;
416 struct crypto_shash *child_shash;
419 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
420 CRYPTO_ALG_NEED_FALLBACK);
421 if (IS_ERR(fallback_tfm)) {
422 pr_warn("Fallback driver '%s' could not be loaded!\n",
423 fallback_driver_name);
424 err = PTR_ERR(fallback_tfm);
428 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
429 if (IS_ERR(child_shash)) {
430 pr_warn("Child shash '%s' could not be loaded!\n",
432 err = PTR_ERR(child_shash);
433 goto out_free_fallback;
436 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
437 crypto_ahash_reqsize(fallback_tfm)));
439 ctx->child_shash = child_shash;
440 ctx->base.fallback_tfm = fallback_tfm;
444 crypto_free_ahash(fallback_tfm);
450 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
452 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
453 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
455 crypto_free_ahash(ctx->base.fallback_tfm);
456 crypto_free_shash(ctx->child_shash);
459 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
462 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
463 struct crypto_shash *child_shash = ctx->child_shash;
464 struct crypto_ahash *fallback_tfm;
467 fallback_tfm = ctx->base.fallback_tfm;
468 err = crypto_ahash_setkey(fallback_tfm, key, keylen);
472 bs = crypto_shash_blocksize(child_shash);
473 ds = crypto_shash_digestsize(child_shash);
474 BUG_ON(ds > N2_HASH_KEY_MAX);
476 err = crypto_shash_tfm_digest(child_shash, key, keylen,
481 } else if (keylen <= N2_HASH_KEY_MAX)
482 memcpy(ctx->hash_key, key, keylen);
484 ctx->hash_key_len = keylen;
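/* Note (illustrative): this follows the usual HMAC key handling -- a key
 * longer than the child hash's block size is first digested down to the
 * digest size, and a key that fits in hash_key[] is copied verbatim.  A
 * key that is neither (longer than N2_HASH_KEY_MAX but within the block
 * size) leaves hash_key_len oversized, and n2_hmac_async_digest() then
 * punts the request to the software fallback.
 */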
489 static unsigned long wait_for_tail(struct spu_queue *qp)
491 unsigned long head, hv_ret;
494 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
495 if (hv_ret != HV_EOK) {
496 pr_err("Hypervisor error on gethead\n");
499 if (head == qp->tail) {
507 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
508 struct cwq_initial_entry *ent)
510 unsigned long hv_ret = spu_queue_submit(qp, ent);
512 if (hv_ret == HV_EOK)
513 hv_ret = wait_for_tail(qp);
518 static int n2_do_async_digest(struct ahash_request *req,
519 unsigned int auth_type, unsigned int digest_size,
520 unsigned int result_size, void *hash_loc,
521 unsigned long auth_key, unsigned int auth_key_len)
523 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
524 struct cwq_initial_entry *ent;
525 struct crypto_hash_walk walk;
526 struct spu_queue *qp;
531 /* The total effective length of the operation may not
534 if (unlikely(req->nbytes > (1 << 16))) {
535 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
536 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
538 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
539 rctx->fallback_req.base.flags =
540 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
541 rctx->fallback_req.nbytes = req->nbytes;
542 rctx->fallback_req.src = req->src;
543 rctx->fallback_req.result = req->result;
545 return crypto_ahash_digest(&rctx->fallback_req);
548 nbytes = crypto_hash_walk_first(req, &walk);
551 qp = cpu_to_cwq[cpu];
555 spin_lock_irqsave(&qp->lock, flags);
557 /* XXX can do better, improve this later by doing a by-hand scatterlist
560 ent = qp->q + qp->tail;
562 ent->control = control_word_base(nbytes, auth_key_len, 0,
563 auth_type, digest_size,
564 false, true, false, false,
567 ent->src_addr = __pa(walk.data);
568 ent->auth_key_addr = auth_key;
569 ent->auth_iv_addr = __pa(hash_loc);
570 ent->final_auth_state_addr = 0UL;
571 ent->enc_key_addr = 0UL;
572 ent->enc_iv_addr = 0UL;
573 ent->dest_addr = __pa(hash_loc);
575 nbytes = crypto_hash_walk_done(&walk, 0);
577 ent = spu_queue_next(qp, ent);
579 ent->control = (nbytes - 1);
580 ent->src_addr = __pa(walk.data);
581 ent->auth_key_addr = 0UL;
582 ent->auth_iv_addr = 0UL;
583 ent->final_auth_state_addr = 0UL;
584 ent->enc_key_addr = 0UL;
585 ent->enc_iv_addr = 0UL;
586 ent->dest_addr = 0UL;
588 nbytes = crypto_hash_walk_done(&walk, 0);
590 ent->control |= CONTROL_END_OF_BLOCK;
592 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
597 spin_unlock_irqrestore(&qp->lock, flags);
600 memcpy(req->result, hash_loc, result_size);
607 static int n2_hash_async_digest(struct ahash_request *req)
609 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
610 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
613 ds = n2alg->digest_size;
614 if (unlikely(req->nbytes == 0)) {
615 memcpy(req->result, n2alg->hash_zero, ds);
618 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
620 return n2_do_async_digest(req, n2alg->auth_type,
621 n2alg->hw_op_hashsz, ds,
625 static int n2_hmac_async_digest(struct ahash_request *req)
627 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
628 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
629 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
630 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
633 ds = n2alg->derived.digest_size;
634 if (unlikely(req->nbytes == 0) ||
635 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
636 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
637 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
639 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
640 rctx->fallback_req.base.flags =
641 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
642 rctx->fallback_req.nbytes = req->nbytes;
643 rctx->fallback_req.src = req->src;
644 rctx->fallback_req.result = req->result;
646 return crypto_ahash_digest(&rctx->fallback_req);
648 memcpy(&rctx->u, n2alg->derived.hash_init,
649 n2alg->derived.hw_op_hashsz);
651 return n2_do_async_digest(req, n2alg->derived.hmac_type,
652 n2alg->derived.hw_op_hashsz, ds,
654 __pa(&ctx->hash_key),
658 struct n2_skcipher_context {
662 u8 aes[AES_MAX_KEY_SIZE];
663 u8 des[DES_KEY_SIZE];
664 u8 des3[3 * DES_KEY_SIZE];
668 #define N2_CHUNK_ARR_LEN 16
670 struct n2_crypto_chunk {
671 struct list_head entry;
672 unsigned long iv_paddr : 44;
673 unsigned long arr_len : 20;
674 unsigned long dest_paddr;
675 unsigned long dest_final;
677 unsigned long src_paddr : 44;
678 unsigned long src_len : 20;
679 } arr[N2_CHUNK_ARR_LEN];
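/* Note (illustrative): one n2_crypto_chunk describes a run of work the
 * hardware can chain in a single submission.  n2_compute_chunks() closes
 * the current chunk and starts a new one whenever it would exceed
 * N2_CHUNK_ARR_LEN descriptors, exceed 2^16 total bytes, switch between
 * in-place and copying operation, or become discontiguous on the
 * destination side.
 */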
682 struct n2_request_context {
683 struct skcipher_walk walk;
684 struct list_head chunk_list;
685 struct n2_crypto_chunk chunk;
689 /* The SPU allows some level of flexibility for partial cipher blocks
690 * being specified in a descriptor.
692 * It merely requires that every descriptor's length field is at least
693 * as large as the cipher block size. This means that a cipher block
694 * can span at most 2 descriptors. However, this does not allow a
695 * partial block to span into the final descriptor as that would
696 * violate the rule (since every descriptor's length must be at least
697 * the block size). So, for example, assuming an 8 byte block size:
699 * 0xe --> 0xa --> 0x8
701 * is a valid length sequence, whereas:
703 * 0xe --> 0xb --> 0x7
705 * is not a valid sequence.
708 struct n2_skcipher_alg {
709 struct list_head entry;
711 struct skcipher_alg skcipher;
714 static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
716 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
718 return container_of(alg, struct n2_skcipher_alg, skcipher);
721 struct n2_skcipher_request_context {
722 struct skcipher_walk walk;
725 static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
728 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
729 struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
730 struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
732 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
735 case AES_KEYSIZE_128:
736 ctx->enc_type |= ENC_TYPE_ALG_AES128;
738 case AES_KEYSIZE_192:
739 ctx->enc_type |= ENC_TYPE_ALG_AES192;
741 case AES_KEYSIZE_256:
742 ctx->enc_type |= ENC_TYPE_ALG_AES256;
748 ctx->key_len = keylen;
749 memcpy(ctx->key.aes, key, keylen);
753 static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
756 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
757 struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
758 struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
761 err = verify_skcipher_des_key(skcipher, key);
765 ctx->enc_type = n2alg->enc_type;
767 ctx->key_len = keylen;
768 memcpy(ctx->key.des, key, keylen);
772 static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
775 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
776 struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
777 struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
780 err = verify_skcipher_des3_key(skcipher, key);
784 ctx->enc_type = n2alg->enc_type;
786 ctx->key_len = keylen;
787 memcpy(ctx->key.des3, key, keylen);
791 static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
793 int this_len = nbytes;
795 this_len -= (nbytes & (block_size - 1));
796 return this_len > (1 << 16) ? (1 << 16) : this_len;
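/* Worked example (illustrative): with an 8 byte block size and
 * nbytes = 0x1e, the trailing partial block is masked off and 0x18 is
 * returned; with nbytes = 0x20008 the full-block length exceeds the
 * hardware's 16-bit length field, so the result is clamped to 1 << 16.
 */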
799 static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
800 struct n2_crypto_chunk *cp,
801 struct spu_queue *qp, bool encrypt)
803 struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
804 struct cwq_initial_entry *ent;
808 ent = spu_queue_alloc(qp, cp->arr_len);
810 pr_info("queue_alloc() of %d fails\n",
815 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
817 ent->control = control_word_base(cp->arr[0].src_len,
818 0, ctx->enc_type, 0, 0,
819 false, true, false, encrypt,
821 (in_place ? OPCODE_INPLACE_BIT : 0));
822 ent->src_addr = cp->arr[0].src_paddr;
823 ent->auth_key_addr = 0UL;
824 ent->auth_iv_addr = 0UL;
825 ent->final_auth_state_addr = 0UL;
826 ent->enc_key_addr = __pa(&ctx->key);
827 ent->enc_iv_addr = cp->iv_paddr;
828 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
830 for (i = 1; i < cp->arr_len; i++) {
831 ent = spu_queue_next(qp, ent);
833 ent->control = cp->arr[i].src_len - 1;
834 ent->src_addr = cp->arr[i].src_paddr;
835 ent->auth_key_addr = 0UL;
836 ent->auth_iv_addr = 0UL;
837 ent->final_auth_state_addr = 0UL;
838 ent->enc_key_addr = 0UL;
839 ent->enc_iv_addr = 0UL;
840 ent->dest_addr = 0UL;
842 ent->control |= CONTROL_END_OF_BLOCK;
844 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
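/* Note (illustrative): only the first descriptor of a chunk carries the
 * full control word plus key and IV addresses; the follow-on descriptors
 * above hold just a length and a source address, and
 * CONTROL_END_OF_BLOCK is set on whichever descriptor ends up last.
 * This mirrors the descriptor chain built for hashing in
 * n2_do_async_digest().
 */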
847 static int n2_compute_chunks(struct skcipher_request *req)
849 struct n2_request_context *rctx = skcipher_request_ctx(req);
850 struct skcipher_walk *walk = &rctx->walk;
851 struct n2_crypto_chunk *chunk;
852 unsigned long dest_prev;
853 unsigned int tot_len;
857 err = skcipher_walk_async(walk, req);
861 INIT_LIST_HEAD(&rctx->chunk_list);
863 chunk = &rctx->chunk;
864 INIT_LIST_HEAD(&chunk->entry);
866 chunk->iv_paddr = 0UL;
868 chunk->dest_paddr = 0UL;
870 prev_in_place = false;
874 while ((nbytes = walk->nbytes) != 0) {
875 unsigned long dest_paddr, src_paddr;
879 src_paddr = (page_to_phys(walk->src.phys.page) +
880 walk->src.phys.offset);
881 dest_paddr = (page_to_phys(walk->dst.phys.page) +
882 walk->dst.phys.offset);
883 in_place = (src_paddr == dest_paddr);
884 this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
886 if (chunk->arr_len != 0) {
887 if (in_place != prev_in_place ||
889 dest_paddr != dest_prev) ||
890 chunk->arr_len == N2_CHUNK_ARR_LEN ||
891 tot_len + this_len > (1 << 16)) {
892 chunk->dest_final = dest_prev;
893 list_add_tail(&chunk->entry,
895 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
900 INIT_LIST_HEAD(&chunk->entry);
903 if (chunk->arr_len == 0) {
904 chunk->dest_paddr = dest_paddr;
907 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
908 chunk->arr[chunk->arr_len].src_len = this_len;
911 dest_prev = dest_paddr + this_len;
912 prev_in_place = in_place;
915 err = skcipher_walk_done(walk, nbytes - this_len);
919 if (!err && chunk->arr_len != 0) {
920 chunk->dest_final = dest_prev;
921 list_add_tail(&chunk->entry, &rctx->chunk_list);
927 static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
929 struct n2_request_context *rctx = skcipher_request_ctx(req);
930 struct n2_crypto_chunk *c, *tmp;
933 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
935 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
937 if (unlikely(c != &rctx->chunk))
943 static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
945 struct n2_request_context *rctx = skcipher_request_ctx(req);
946 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
947 int err = n2_compute_chunks(req);
948 struct n2_crypto_chunk *c, *tmp;
949 unsigned long flags, hv_ret;
950 struct spu_queue *qp;
955 qp = cpu_to_cwq[get_cpu()];
960 spin_lock_irqsave(&qp->lock, flags);
962 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
963 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
967 if (unlikely(c != &rctx->chunk))
971 hv_ret = wait_for_tail(qp);
972 if (hv_ret != HV_EOK)
976 spin_unlock_irqrestore(&qp->lock, flags);
981 n2_chunk_complete(req, NULL);
985 static int n2_encrypt_ecb(struct skcipher_request *req)
987 return n2_do_ecb(req, true);
990 static int n2_decrypt_ecb(struct skcipher_request *req)
992 return n2_do_ecb(req, false);
995 static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
997 struct n2_request_context *rctx = skcipher_request_ctx(req);
998 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
999 unsigned long flags, hv_ret, iv_paddr;
1000 int err = n2_compute_chunks(req);
1001 struct n2_crypto_chunk *c, *tmp;
1002 struct spu_queue *qp;
1003 void *final_iv_addr;
1005 final_iv_addr = NULL;
1010 qp = cpu_to_cwq[get_cpu()];
1015 spin_lock_irqsave(&qp->lock, flags);
1018 iv_paddr = __pa(rctx->walk.iv);
1019 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1021 c->iv_paddr = iv_paddr;
1022 err = __n2_crypt_chunk(tfm, c, qp, true);
1025 iv_paddr = c->dest_final - rctx->walk.blocksize;
1026 list_del(&c->entry);
1027 if (unlikely(c != &rctx->chunk))
1030 final_iv_addr = __va(iv_paddr);
1032 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1034 if (c == &rctx->chunk) {
1035 iv_paddr = __pa(rctx->walk.iv);
1037 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1038 tmp->arr[tmp->arr_len-1].src_len -
1039 rctx->walk.blocksize);
1041 if (!final_iv_addr) {
1044 pa = (c->arr[c->arr_len-1].src_paddr +
1045 c->arr[c->arr_len-1].src_len -
1046 rctx->walk.blocksize);
1047 final_iv_addr = rctx->temp_iv;
1048 memcpy(rctx->temp_iv, __va(pa),
1049 rctx->walk.blocksize);
1051 c->iv_paddr = iv_paddr;
1052 err = __n2_crypt_chunk(tfm, c, qp, false);
1055 list_del(&c->entry);
1056 if (unlikely(c != &rctx->chunk))
1061 hv_ret = wait_for_tail(qp);
1062 if (hv_ret != HV_EOK)
1066 spin_unlock_irqrestore(&qp->lock, flags);
1071 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1075 static int n2_encrypt_chaining(struct skcipher_request *req)
1077 return n2_do_chaining(req, true);
1080 static int n2_decrypt_chaining(struct skcipher_request *req)
1082 return n2_do_chaining(req, false);
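/* Note (illustrative): n2_do_chaining() handles chunk IVs differently per
 * direction.  For encryption the chunks are walked in order and each
 * chunk's IV is the last ciphertext block written by the previous chunk
 * (the request IV for the first chunk).  For decryption the chunks are
 * walked in reverse so that each chunk is processed while the previous
 * chunk's last ciphertext block (its IV) is still intact; in-place
 * decryption of that previous chunk would otherwise have overwritten it.
 * The very last ciphertext block is saved into rctx->temp_iv up front so
 * it can be returned as the output IV via n2_chunk_complete().
 */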
1085 struct n2_skcipher_tmpl {
1087 const char *drv_name;
1090 struct skcipher_alg skcipher;
1093 static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1094 /* DES: ECB, CBC and CFB are supported */
1095 { .name = "ecb(des)",
1096 .drv_name = "ecb-des",
1097 .block_size = DES_BLOCK_SIZE,
1098 .enc_type = (ENC_TYPE_ALG_DES |
1099 ENC_TYPE_CHAINING_ECB),
1101 .min_keysize = DES_KEY_SIZE,
1102 .max_keysize = DES_KEY_SIZE,
1103 .setkey = n2_des_setkey,
1104 .encrypt = n2_encrypt_ecb,
1105 .decrypt = n2_decrypt_ecb,
1108 { .name = "cbc(des)",
1109 .drv_name = "cbc-des",
1110 .block_size = DES_BLOCK_SIZE,
1111 .enc_type = (ENC_TYPE_ALG_DES |
1112 ENC_TYPE_CHAINING_CBC),
1114 .ivsize = DES_BLOCK_SIZE,
1115 .min_keysize = DES_KEY_SIZE,
1116 .max_keysize = DES_KEY_SIZE,
1117 .setkey = n2_des_setkey,
1118 .encrypt = n2_encrypt_chaining,
1119 .decrypt = n2_decrypt_chaining,
1122 { .name = "cfb(des)",
1123 .drv_name = "cfb-des",
1124 .block_size = DES_BLOCK_SIZE,
1125 .enc_type = (ENC_TYPE_ALG_DES |
1126 ENC_TYPE_CHAINING_CFB),
1128 .min_keysize = DES_KEY_SIZE,
1129 .max_keysize = DES_KEY_SIZE,
1130 .setkey = n2_des_setkey,
1131 .encrypt = n2_encrypt_chaining,
1132 .decrypt = n2_decrypt_chaining,
1136 /* 3DES: ECB, CBC and CFB are supported */
1137 { .name = "ecb(des3_ede)",
1138 .drv_name = "ecb-3des",
1139 .block_size = DES_BLOCK_SIZE,
1140 .enc_type = (ENC_TYPE_ALG_3DES |
1141 ENC_TYPE_CHAINING_ECB),
1143 .min_keysize = 3 * DES_KEY_SIZE,
1144 .max_keysize = 3 * DES_KEY_SIZE,
1145 .setkey = n2_3des_setkey,
1146 .encrypt = n2_encrypt_ecb,
1147 .decrypt = n2_decrypt_ecb,
1150 { .name = "cbc(des3_ede)",
1151 .drv_name = "cbc-3des",
1152 .block_size = DES_BLOCK_SIZE,
1153 .enc_type = (ENC_TYPE_ALG_3DES |
1154 ENC_TYPE_CHAINING_CBC),
1156 .ivsize = DES_BLOCK_SIZE,
1157 .min_keysize = 3 * DES_KEY_SIZE,
1158 .max_keysize = 3 * DES_KEY_SIZE,
1159 .setkey = n2_3des_setkey,
1160 .encrypt = n2_encrypt_chaining,
1161 .decrypt = n2_decrypt_chaining,
1164 { .name = "cfb(des3_ede)",
1165 .drv_name = "cfb-3des",
1166 .block_size = DES_BLOCK_SIZE,
1167 .enc_type = (ENC_TYPE_ALG_3DES |
1168 ENC_TYPE_CHAINING_CFB),
1170 .min_keysize = 3 * DES_KEY_SIZE,
1171 .max_keysize = 3 * DES_KEY_SIZE,
1172 .setkey = n2_3des_setkey,
1173 .encrypt = n2_encrypt_chaining,
1174 .decrypt = n2_decrypt_chaining,
1177 /* AES: ECB, CBC and CTR are supported */
1178 { .name = "ecb(aes)",
1179 .drv_name = "ecb-aes",
1180 .block_size = AES_BLOCK_SIZE,
1181 .enc_type = (ENC_TYPE_ALG_AES128 |
1182 ENC_TYPE_CHAINING_ECB),
1184 .min_keysize = AES_MIN_KEY_SIZE,
1185 .max_keysize = AES_MAX_KEY_SIZE,
1186 .setkey = n2_aes_setkey,
1187 .encrypt = n2_encrypt_ecb,
1188 .decrypt = n2_decrypt_ecb,
1191 { .name = "cbc(aes)",
1192 .drv_name = "cbc-aes",
1193 .block_size = AES_BLOCK_SIZE,
1194 .enc_type = (ENC_TYPE_ALG_AES128 |
1195 ENC_TYPE_CHAINING_CBC),
1197 .ivsize = AES_BLOCK_SIZE,
1198 .min_keysize = AES_MIN_KEY_SIZE,
1199 .max_keysize = AES_MAX_KEY_SIZE,
1200 .setkey = n2_aes_setkey,
1201 .encrypt = n2_encrypt_chaining,
1202 .decrypt = n2_decrypt_chaining,
1205 { .name = "ctr(aes)",
1206 .drv_name = "ctr-aes",
1207 .block_size = AES_BLOCK_SIZE,
1208 .enc_type = (ENC_TYPE_ALG_AES128 |
1209 ENC_TYPE_CHAINING_COUNTER),
1211 .ivsize = AES_BLOCK_SIZE,
1212 .min_keysize = AES_MIN_KEY_SIZE,
1213 .max_keysize = AES_MAX_KEY_SIZE,
1214 .setkey = n2_aes_setkey,
1215 .encrypt = n2_encrypt_chaining,
1216 .decrypt = n2_encrypt_chaining,
1221 #define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
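/* Illustrative sketch, not part of the driver: once the templates above
 * are registered, kernel users reach this hardware through the generic
 * skcipher API; with N2_CRA_PRIORITY winning, a request for "cbc(aes)"
 * resolves to the "cbc-aes-n2" instance created by
 * __n2_register_one_skcipher().  (Note that ctr(aes) deliberately wires
 * .decrypt to n2_encrypt_chaining, since CTR decryption is the same
 * keystream operation as encryption.)  The function below is a
 * hypothetical caller, shown only as a usage example.
 */
#if 0
static int example_cbc_aes_encrypt(struct scatterlist *sg, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
#endif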
1223 static LIST_HEAD(skcipher_algs);
1225 struct n2_hash_tmpl {
1227 const u8 *hash_zero;
1228 const u8 *hash_init;
1236 static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
1237 cpu_to_le32(MD5_H0),
1238 cpu_to_le32(MD5_H1),
1239 cpu_to_le32(MD5_H2),
1240 cpu_to_le32(MD5_H3),
1242 static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1243 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1245 static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1246 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1247 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1249 static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1250 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1251 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1254 static const struct n2_hash_tmpl hash_tmpls[] = {
1256 .hash_zero = md5_zero_message_hash,
1257 .hash_init = (u8 *)n2_md5_init,
1258 .auth_type = AUTH_TYPE_MD5,
1259 .hmac_type = AUTH_TYPE_HMAC_MD5,
1260 .hw_op_hashsz = MD5_DIGEST_SIZE,
1261 .digest_size = MD5_DIGEST_SIZE,
1262 .block_size = MD5_HMAC_BLOCK_SIZE },
1264 .hash_zero = sha1_zero_message_hash,
1265 .hash_init = (u8 *)n2_sha1_init,
1266 .auth_type = AUTH_TYPE_SHA1,
1267 .hmac_type = AUTH_TYPE_HMAC_SHA1,
1268 .hw_op_hashsz = SHA1_DIGEST_SIZE,
1269 .digest_size = SHA1_DIGEST_SIZE,
1270 .block_size = SHA1_BLOCK_SIZE },
1272 .hash_zero = sha256_zero_message_hash,
1273 .hash_init = (u8 *)n2_sha256_init,
1274 .auth_type = AUTH_TYPE_SHA256,
1275 .hmac_type = AUTH_TYPE_HMAC_SHA256,
1276 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1277 .digest_size = SHA256_DIGEST_SIZE,
1278 .block_size = SHA256_BLOCK_SIZE },
1280 .hash_zero = sha224_zero_message_hash,
1281 .hash_init = (u8 *)n2_sha224_init,
1282 .auth_type = AUTH_TYPE_SHA256,
1283 .hmac_type = AUTH_TYPE_RESERVED,
1284 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1285 .digest_size = SHA224_DIGEST_SIZE,
1286 .block_size = SHA224_BLOCK_SIZE },
1288 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1290 static LIST_HEAD(ahash_algs);
1291 static LIST_HEAD(hmac_algs);
1293 static int algs_registered;
1295 static void __n2_unregister_algs(void)
1297 struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1298 struct n2_ahash_alg *alg, *alg_tmp;
1299 struct n2_hmac_alg *hmac, *hmac_tmp;
1301 list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1302 crypto_unregister_skcipher(&skcipher->skcipher);
1303 list_del(&skcipher->entry);
1306 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1307 crypto_unregister_ahash(&hmac->derived.alg);
1308 list_del(&hmac->derived.entry);
1311 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1312 crypto_unregister_ahash(&alg->alg);
1313 list_del(&alg->entry);
1318 static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1320 crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1324 static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1326 struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1327 struct skcipher_alg *alg;
1334 *alg = tmpl->skcipher;
1336 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1337 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1338 alg->base.cra_priority = N2_CRA_PRIORITY;
1339 alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
1340 CRYPTO_ALG_ALLOCATES_MEMORY;
1341 alg->base.cra_blocksize = tmpl->block_size;
1342 p->enc_type = tmpl->enc_type;
1343 alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1344 alg->base.cra_module = THIS_MODULE;
1345 alg->init = n2_skcipher_init_tfm;
1347 list_add(&p->entry, &skcipher_algs);
1348 err = crypto_register_skcipher(alg);
1350 pr_err("%s alg registration failed\n", alg->base.cra_name);
1351 list_del(&p->entry);
1354 pr_info("%s alg registered\n", alg->base.cra_name);
1359 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1361 struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1362 struct ahash_alg *ahash;
1363 struct crypto_alg *base;
1369 p->child_alg = n2ahash->alg.halg.base.cra_name;
1370 memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1371 INIT_LIST_HEAD(&p->derived.entry);
1373 ahash = &p->derived.alg;
1374 ahash->digest = n2_hmac_async_digest;
1375 ahash->setkey = n2_hmac_async_setkey;
1377 base = &ahash->halg.base;
1378 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1379 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1381 base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1382 base->cra_init = n2_hmac_cra_init;
1383 base->cra_exit = n2_hmac_cra_exit;
1385 list_add(&p->derived.entry, &hmac_algs);
1386 err = crypto_register_ahash(ahash);
1388 pr_err("%s alg registration failed\n", base->cra_name);
1389 list_del(&p->derived.entry);
1392 pr_info("%s alg registered\n", base->cra_name);
1397 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1399 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1400 struct hash_alg_common *halg;
1401 struct crypto_alg *base;
1402 struct ahash_alg *ahash;
1408 p->hash_zero = tmpl->hash_zero;
1409 p->hash_init = tmpl->hash_init;
1410 p->auth_type = tmpl->auth_type;
1411 p->hmac_type = tmpl->hmac_type;
1412 p->hw_op_hashsz = tmpl->hw_op_hashsz;
1413 p->digest_size = tmpl->digest_size;
1416 ahash->init = n2_hash_async_init;
1417 ahash->update = n2_hash_async_update;
1418 ahash->final = n2_hash_async_final;
1419 ahash->finup = n2_hash_async_finup;
1420 ahash->digest = n2_hash_async_digest;
1421 ahash->export = n2_hash_async_noexport;
1422 ahash->import = n2_hash_async_noimport;
1424 halg = &ahash->halg;
1425 halg->digestsize = tmpl->digest_size;
1428 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1429 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1430 base->cra_priority = N2_CRA_PRIORITY;
1431 base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1432 CRYPTO_ALG_NEED_FALLBACK;
1433 base->cra_blocksize = tmpl->block_size;
1434 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1435 base->cra_module = THIS_MODULE;
1436 base->cra_init = n2_hash_cra_init;
1437 base->cra_exit = n2_hash_cra_exit;
1439 list_add(&p->entry, &ahash_algs);
1440 err = crypto_register_ahash(ahash);
1442 pr_err("%s alg registration failed\n", base->cra_name);
1443 list_del(&p->entry);
1446 pr_info("%s alg registered\n", base->cra_name);
1448 if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1449 err = __n2_register_one_hmac(p);
1453 static int n2_register_algs(void)
1457 mutex_lock(&spu_lock);
1458 if (algs_registered++)
1461 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1462 err = __n2_register_one_ahash(&hash_tmpls[i]);
1464 __n2_unregister_algs();
1468 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1469 err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1471 __n2_unregister_algs();
1477 mutex_unlock(&spu_lock);
1481 static void n2_unregister_algs(void)
1483 mutex_lock(&spu_lock);
1484 if (!--algs_registered)
1485 __n2_unregister_algs();
1486 mutex_unlock(&spu_lock);
1489 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1490 * a devino. This isn't very useful to us because all of the
1491 * interrupts listed in the device_node have been translated to
1492 * Linux virtual IRQ cookie numbers.
1494 * So we have to back-translate, going through the 'intr' and 'ino'
1495 * property tables of the n2cp MDESC node, matching it with the OF
1496 * 'interrupts' property entries, in order to figure out which
1497 * devino goes to which already-translated IRQ.
1499 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1500 unsigned long dev_ino)
1502 const unsigned int *dev_intrs;
1506 for (i = 0; i < ip->num_intrs; i++) {
1507 if (ip->ino_table[i].ino == dev_ino)
1510 if (i == ip->num_intrs)
1513 intr = ip->ino_table[i].intr;
1515 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1519 for (i = 0; i < dev->archdata.num_irqs; i++) {
1520 if (dev_intrs[i] == intr)
1527 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1528 const char *irq_name, struct spu_queue *p,
1529 irq_handler_t handler)
1534 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1538 index = find_devino_index(dev, ip, p->devino);
1542 p->irq = dev->archdata.irqs[index];
1544 sprintf(p->irq_name, "%s-%d", irq_name, index);
1546 return request_irq(p->irq, handler, 0, p->irq_name, p);
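/* Note (illustrative): the mapping chain used above is qhandle ->
 * devino (sun4v_ncs_qhandle_to_devino) -> index in the MDESC ino/intr
 * table (find_devino_index) -> matching position in the OF "interrupts"
 * property -> dev->archdata.irqs[index], the already-translated Linux
 * IRQ that request_irq() finally binds to the queue's handler.
 */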
1549 static struct kmem_cache *queue_cache[2];
1551 static void *new_queue(unsigned long q_type)
1553 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1556 static void free_queue(void *p, unsigned long q_type)
1558 kmem_cache_free(queue_cache[q_type - 1], p);
1561 static int queue_cache_init(void)
1563 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1564 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1565 kmem_cache_create("mau_queue",
1568 MAU_ENTRY_SIZE, 0, NULL);
1569 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1572 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1573 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1574 kmem_cache_create("cwq_queue",
1577 CWQ_ENTRY_SIZE, 0, NULL);
1578 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1579 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1580 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1586 static void queue_cache_destroy(void)
1588 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1589 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1590 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1591 queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1594 static long spu_queue_register_workfn(void *arg)
1596 struct spu_qreg *qr = arg;
1597 struct spu_queue *p = qr->queue;
1598 unsigned long q_type = qr->type;
1599 unsigned long hv_ret;
1601 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1602 CWQ_NUM_ENTRIES, &p->qhandle);
1604 sun4v_ncs_sethead_marker(p->qhandle, 0);
1606 return hv_ret ? -EINVAL : 0;
1609 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1611 int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1612 struct spu_qreg qr = { .queue = p, .type = q_type };
1614 return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
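/* Note (illustrative): the hypervisor queue-configure call is run via
 * work_on_cpu_safe() on a CPU taken from p->sharing (the CPUs that own
 * this SPU) rather than on whichever CPU happens to be probing, so that
 * sun4v_ncs_qconf() executes on a CPU attached to the queue being
 * registered.
 */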
1617 static int spu_queue_setup(struct spu_queue *p)
1621 p->q = new_queue(p->q_type);
1625 err = spu_queue_register(p, p->q_type);
1627 free_queue(p->q, p->q_type);
1634 static void spu_queue_destroy(struct spu_queue *p)
1636 unsigned long hv_ret;
1641 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1644 free_queue(p->q, p->q_type);
1647 static void spu_list_destroy(struct list_head *list)
1649 struct spu_queue *p, *n;
1651 list_for_each_entry_safe(p, n, list, list) {
1654 for (i = 0; i < NR_CPUS; i++) {
1655 if (cpu_to_cwq[i] == p)
1656 cpu_to_cwq[i] = NULL;
1660 free_irq(p->irq, p);
1663 spu_queue_destroy(p);
1669 /* Walk the backward arcs of a CWQ 'exec-unit' node,
1670 * gathering cpu membership information.
1672 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1673 struct platform_device *dev,
1674 u64 node, struct spu_queue *p,
1675 struct spu_queue **table)
1679 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1680 u64 tgt = mdesc_arc_target(mdesc, arc);
1681 const char *name = mdesc_node_name(mdesc, tgt);
1684 if (strcmp(name, "cpu"))
1686 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1687 if (table[*id] != NULL) {
1688 dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1692 cpumask_set_cpu(*id, &p->sharing);
1698 /* Process an 'exec-unit' MDESC node of type 'cwq'. */
1699 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1700 struct platform_device *dev, struct mdesc_handle *mdesc,
1701 u64 node, const char *iname, unsigned long q_type,
1702 irq_handler_t handler, struct spu_queue **table)
1704 struct spu_queue *p;
1707 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1709 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1714 cpumask_clear(&p->sharing);
1715 spin_lock_init(&p->lock);
1717 INIT_LIST_HEAD(&p->jobs);
1718 list_add(&p->list, list);
1720 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1724 err = spu_queue_setup(p);
1728 return spu_map_ino(dev, ip, iname, p, handler);
1731 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1732 struct spu_mdesc_info *ip, struct list_head *list,
1733 const char *exec_name, unsigned long q_type,
1734 irq_handler_t handler, struct spu_queue **table)
1739 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1742 type = mdesc_get_property(mdesc, node, "type", NULL);
1743 if (!type || strcmp(type, exec_name))
1746 err = handle_exec_unit(ip, list, dev, mdesc, node,
1747 exec_name, q_type, handler, table);
1749 spu_list_destroy(list);
1757 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1758 struct spu_mdesc_info *ip)
1764 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1766 pr_err("NO 'ino' property\n");
1770 ip->num_intrs = ino_len / sizeof(u64);
1771 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1777 for (i = 0; i < ip->num_intrs; i++) {
1778 struct ino_blob *b = &ip->ino_table[i];
1786 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1787 struct platform_device *dev,
1788 struct spu_mdesc_info *ip,
1789 const char *node_name)
1791 const unsigned int *reg;
1794 reg = of_get_property(dev->dev.of_node, "reg", NULL);
1798 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1802 name = mdesc_get_property(mdesc, node, "name", NULL);
1803 if (!name || strcmp(name, node_name))
1805 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1806 if (!chdl || (*chdl != *reg))
1808 ip->cfg_handle = *chdl;
1809 return get_irq_props(mdesc, node, ip);
1815 static unsigned long n2_spu_hvapi_major;
1816 static unsigned long n2_spu_hvapi_minor;
1818 static int n2_spu_hvapi_register(void)
1822 n2_spu_hvapi_major = 2;
1823 n2_spu_hvapi_minor = 0;
1825 err = sun4v_hvapi_register(HV_GRP_NCS,
1827 &n2_spu_hvapi_minor);
1830 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1832 n2_spu_hvapi_minor);
1837 static void n2_spu_hvapi_unregister(void)
1839 sun4v_hvapi_unregister(HV_GRP_NCS);
1842 static int global_ref;
1844 static int grab_global_resources(void)
1848 mutex_lock(&spu_lock);
1853 err = n2_spu_hvapi_register();
1857 err = queue_cache_init();
1859 goto out_hvapi_release;
1862 cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1865 goto out_queue_cache_destroy;
1867 cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1870 goto out_free_cwq_table;
1877 mutex_unlock(&spu_lock);
1884 out_queue_cache_destroy:
1885 queue_cache_destroy();
1888 n2_spu_hvapi_unregister();
1892 static void release_global_resources(void)
1894 mutex_lock(&spu_lock);
1895 if (!--global_ref) {
1902 queue_cache_destroy();
1903 n2_spu_hvapi_unregister();
1905 mutex_unlock(&spu_lock);
1908 static struct n2_crypto *alloc_n2cp(void)
1910 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1913 INIT_LIST_HEAD(&np->cwq_list);
1918 static void free_n2cp(struct n2_crypto *np)
1920 kfree(np->cwq_info.ino_table);
1921 np->cwq_info.ino_table = NULL;
1926 static void n2_spu_driver_version(void)
1928 static int n2_spu_version_printed;
1930 if (n2_spu_version_printed++ == 0)
1931 pr_info("%s", version);
1934 static int n2_crypto_probe(struct platform_device *dev)
1936 struct mdesc_handle *mdesc;
1937 struct n2_crypto *np;
1940 n2_spu_driver_version();
1942 pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1946 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1951 err = grab_global_resources();
1953 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
1958 mdesc = mdesc_grab();
1961 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
1964 goto out_free_global;
1966 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1968 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
1970 mdesc_release(mdesc);
1971 goto out_free_global;
1974 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1975 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1977 mdesc_release(mdesc);
1980 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
1982 goto out_free_global;
1985 err = n2_register_algs();
1987 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
1989 goto out_free_spu_list;
1992 dev_set_drvdata(&dev->dev, np);
1997 spu_list_destroy(&np->cwq_list);
2000 release_global_resources();
2008 static int n2_crypto_remove(struct platform_device *dev)
2010 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2012 n2_unregister_algs();
2014 spu_list_destroy(&np->cwq_list);
2016 release_global_resources();
2023 static struct n2_mau *alloc_ncp(void)
2025 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2028 INIT_LIST_HEAD(&mp->mau_list);
2033 static void free_ncp(struct n2_mau *mp)
2035 kfree(mp->mau_info.ino_table);
2036 mp->mau_info.ino_table = NULL;
2041 static int n2_mau_probe(struct platform_device *dev)
2043 struct mdesc_handle *mdesc;
2047 n2_spu_driver_version();
2049 pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2053 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2058 err = grab_global_resources();
2060 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2065 mdesc = mdesc_grab();
2068 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2071 goto out_free_global;
2074 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2076 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2078 mdesc_release(mdesc);
2079 goto out_free_global;
2082 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2083 "mau", HV_NCS_QTYPE_MAU, mau_intr,
2085 mdesc_release(mdesc);
2088 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2090 goto out_free_global;
2093 dev_set_drvdata(&dev->dev, mp);
2098 release_global_resources();
2106 static int n2_mau_remove(struct platform_device *dev)
2108 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2110 spu_list_destroy(&mp->mau_list);
2112 release_global_resources();
2119 static const struct of_device_id n2_crypto_match[] = {
2122 .compatible = "SUNW,n2-cwq",
2126 .compatible = "SUNW,vf-cwq",
2130 .compatible = "SUNW,kt-cwq",
2135 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2137 static struct platform_driver n2_crypto_driver = {
2140 .of_match_table = n2_crypto_match,
2142 .probe = n2_crypto_probe,
2143 .remove = n2_crypto_remove,
2146 static const struct of_device_id n2_mau_match[] = {
2149 .compatible = "SUNW,n2-mau",
2153 .compatible = "SUNW,vf-mau",
2157 .compatible = "SUNW,kt-mau",
2162 MODULE_DEVICE_TABLE(of, n2_mau_match);
2164 static struct platform_driver n2_mau_driver = {
2167 .of_match_table = n2_mau_match,
2169 .probe = n2_mau_probe,
2170 .remove = n2_mau_remove,
2173 static struct platform_driver * const drivers[] = {
2178 static int __init n2_init(void)
2180 return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2183 static void __exit n2_exit(void)
2185 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2188 module_init(n2_init);
2189 module_exit(n2_exit);