// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10
/* The highest descriptor count in use */
#define CC_MAX_DESC_SEQ_LEN 23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to the HW queue registers;
	 * only one request may be programmed at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;
	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */
#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
};

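/*
 * A backlog queue entry: a private copy of the crypto request and of its
 * descriptor sequence, kept until HW queue space becomes available.
 */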
struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

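/*
 * cc_cpp_int_mask() - Look up the "REE OP aborted" interrupt bit for a CPP
 * algorithm/slot pair. The indices are clamped with array_index_nospec() to
 * prevent a speculative out-of-bounds read of the table.
 */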
static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{
	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);

	return cc_cpp_int_masks[alg][slot];
}

void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kfree_sensitive(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}

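/*
 * enqueue_seq() - Push a descriptor sequence to the HW queue. Each
 * descriptor is six 32-bit words, all written back-to-back to the
 * DSCRPTR_QUEUE_WORD0 register; as noted below, the HW accepts the whole
 * descriptor through this single register.
 */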
static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */

	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
			i, seq[i].word[0], seq[i].word[1],
			seq[i].word[2], seq[i].word[3],
			seq[i].word[4], seq[i].word[5]);
	}
}

/**
 * request_mgr_complete() - Completion will take place if and only if user
 * requested completion by cc_send_sync_request().
 *
 * @dev: Device pointer
 * @dx_compl_h: The completion event to signal
 * @dummy: unused error code
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

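/*
 * cc_queues_status() - Check for room in both the SW request queue and the
 * HW descriptor queue. The SW queue is tested once (it cannot change while
 * the caller holds the lock); the HW free-slot count is re-read up to
 * CC_MAX_POLL_ITER times. Returns 0 if total_seq_len descriptors fit,
 * -ENOSPC otherwise.
 */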
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll a constant number of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* Enough room in the HW queue */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; the caller should try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

/**
 * cc_do_send_request() - Enqueue caller request to crypto hardware.
 * Needs to be called with the HW lock held and PM running.
 *
 * @drvdata: Associated device driver context
 * @cc_req: The request to enqueue
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 * @add_comp: If "true": add an artificial dout DMA to mark completion
 */
static void cc_do_send_request(struct cc_drvdata *drvdata,
			       struct cc_crypto_req *cc_req,
			       struct cc_hw_desc *desc, unsigned int len,
			       bool add_comp)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct device *dev = drvdata_to_dev(drvdata);

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with the HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push commands to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. Maybe indicating problem
		 * with resuming power. Set the free slot count to 0 and hope
		 * for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}
}

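/*
 * Add a request that could not be pushed to the HW queue to the backlog
 * list and kick the completion tasklet, which retries backlogged entries
 * from cc_proc_backlog() once queue space frees up.
 */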
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

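/*
 * Drain the backlog: notify each backlogged request once with -EINPROGRESS,
 * then push it to the HW queue as space becomes available. Called from the
 * completion handler, where new room may have just opened up.
 */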
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	void *req;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);

		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			creq->user_cb(dev, req, -EINPROGRESS);
			bli->notif = true;
		}

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, bli->len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
				   false);
		spin_unlock(&mgr->hw_lock);

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}

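/*
 * cc_send_request() - Asynchronously enqueue a request on behalf of a tfm
 * handler. Returns -EINPROGRESS once the descriptors are in the HW queue,
 * -EBUSY if the request was parked on the backlog (only possible when the
 * caller set CRYPTO_TFM_REQ_MAY_BACKLOG), or a negative error otherwise.
 * An illustrative call from an algorithm handler (a sketch; field names may
 * differ from the actual callers):
 *
 *	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
 */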
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc) {
		cc_do_send_request(drvdata, cc_req, desc, len, false);
		rc = -EINPROGRESS;
	}

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

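/*
 * cc_send_sync_request() - Enqueue a request and block until the HW signals
 * completion. The sequence is sent with add_comp set, so the dummy
 * completion descriptor fires seq_compl; hence room for len + 1 descriptors
 * is required.
 */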
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	cc_do_send_request(drvdata, cc_req, desc, len, true);
	spin_unlock_bh(&mgr->hw_lock);
	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

/**
 * send_request_init() - Enqueue caller request to crypto hardware during init
 * process.
 * Assume this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * @drvdata: Associated device driver context
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 *
 * Return:
 * Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in the HW and SW FIFOs. Polls a bounded number of
	 * iterations.
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push commands to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

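/*
 * complete_request() - Expected to be called from the interrupt path when
 * AXI completions are pending: wake any synchronous waiter and kick the
 * deferred completion processing (workqueue or tasklet).
 */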
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

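/*
 * proc_completions() - Retire one SW queue entry per counted AXI completion
 * and invoke its user callback. For CPP requests the completion status is
 * derived from the matching "REE OP aborted" interrupt bit: if it is set,
 * the request is reported as -EPERM, otherwise as success.
 */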
static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;
	int rc;
	u32 mask;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->cpp.is_cpp) {
			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
				cc_req->cpp.slot, cc_req->cpp.alg);
			mask = cc_cpp_int_mask(cc_req->cpp.alg,
					       cc_req->cpp.slot);
			rc = (drvdata->irq & mask ? -EPERM : 0);
			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
				drvdata->irq, rc);
		} else {
			dev_dbg(dev, "None CPP request completion\n");
			rc = 0;
		}

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, rc);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irq;

	dev_dbg(dev, "Completion handler called!\n");
	irq = (drvdata->irq & drvdata->comp_mask);

	/* To avoid the interrupt from firing as we unmask it,
	 * we clear it now
	 */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

	/* Avoid race with above clear: Test completion counter once more */

	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);

	dev_dbg(dev, "AXI completion after updated: %d\n",
		request_mgr_handle->axi_completed);

	while (request_mgr_handle->axi_completed) {
		do {
			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
			irq = (drvdata->irq & drvdata->comp_mask);
			proc_completions(drvdata);

			/* At this point (after proc_completions()),
			 * request_mgr_handle->axi_completed is 0.
			 */
			request_mgr_handle->axi_completed +=
						cc_axi_comp_count(drvdata);
		} while (request_mgr_handle->axi_completed > 0);

		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
	}

	/* After verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

	cc_proc_backlog(drvdata);
	dev_dbg(dev, "Comp. handler done.\n");
}