/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */
/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver
 * schedules a tasklet from its IRQ handler so that response messages are
 * handled outside of interrupt context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */
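
/*
 * Editor's illustration of the bookkeeping described above: if a request is
 * posted as three tx fragments starting at ring index 10, then tx_msg_start
 * is 10 and txin_numd[10] is 3. When the rx interrupt reports the response,
 * the driver reclaims descriptors 10..12 in one step and advances txin past
 * them.
 */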
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS	0

#define RING_ENTRY_SIZE	sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES	512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN	15

#define PDC_RING_SIZE	(PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER	13
#define RING_ALIGN		BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER	5
#define RX_BUF_ALIGN		BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask)	((x) & (max_mask))
#define TXD(x, max_mask)	XXD((x), (max_mask))
#define RXD(x, max_mask)	XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)	TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)	TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)	RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)	RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)	TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)	RXD((t) - (h), (max_mask))
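
/*
 * Editor's note: because the ring sizes are powers of 2, the masks above make
 * the index arithmetic wrap naturally. With 512 entries the post mask is 511,
 * so for head h = 510 and tail t = 2, NTXDACTIVE(510, 2, 511) =
 * (2 - 510) & 511 = 4 descriptors in flight.
 */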

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN	8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET	0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0		(16 + PDC_RINGSET)
#define PDC_RCVINTEN_0		BIT(PDC_RCVINT_0)
#define PDC_INTMASK		(PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT	1
#define PDC_LAZY_TIMEOUT	10000
#define PDC_LAZY_INT	(PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
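/*
 * Editor's note: per the macro above, the lazy rx interrupt register packs
 * the timeout in its low 24 bits and the frame count in the top 8 bits, so
 * this setting requests an interrupt after a single received frame, with the
 * 10000-tick timeout as a backstop.
 */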
#define PDC_INTMASK_OFFSET	0x24
#define PDC_INTSTATUS_OFFSET	0x20
#define PDC_RCVLAZY0_OFFSET	(0x30 + 4 * PDC_RINGSET)

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before rx frame
 */
#define PDC_SPU2_RESP_HDR_LEN	17
#define PDC_CKSUM_CTRL		BIT(27)
#define PDC_CKSUM_CTRL_OFFSET	0x400

/* Length of the metadata that precedes each rx frame for SPU-M */
#define PDC_SPUM_RESP_HDR_LEN	32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL	0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE	0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL	0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE	0x1

#define CRYPTO_D64_RS0_CD_MASK	((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)

/* descriptor flags */
#define D64_CTRL1_EOT	BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC	BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF	BIT(30)	/* end of frame */
#define D64_CTRL1_SOF	BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW	0x00800000
#define RX_STATUS_LEN		0x0000FFFF

#define PDC_TXREGS_OFFSET	0x200
#define PDC_RXREGS_OFFSET	0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX	16384

struct pdc_dma_map {
	void *ctx;	/* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel (xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)	pad ## line
#define _XSTR(line)	_PADLINE(line)
#define PAD		_XSTR(__LINE__)
#endif	/* PAD */
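
/*
 * Editor's note: each use of PAD declares a u32 field whose name is unique
 * per source line ("u32 PAD;" expands to something like "u32 pad207;"). The
 * two-level macro forces __LINE__ to expand before ## pastes it, which is
 * what lets the register structs below match the hardware layout without
 * hand-naming every reserved field.
 */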

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32 PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32 devcontrol;			/* 0x000 */
	u32 devstatus;			/* 0x004 */
	u32 PAD;
	u32 biststatus;			/* 0x00c */
	u32 PAD[4];
	u32 intstatus;			/* 0x020 */
	u32 intmask;			/* 0x024 */
	u32 gptimer;			/* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;		/* 0x030 */
	u32 intrcvlazy_1;		/* 0x034 */
	u32 intrcvlazy_2;		/* 0x038 */
	u32 intrcvlazy_3;		/* 0x03c */

	u32 PAD[48];
	u32 removed_intrecvlazy;	/* 0x100 */
	u32 flowctlthresh;		/* 0x104 */
	u32 wrrthresh;			/* 0x108 */
	u32 gmac_idle_cnt_thresh;	/* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;		/* 0x120 */
	u32 ifioaccessbyte;		/* 0x124 */
	u32 ifioaccessdata;		/* 0x128 */

	u32 PAD[21];
	u32 phyaccess;			/* 0x180 */
	u32 PAD;
	u32 phycontrol;			/* 0x188 */
	u32 txqctl;			/* 0x18c */
	u32 rxqctl;			/* 0x190 */
	u32 gpioselect;			/* 0x194 */
	u32 gpio_output_en;		/* 0x198 */
	u32 PAD;			/* 0x19c */
	u32 txq_rxq_mem_ctl;		/* 0x1a0 */
	u32 memory_ecc_status;		/* 0x1a4 */
	u32 serdes_ctl;			/* 0x1a8 */
	u32 serdes_status0;		/* 0x1ac */
	u32 serdes_status1;		/* 0x1b0 */
	u32 PAD[11];			/* 0x1b4-1dc */
	u32 clk_ctl_st;			/* 0x1e0 */
	u32 hw_war;			/* 0x1e4 */
	u32 pwrctl;			/* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS	4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];	/* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t dmabase;	/* DMA address of start of ring */
	void	  *vbase;	/* base kernel virtual address of ring */
	u32	   size;	/* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32 rxin_numd;
	u8 *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/*
	 * Last interrupt status read from PDC device. Saved in interrupt
	 * handler so the handler can clear the interrupt in the device,
	 * and the tasklet called later can know which interrupt bits are
	 * active.
	 */
	unsigned long intstatus;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;		/* start of PDC registers */

	struct dma64_regs *txregs_64;	/* dma tx engine registers */
	struct dma64_regs *rxregs_64;	/* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd *txd_64;		/* tx descriptor ring */
	struct dma64dd *rxd_64;		/* rx descriptor ring */

	/* descriptor ring sizes */
	u32 ntxd;	/* # tx descriptors */
	u32 nrxd;	/* # rx descriptors */
	u32 nrxpost;	/* # rx buffers to keep posted */
	u32 ntxpost;	/* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32 txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32 tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32 txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32 txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32 rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32 rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32 last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32 rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;	/* debug FS stats file for this PDC */

	/* counters */
	u32 pdc_requests;	/* number of request messages submitted */
	u32 pdc_replies;	/* number of reply messages received */
	u32 last_tx_not_done;	/* too few tx descriptors to indicate done */
	u32 tx_ring_full;	/* unable to accept msg because tx ring full */
	u32 rx_ring_full;	/* unable to accept msg because rx ring full */
	u32 txnobuf;		/* unable to create tx descriptor */
	u32 rxnobuf;		/* unable to create rx descriptor */
	u32 rx_oflow;		/* count of rx overflows */
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:    PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors were processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;	/* virtual addr of start of resp message DMA header */
	u32 frags_rdy;	/* number of fragments ready to read */
	u32 rx_idx;	/* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called from the rx tasklet.
 */
static void
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32((void *)&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:      PDC state for the SPU that will process this request
 * @sg:        Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}
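
/*
 * Editor's illustration of the splitting above: a single 40960-byte sg entry
 * becomes three tx descriptors of 16384, 16384 and 8192 bytes, since each
 * descriptor buffer is capped at PDC_DMA_BUF_MAX (16384) bytes.
 */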

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
	iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}
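
/*
 * Editor's note: the ptr registers take byte offsets, not descriptor indexes.
 * Each struct dma64dd is 16 bytes, so "index << 4" above converts a ring
 * index to the byte offset the hardware expects; pdc_receive() reverses this
 * by dividing status0 by RING_ENTRY_SIZE.
 */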

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:   PDC state for SPU handling request
 * @dst_sg: scatterlist providing rx buffers for response to be returned to
 *	    mailbox client
 * @ctx:    Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}
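
/*
 * Editor's note: the single metadata descriptor posted above is the reason
 * pdc_send_data() asks pdc_rings_full() for rx_desc_req + 1 rx ring slots
 * before building any descriptors.
 */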

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:    PDC state for the SPU that will process this request
 * @sg:      Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:      Interrupt number that has fired
 * @data:     device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. So cache the
 * status for later use in the tasklet. Other than that, just schedule the
 * rx tasklet to do the deferred processing.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (likely(intstatus & PDC_RCVINTEN_0))
		set_bit(PDC_RCVINT_0, &pdcs->intstatus);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Disable interrupts until the tasklet runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Wake up the deferred processing */
	if (likely(pdcs && (irq == pdcs->pdc_irq) &&
		   (intstatus & PDC_INTMASK))) {
		tasklet_schedule(&pdcs->rx_tasklet);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

/* Tasklet that runs the deferred processing after a DMA rx interrupt */
static void pdc_tasklet_cb(unsigned long data)
{
	struct pdc_state *pdcs = (struct pdc_state *)data;
	bool rx_int;

	rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
	if (likely(pdcs && rx_int))
		pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
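
/*
 * Editor's note on the interrupt flow: pdc_irq_handler() runs in hard irq
 * context, where it only records the interrupt status, clears and masks the
 * device interrupt, and schedules this tasklet. The tasklet then drains the
 * rx ring via pdc_receive() and re-enables the interrupt, keeping the time
 * spent in hard irq context short.
 */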

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:    PDC instance state
 * @ringset: index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  (void *)&dma_reg->dmarcv.control);
	iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
	iowrite32(0, (void *)&dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  (void *)&dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  (void *)&pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT,
				  (void *)&pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  (void *)&pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  (void *)&pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:    Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
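
/*
 * Editor's illustration: for a 2-entry scatterlist with lengths 20000 and
 * 4096, pdc_desc_count() returns (20000 / 16384 + 1) + (4096 / 16384 + 1) =
 * 2 + 1 = 3 descriptors.
 */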

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:   PDC state
 * @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}

	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *		corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *		brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *	   support
 *	   < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure: %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (void *)(((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (void *)(((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  (void *)&dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC
 * and register its IRQ handler. Deferred handling of received frames happens
 * in a tasklet, outside of interrupt context.
 * @pdcs:   PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return:  PDC_SUCCESS
 *          <0 if the irq request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	pdcs->intstatus = 0;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
	iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. PDC reports transmit
 * completion by polling, via the framework's last_tx_done op, to determine
 * when a mailbox message has successfully been transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;
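
	/*
	 * Editor's note: tx completion is reported by polling. With
	 * txdone_poll set, the mailbox core calls pdc_last_tx_done() every
	 * txpoll_period (1 ms here) to learn when the channel can accept
	 * another message; no tx-done interrupt is used.
	 */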
	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = mbox_controller_register(mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	mbox_controller_unregister(&pdcs->mbc);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");