1 // SPDX-License-Identifier: GPL-2.0
/*
 * TI K3 NAVSS Ring Accelerator subsystem driver
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */
8 #include <linux/dma-mapping.h>
10 #include <linux/init.h>
12 #include <linux/platform_device.h>
13 #include <linux/soc/ti/k3-ringacc.h>
14 #include <linux/soc/ti/ti_sci_protocol.h>
15 #include <linux/soc/ti/ti_sci_inta_msi.h>
16 #include <linux/of_irq.h>
17 #include <linux/irqdomain.h>
/* Global registry of probed ringacc instances (see of_k3_ringacc_get_by_phandle()) */
static LIST_HEAD(k3_ringacc_list);
/* Serializes add/lookup on k3_ringacc_list */
static DEFINE_MUTEX(k3_ringacc_list_lock);

/* Ring element count occupies the low 20 bits of the ring size config word */
#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 * @db: Ring Doorbell Register
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 */
struct k3_ring_rt_regs {
	/*
	 * NOTE(review): the register members (and closing brace) are not
	 * visible in this chunk - verify the field layout and reserved
	 * padding against the RA realtime MMIO map in the full source.
	 */

/* Stride between consecutive per-ring RT register regions */
#define K3_RINGACC_RT_REGS_STEP 0x1000
/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 */
struct k3_ring_fifo_regs {
	/* NOTE(review): head_data/tail_data members are elided in this chunk */
	u32 peek_head_data[128];	/* 128 x u32 = 512-byte access window */
	u32 peek_tail_data[128];	/* 128 x u32 = 512-byte access window */
/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 * @revision: Revision Register
 * @config: Config Register
 */
struct k3_ringacc_proxy_gcfg_regs {
	/* NOTE(review): members (and closing brace) are elided in this chunk */

/* Number of proxy threads lives in the low 16 bits of @config */
#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
	/* NOTE(review): members (and closing brace) are elided in this chunk */

/* Stride between consecutive per-proxy target register regions */
#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
/* proxy_id sentinel: ring has no proxy thread assigned */
#define K3_RINGACC_PROXY_NOT_USED (-1)
/* Access modes programmed into a proxy target's control register */
enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,		/* destructive head access */
	PROXY_ACCESS_MODE_TAIL = 1,		/* destructive tail access */
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,	/* non-destructive head access */
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,	/* non-destructive tail access */

/* Size of one element access window inside the FIFO region */
#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
/* Stride between consecutive per-ring FIFO register regions */
#define K3_RINGACC_FIFO_REGS_STEP	0x1000
/* Maximum count that can be written to the doorbell in a single write */
#define K3_RINGACC_MAX_DB_RING_CNT    (127U)
	/*
	 * NOTE(review): the "struct k3_ring_ops {" opener is not visible in
	 * this chunk. These are the per-mode element access operations; a
	 * given mode may leave some of them NULL (callers check before use).
	 */
	int (*push_tail)(struct k3_ring *ring, void *elm);	/* append at tail */
	int (*push_head)(struct k3_ring *ring, void *elm);	/* insert at head */
	int (*pop_tail)(struct k3_ring *ring, void *elm);	/* remove from tail */
	int (*pop_head)(struct k3_ring *ring, void *elm);	/* remove from head */
/**
 * struct k3_ring_state - Internal state tracking structure
 * @free: Number of free entries
 * @windex: Write index
 * @rindex: Read index
 */
struct k3_ring_state {
	/*
	 * NOTE(review): members are elided in this chunk; code below also
	 * uses ring->state.occ, so the full struct presumably carries
	 * free/occ/windex/rindex - confirm against full source.
	 */
/**
 * struct k3_ring - RA Ring descriptor
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @parent: Pointer on struct @k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 */
	/*
	 * NOTE(review): the "struct k3_ring {" opener and several members
	 * (ring_mem_virt, size, flags, ring_id, use_count, proxy_id) are
	 * elided in this chunk.
	 */
	struct k3_ring_rt_regs __iomem *rt;
	struct k3_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs __iomem *proxy;
	dma_addr_t ring_mem_dma;
	struct k3_ring_ops *ops;
	enum k3_ring_size elm_size;
	enum k3_ring_mode mode;
/* Ring is configured (cfg'ed) and owns its backing memory */
#define K3_RING_FLAG_BUSY BIT(1)
/* Ring may be requested by multiple users */
#define K3_RING_FLAG_SHARED BIT(2)
	struct k3_ring_state state;
	struct k3_ringacc *parent;
/* SoC-specific ringacc initialization hooks (selected via of_match data) */
struct k3_ringacc_ops {
	/* probe-time init: map resources, parse DT, populate @ringacc */
	int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
/**
 * struct k3_ringacc - Rings accelerator descriptor
 * @dev: pointer on RA device
 * @proxy_gcfg: RA proxy global config registers
 * @proxy_target_base: RA proxy datapath region
 * @num_rings: number of ring in RA
 * @rings_inuse: bitfield for ring usage tracking
 * @rm_gp_range: general purpose rings range from tisci
 * @dma_ring_reset_quirk: DMA reset w/a enable
 * @num_proxies: number of RA proxies
 * @proxy_inuse: bitfield for proxy usage tracking
 * @rings: array of rings descriptors (struct @k3_ring)
 * @list: list of RAs in the system
 * @req_lock: protect rings allocation
 * @tisci: pointer ti-sci handle
 * @tisci_ring_ops: ti-sci rings ops
 * @tisci_dev_id: ti-sci device id
 * @ops: SoC specific ringacc operation
 */
	/*
	 * NOTE(review): the "struct k3_ringacc {" opener and some members
	 * (dev, num_proxies, tisci_dev_id, closing brace) are elided in
	 * this chunk.
	 */
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;
	bool dma_ring_reset_quirk;
	unsigned long *proxy_inuse;
	struct k3_ring *rings;
	struct list_head list;
	struct mutex req_lock; /* protect rings allocation */
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	const struct k3_ringacc_ops *ops;
211 static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
213 return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
214 (4 << ring->elm_size);
217 static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
219 return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
/* RING (doorbell) mode: CPU reads/writes ring memory directly */
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);

/* Only tail-push/head-pop are possible in RING mode */
static struct k3_ring_ops k3_ring_mode_ring_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_ringacc_ring_pop_mem,
/* MESSAGE mode: elements are transferred through the per-ring FIFO windows */
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_msg_ops = {
		.push_tail = k3_ringacc_ring_push_io,
		.push_head = k3_ringacc_ring_push_head_io,
		.pop_tail = k3_ringacc_ring_pop_tail_io,
		.pop_head = k3_ringacc_ring_pop_io,
/* MESSAGE mode via proxy: accesses go through the ring's proxy datapath */
static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
/* Dump @ring's SW config, RT registers and the first 128 bytes of ring memory */
static void k3_ringacc_ring_dump(struct k3_ring *ring)
	struct device *dev = ring->parent->dev;

	dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
	dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
		&ring->ring_mem_dma);
	dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
		ring->elm_size, ring->size, ring->mode, ring->proxy_id);
	dev_dbg(dev, "dump flags %08X\n", ring->flags);

	dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
	dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
	dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
	dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
	dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

	/* 16 * 8 = first 128 bytes of ring memory, when it is allocated */
	if (ring->ring_mem_virt)
		print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
				     16, 1, ring->ring_mem_virt, 16 * 8, false);
/*
 * Request (allocate) a ring by @id, or any free GP ring for
 * K3_RINGACC_RING_ID_ANY; optionally also grab a free proxy thread.
 * NOTE(review): several lines (error gotos, brackets) are elided in this
 * chunk - verify the control flow against the full source.
 */
struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	mutex_lock(&ringacc->req_lock);

	if (id == K3_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
						&ringacc->rm_gp_range->desc[0];
		/* search for a free bit inside the GP range only */
		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse, size,

	/* an already-busy, non-shared ring cannot be handed out again */
	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
	else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)

	if (flags & K3_RINGACC_RING_USE_PROXY) {
		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
					      ringacc->num_proxies, 0);
		/* find_next_zero_bit() returns the size when nothing is free */
		if (proxy_id == ringacc->num_proxies)

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
	dev_dbg(ringacc->dev, "Giving ring#%d\n", id);

	set_bit(id, ringacc->rings_inuse);

	ringacc->rings[id].use_count++;
	mutex_unlock(&ringacc->req_lock);
	return &ringacc->rings[id];

	/* error path: drop the lock (return value elided in this chunk) */
	mutex_unlock(&ringacc->req_lock);
/*
 * Request a forward/completion ring pair; on failure to get the completion
 * ring the already-acquired forward ring is released again.
 */
int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
				  int fwd_id, int compl_id,
				  struct k3_ring **fwd_ring,
				  struct k3_ring **compl_ring)
	/* both output pointers are mandatory */
	if (!fwd_ring || !compl_ring)

	*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);

	*compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
	if (!(*compl_ring)) {
		/* undo the forward-ring allocation to avoid leaking it */
		k3_ringacc_ring_free(*fwd_ring);

EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
/*
 * Ask TI-SCI firmware to reset the ring's occupancy counter
 * (RING_COUNT_VALID only - other config fields untouched).
 */
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
	struct k3_ringacc *ringacc = ring->parent;

	ret = ringacc->tisci_ring_ops->config(
			TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
			ringacc->tisci_dev_id,
		/* firmware failure is only logged; callers cannot recover */
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
377 void k3_ringacc_ring_reset(struct k3_ring *ring)
379 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
382 memset(&ring->state, 0, sizeof(ring->state));
384 k3_ringacc_ring_reset_sci(ring);
386 EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
/*
 * Ask TI-SCI firmware to switch only the ring's queue mode
 * (RING_MODE_VALID) - used by the DMA-reset workaround below.
 */
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
					       enum k3_ring_mode mode)
	struct k3_ringacc *ringacc = ring->parent;

	ret = ringacc->tisci_ring_ops->config(
			TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
			ringacc->tisci_dev_id,
		/* log only; caller continues with the old mode on failure */
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
/*
 * Reset a ring used by UDMAP, applying the DMA ring reset workaround when
 * the SoC needs it: wrap the UDMAP-internal occupancy counter back to 0 by
 * ringing the doorbell (2^22 - occ) times in ring/doorbell mode.
 * NOTE(review): some lines (returns, brackets) are elided in this chunk.
 */
void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	/* without the quirk a plain reset (at the end) is sufficient */
	if (!ring->parent->dma_ring_reset_quirk)

		/* occ == 0 from the caller means "read current HW occupancy" */
		occ = readl(&ring->rt->occ);

		u32 db_ring_cnt, db_ring_cnt_cur;

		dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,

		/* TI-SCI ring reset */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * Setup the ring in ring/doorbell mode (if not already in this
		 * mode) so the doorbell writes below are counted.
		 */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_RINGACC_RING_MODE_RING);
		/*
		 * Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * number of writes.
			 */
			if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;

		/* Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);

	/* finally reset SW state + HW occupancy as usual */
	k3_ringacc_ring_reset(ring);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
/* Tell TI-SCI firmware to release/clear all of the ring's configuration */
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
	struct k3_ringacc *ringacc = ring->parent;

	ret = ringacc->tisci_ring_ops->config(
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
		/* log only; local teardown proceeds regardless */
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
/*
 * Drop one reference on @ring; the last reference frees the HW config,
 * the DMA ring memory and any proxy thread, then clears the in-use bit.
 * NOTE(review): some lines (guards, gotos, returns) are elided in this chunk.
 */
int k3_ringacc_ring_free(struct k3_ring *ring)
	struct k3_ringacc *ringacc;

	ringacc = ring->parent;

	dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

	/* freeing a ring that was never requested is a caller bug */
	if (!test_bit(ring->ring_id, ringacc->rings_inuse))

	mutex_lock(&ringacc->req_lock);

	/* shared ring: only the last user performs the actual teardown */
	if (--ring->use_count)

	if (!(ring->flags & K3_RING_FLAG_BUSY))

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;

	clear_bit(ring->ring_id, ringacc->rings_inuse);

	mutex_unlock(&ringacc->req_lock);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
/* Return the HW ring index of @ring (NULL guard elided in this chunk) */
u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
	return ring->ring_id;
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
/* Return the TI-SCI device id of the RA owning @ring (guard elided in chunk) */
u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
	return ring->parent->tisci_dev_id;
EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
/* Look up the Linux virq mapped to @ring via the TI-SCI INTA MSI domain */
int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
	irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
/*
 * Push the ring's full configuration (base address, size, mode, elsize)
 * to TI-SCI firmware.
 */
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
	struct k3_ringacc *ringacc = ring->parent;

	ring_idx = ring->ring_id;
	ret = ringacc->tisci_ring_ops->config(
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			/* 64-bit ring base is split into two 32-bit words */
			lower_32_bits(ring->ring_mem_dma),
			upper_32_bits(ring->ring_mem_dma),

		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
/*
 * Configure a previously requested ring: validate @cfg, pick the access
 * ops for the mode, allocate coherent ring memory and push the config to
 * TI-SCI. Only the first (master) user of a shared ring may configure it.
 * NOTE(review): some lines (returns, breaks, gotos) are elided in this chunk.
 */
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
	struct k3_ringacc *ringacc = ring->parent;

	/* reject invalid element size/mode/count or an unrequested ring */
	if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
	    cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))

	/* MESSAGE mode with elements > 8 bytes needs a proxy thread */
	if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
	    ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
	    cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
		dev_err(ringacc->dev,
			"Message mode must use proxy for %u element size\n",
			4 << ring->elm_size);

	/*
	 * In case of shared ring only the first user (master user) can
	 * configure the ring. The sequence should be by the client:
	 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
	 * k3_ringacc_ring_cfg(ring, cfg); # master configuration
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 */
	if (ring->use_count != 1)

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	memset(&ring->state, 0, sizeof(ring->state));

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	/* pick the access ops matching the configured mode */
	switch (ring->mode) {
	case K3_RINGACC_RING_MODE_RING:
		ring->ops = &k3_ring_mode_ring_ops;
	case K3_RINGACC_RING_MODE_MESSAGE:
			/* with a proxy thread, go through the proxy datapath */
			ring->ops = &k3_ring_mode_proxy_ops;
			ring->ops = &k3_ring_mode_msg_ops;

	ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
					ring->size * (4 << ring->elm_size),
					&ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");

	ret = k3_ringacc_ring_cfg_sci(ring);

	ring->flags |= K3_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
			K3_RING_FLAG_SHARED : 0;

	k3_ringacc_ring_dump(ring);

	/* error path: release the ring memory again */
	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
/* Ring size in elements; requires a configured (BUSY) ring */
u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
/*
 * Number of free entries. The cached value is refreshed from the HW
 * occupancy register only when it has dropped to zero.
 */
u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	if (!ring->state.free)
		ring->state.free = ring->size - readl(&ring->rt->occ);

	return ring->state.free;
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
/* Current HW occupancy (element count) read straight from the RT register */
u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	return readl(&ring->rt->occ);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
717 u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
719 return !k3_ringacc_ring_get_free(ring);
721 EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
/* Logical access direction requested by the push/pop wrappers below */
enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
/* Proxy control register fields: access mode [17:16], element size [26:24] */
#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)
/* Program the ring's proxy thread for @mode with the ring's element size */
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
	val |= K3_RINGACC_PROXY_MODE(mode);
	val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
	writel(val, &ring->proxy->control);
/*
 * Transfer one element through the ring's proxy datapath: program the
 * proxy for head/tail access, then memcpy the element to/from the access
 * window at the end of the proxy data region.
 * NOTE(review): state-accounting lines are elided in this chunk.
 */
static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
					enum k3_ringacc_access_mode access_mode)
	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);

	/* element window sits at the end of the 512-byte data region */
	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));

	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
		memcpy_toio(ptr, elem, (4 << ring->elm_size));

	dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
/* Push one element at the ring head through the proxy datapath */
static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
/* Push one element at the ring tail through the proxy datapath */
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
/* Pop one element from the ring head through the proxy datapath */
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_HEAD);
812 static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
814 return k3_ringacc_ring_access_proxy(ring, elem,
815 K3_RINGACC_ACCESS_MODE_POP_HEAD);
/*
 * Transfer one element through the ring's FIFO window (MESSAGE mode
 * without proxy): pick the head or tail data window, then memcpy the
 * element to/from the window position matching the element size.
 * NOTE(review): state-accounting lines are elided in this chunk.
 */
static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
				     enum k3_ringacc_access_mode access_mode)
	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;

	/* element window sits at the end of the 512-byte FIFO region */
	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));

	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
		memcpy_toio(ptr, elem, (4 << ring->elm_size));

	dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
		ring->state.free, ring->state.windex, ring->state.occ,
/* Push one element at the ring head through the FIFO window */
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
/* Default push (at tail) through the FIFO window */
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
/* Default pop (from head) through the FIFO window */
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_HEAD);
883 static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
885 return k3_ringacc_ring_access_io(ring, elem,
886 K3_RINGACC_ACCESS_MODE_POP_HEAD);
/*
 * RING mode push: copy the element into ring memory at the write index,
 * advance the index (mod size) and ring the doorbell by +1.
 */
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));

	ring->state.windex = (ring->state.windex + 1) % ring->size;

	/* tell the HW one element was added */
	writel(1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
		ring->state.free, ring->state.windex);
/*
 * RING mode pop: copy the element out of ring memory at the read index,
 * advance the index (mod size) and ring the doorbell by -1.
 */
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.rindex = (ring->state.rindex + 1) % ring->size;

	/* tell the HW one element was consumed */
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
		ring->state.occ, ring->state.rindex, elem_ptr);
/*
 * Public push (at tail): checks the ring is configured and not full, then
 * dispatches to the mode-specific push_tail op (-EOPNOTSUPP if missing).
 */
int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
/*
 * Public head-push: same checks as k3_ringacc_ring_push() but dispatches
 * to the mode-specific push_head op (-EOPNOTSUPP if missing).
 */
int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))

	if (ring->ops && ring->ops->push_head)
		ret = ring->ops->push_head(ring, elem);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
/*
 * Public pop (from head): refresh the cached occupancy from HW when it
 * reaches zero, then dispatch to the mode-specific pop_head op.
 */
int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	if (!ring->state.occ)
		ring->state.occ = k3_ringacc_ring_get_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,

	/* nothing to pop even after the HW refresh */
	if (!ring->state.occ)

	if (ring->ops && ring->ops->pop_head)
		ret = ring->ops->pop_head(ring, elem);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
/*
 * Public tail-pop: same occupancy handling as k3_ringacc_ring_pop() but
 * dispatches to the mode-specific pop_tail op.
 */
int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))

	if (!ring->state.occ)
		ring->state.occ = k3_ringacc_ring_get_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
		ring->state.occ, ring->state.rindex);

	/* nothing to pop even after the HW refresh */
	if (!ring->state.occ)

	if (ring->ops && ring->ops->pop_tail)
		ret = ring->ops->pop_tail(ring, elem);
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
/*
 * Resolve @property in @np as a phandle to a ringacc node and return the
 * matching probed instance from k3_ringacc_list; -EPROBE_DEFER when the
 * node exists but its driver has not registered yet, -ENODEV when the
 * phandle is absent.
 */
struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
						const char *property)
	struct device_node *ringacc_np;
	struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
	struct k3_ringacc *entry;

	ringacc_np = of_parse_phandle(np, property, 0);
		return ERR_PTR(-ENODEV);

	mutex_lock(&k3_ringacc_list_lock);
	list_for_each_entry(entry, &k3_ringacc_list, list)
		if (entry->dev->of_node == ringacc_np) {
	mutex_unlock(&k3_ringacc_list_lock);

	/* balance of_parse_phandle()'s reference */
	of_node_put(ringacc_np);
EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
/*
 * Parse the ringacc DT node: ring count, DMA-reset quirk flag, TI-SCI
 * handle/device-id and the GP ring range; finally allocate the INTA MSI
 * interrupts for that range.
 * NOTE(review): some lines (returns, brackets) are elided in this chunk.
 */
static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
	struct device_node *node = ringacc->dev->of_node;
	struct device *dev = ringacc->dev;
	struct platform_device *pdev = to_platform_device(dev);

		dev_err(dev, "device tree info unavailable\n");

	ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
		dev_err(dev, "ti,num-rings read failure %d\n", ret);

	/* SoC-specific workaround flag, see k3_ringacc_ring_reset_dma() */
	ringacc->dma_ring_reset_quirk =
			of_property_read_bool(node, "ti,dma-ring-reset-quirk");

	ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
	if (IS_ERR(ringacc->tisci)) {
		ret = PTR_ERR(ringacc->tisci);
		/* deferral is expected during boot; don't spam the log */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "ti,sci read fail %d\n", ret);
		ringacc->tisci = NULL;

	ret = of_property_read_u32(node, "ti,sci-dev-id",
				   &ringacc->tisci_dev_id);
		dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);

	pdev->id = ringacc->tisci_dev_id;

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
						ringacc->tisci_dev_id,
						"ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range)) {
		dev_err(dev, "Failed to allocate MSI interrupts\n");
		return PTR_ERR(ringacc->rm_gp_range);

	return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
						 ringacc->rm_gp_range);
/*
 * SoC init hook: attach the INTA MSI domain, parse DT, ioremap the four
 * register regions ("rt", "fifos", "proxy_gcfg", proxy target), allocate
 * the per-ring descriptors/bitmaps and pre-wire each ring's registers.
 * NOTE(review): some lines (returns, closing braces, resource name string)
 * are elided in this chunk.
 */
static int k3_ringacc_init(struct platform_device *pdev,
			   struct k3_ringacc *ringacc)
	void __iomem *base_fifo, *base_rt;
	struct device *dev = &pdev->dev;
	struct resource *res;

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		/* INTA may not be probed yet - retry later */
		return -EPROBE_DEFER;

	ret = k3_ringacc_probe_dt(ringacc);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
	base_rt = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
	base_fifo = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
	ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
	ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	/* proxy thread count is reported by the HW itself */
	ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
					 K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
	ringacc->rings_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_rings),
					    sizeof(unsigned long), GFP_KERNEL);
	ringacc->proxy_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_proxies),
					    sizeof(unsigned long), GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)

	/* pre-compute each ring's register windows from the region strides */
	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       K3_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  K3_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
/* Per-compatible data: SoC-specific ops bundle */
struct ringacc_match_data {
	struct k3_ringacc_ops ops;

/* AM654 NAVSS ringacc ops */
static struct ringacc_match_data k3_ringacc_data = {
		.init = k3_ringacc_init,
/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
/*
 * Platform probe: pick the SoC ops from the match table, allocate the
 * ringacc descriptor, run the SoC init hook and register the instance on
 * the global list for of_k3_ringacc_get_by_phandle().
 */
static int k3_ringacc_probe(struct platform_device *pdev)
	const struct ringacc_match_data *match_data;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct k3_ringacc *ringacc;

	match = of_match_node(k3_ringacc_of_match, dev->of_node);
	match_data = match->data;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);

	mutex_init(&ringacc->req_lock);
	ringacc->ops = &match_data->ops;

	ret = ringacc->ops->init(pdev, ringacc);

	dev_set_drvdata(dev, ringacc);

	mutex_lock(&k3_ringacc_list_lock);
	list_add_tail(&ringacc->list, &k3_ringacc_list);
	mutex_unlock(&k3_ringacc_list_lock);
/* Built-in only driver: no remove hook, unbinding is suppressed */
static struct platform_driver k3_ringacc_driver = {
	.probe = k3_ringacc_probe,
		.name = "k3-ringacc",
		.of_match_table = k3_ringacc_of_match,
		.suppress_bind_attrs = true,
builtin_platform_driver(k3_ringacc_driver);