soc: ti: k3-ringacc: add AM64 DMA rings support.
[linux-2.6-microblaze.git] / drivers / soc / ti / k3-ringacc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * TI K3 NAVSS Ring Accelerator subsystem driver
4  *
5  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
6  */
7
8 #include <linux/dma-mapping.h>
9 #include <linux/io.h>
10 #include <linux/init.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13 #include <linux/sys_soc.h>
14 #include <linux/dma/ti-cppi5.h>
15 #include <linux/soc/ti/k3-ringacc.h>
16 #include <linux/soc/ti/ti_sci_protocol.h>
17 #include <linux/soc/ti/ti_sci_inta_msi.h>
18 #include <linux/of_irq.h>
19 #include <linux/irqdomain.h>
20
/* All registered ring accelerator instances, protected by the list lock */
static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);

/* Ring size (element count) field width: 20 bits for RA rings, 16 for DMA rings */
#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK             GENMASK(19, 0)
#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK             GENMASK(15, 0)
26
/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 *
 * @resv_16: Reserved
 * @db: Ring Doorbell Register
 * @resv_4: Reserved
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 */
struct k3_ring_rt_regs {
	u32	resv_16[4];
	u32	db;
	u32	resv_4[1];
	u32	occ;
	u32	indx;
	u32	hwocc;
	u32	hwindx;
};

/* Stride between per-ring RT register blocks (RA vs. BCDMA/PKTDMA rings) */
#define K3_RINGACC_RT_REGS_STEP			0x1000
#define K3_DMARING_RT_REGS_STEP			0x2000
/* Offset of the reverse-ring RT registers within a DMA ring's RT block */
#define K3_DMARING_RT_REGS_REVERSE_OFS		0x1000
#define K3_RINGACC_RT_OCC_MASK			GENMASK(20, 0)
#define K3_DMARING_RT_OCC_TDOWN_COMPLETE	BIT(31)
#define K3_DMARING_RT_DB_ENTRY_MASK		GENMASK(7, 0)
#define K3_DMARING_RT_DB_TDOWN_ACK		BIT(31)
55
/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 */
struct k3_ring_fifo_regs {
	/* each window is 512 bytes (128 x u32), see K3_RINGACC_FIFO_WINDOW_SIZE_BYTES */
	u32	head_data[128];
	u32	tail_data[128];
	u32	peek_head_data[128];
	u32	peek_tail_data[128];
};
70
/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @revision: Revision Register
 * @config: Config Register
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;
	u32	config;
};

/* Number of proxy threads, read from the proxy @config register */
#define K3_RINGACC_PROXY_CFG_THREADS_MASK		GENMASK(15, 0)
83
/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 *
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @resv_512: Reserved
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;
	u32	status;
	u8	resv_512[504];
	u32	data[128];
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

/* Access mode programmed into the proxy control register per transfer */
enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
#define K3_RINGACC_FIFO_REGS_STEP	0x1000
/* Maximum count that can be written to the doorbell in one go */
#define K3_RINGACC_MAX_DB_RING_CNT    (127U)
112
/* Per-mode push/pop operations; unused entries stay NULL for that mode */
struct k3_ring_ops {
	int (*push_tail)(struct k3_ring *ring, void *elm);
	int (*push_head)(struct k3_ring *ring, void *elm);
	int (*pop_tail)(struct k3_ring *ring, void *elm);
	int (*pop_head)(struct k3_ring *ring, void *elm);
};
119
/**
 * struct k3_ring_state - Internal state tracking structure
 *
 * @free: Number of free entries
 * @occ: Occupancy
 * @windex: Write index
 * @rindex: Read index
 * @tdown_complete: Tear down complete state (DMA rings only)
 */
struct k3_ring_state {
	u32 free;
	u32 occ;
	u32 windex;
	u32 rindex;
	u32 tdown_complete:1;
};
135
/**
 * struct k3_ring - RA Ring descriptor
 *
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @mode: Ring mode
 * @flags: flags
 * @state: Ring state
 * @ring_id: Ring Id
 * @parent: Pointer on struct @k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 * @dma_dev: device to be used for DMA API (allocation, mapping)
 * @asel: Address Space Select value for physical addresses
 */
struct k3_ring {
	struct k3_ring_rt_regs __iomem *rt;
	struct k3_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs  __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_ring_ops *ops;
	u32		size;
	enum k3_ring_size elm_size;
	enum k3_ring_mode mode;
	u32		flags;
#define K3_RING_FLAG_BUSY	BIT(1)	/* ring is configured and in use */
#define K3_RING_FLAG_SHARED	BIT(2)	/* ring may be requested by multiple users */
#define K3_RING_FLAG_REVERSE	BIT(3)	/* reverse side of a DMA dual-ring */
	struct k3_ring_state state;
	u32		ring_id;
	struct k3_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
	struct device	*dma_dev;
	u32		asel;
/* bit offset at which @asel is merged into DMA addresses (see push/pop paths) */
#define K3_ADDRESS_ASEL_SHIFT	48
};
180
/* SoC/integration specific hooks; @init is called at probe time */
struct k3_ringacc_ops {
	int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
};
184
/**
 * struct k3_ringacc - Rings accelerator descriptor
 *
 * @dev: pointer on RA device
 * @proxy_gcfg: RA proxy global config registers
 * @proxy_target_base: RA proxy datapath region
 * @num_rings: number of ring in RA
 * @rings_inuse: bitfield for ring usage tracking
 * @rm_gp_range: general purpose rings range from tisci
 * @dma_ring_reset_quirk: DMA reset w/a enable
 * @num_proxies: number of RA proxies
 * @proxy_inuse: bitfield for proxy usage tracking
 * @rings: array of rings descriptors (struct @k3_ring)
 * @list: list of RAs in the system
 * @req_lock: protect rings allocation
 * @tisci: pointer ti-sci handle
 * @tisci_ring_ops: ti-sci rings ops
 * @tisci_dev_id: ti-sci device id
 * @ops: SoC specific ringacc operation
 * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
 */
struct k3_ringacc {
	struct device *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;

	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	/* for DMA rings @rings holds 2 * @num_rings entries: fwd then reverse */
	struct k3_ring *rings;
	struct list_head list;
	struct mutex req_lock; /* protect rings allocation */

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;

	const struct k3_ringacc_ops *ops;
	bool dma_rings;
};
229
/**
 * struct k3_ringacc_soc_data - Rings accelerator SoC data
 *
 * @dma_ring_reset_quirk:  DMA reset w/a enable
 */
struct k3_ringacc_soc_data {
	unsigned dma_ring_reset_quirk:1;
};
238
239 static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
240 {
241         return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
242 }
243
244 static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
245 {
246         u32 val;
247
248         val = readl(&ring->rt->occ);
249
250         ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
251         ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
252 }
253
254 static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
255 {
256         return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
257                (4 << ring->elm_size);
258 }
259
260 static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
261 {
262         return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
263 }
264
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);

/* RING mode: SW accesses the ring memory directly, HW tracks via doorbell */
static struct k3_ring_ops k3_ring_mode_ring_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_ringacc_ring_pop_mem,
};

/* Forward side of a BCDMA/PKTDMA dual ring */
static struct k3_ring_ops k3_dmaring_fwd_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_dmaring_fwd_pop,
};

static struct k3_ring_ops k3_dmaring_reverse_ops = {
		/* Reverse side of the DMA ring can only be popped by SW */
		.pop_head = k3_dmaring_reverse_pop,
};

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

/* MESSAGE mode without proxy: element transfer through the fifo window */
static struct k3_ring_ops k3_ring_mode_msg_ops = {
		.push_tail = k3_ringacc_ring_push_io,
		.push_head = k3_ringacc_ring_push_head_io,
		.pop_tail = k3_ringacc_ring_pop_tail_io,
		.pop_head = k3_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

/* MESSAGE mode through a proxy thread (required for elements > 8 bytes) */
static struct k3_ring_ops k3_ring_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
};
308
/* Debug helper: dump ring configuration, RT registers and start of ring memory */
static void k3_ringacc_ring_dump(struct k3_ring *ring)
{
	struct device *dev = ring->parent->dev;

	dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
	dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
		&ring->ring_mem_dma);
	dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
		ring->elm_size, ring->size, ring->mode, ring->proxy_id);
	dev_dbg(dev, "dump flags %08X\n", ring->flags);

	dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
	dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
	dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
	dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
	dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

	/* ring memory is only present once the ring has been configured */
	if (ring->ring_mem_virt)
		print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
				     16, 1, ring->ring_mem_virt, 16 * 8, false);
}
330
331 struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
332                                         int id, u32 flags)
333 {
334         int proxy_id = K3_RINGACC_PROXY_NOT_USED;
335
336         mutex_lock(&ringacc->req_lock);
337
338         if (id == K3_RINGACC_RING_ID_ANY) {
339                 /* Request for any general purpose ring */
340                 struct ti_sci_resource_desc *gp_rings =
341                                                 &ringacc->rm_gp_range->desc[0];
342                 unsigned long size;
343
344                 size = gp_rings->start + gp_rings->num;
345                 id = find_next_zero_bit(ringacc->rings_inuse, size,
346                                         gp_rings->start);
347                 if (id == size)
348                         goto error;
349         } else if (id < 0) {
350                 goto error;
351         }
352
353         if (test_bit(id, ringacc->rings_inuse) &&
354             !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
355                 goto error;
356         else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
357                 goto out;
358
359         if (flags & K3_RINGACC_RING_USE_PROXY) {
360                 proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
361                                               ringacc->num_proxies, 0);
362                 if (proxy_id == ringacc->num_proxies)
363                         goto error;
364         }
365
366         if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
367                 set_bit(proxy_id, ringacc->proxy_inuse);
368                 ringacc->rings[id].proxy_id = proxy_id;
369                 dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
370                         proxy_id);
371         } else {
372                 dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
373         }
374
375         set_bit(id, ringacc->rings_inuse);
376 out:
377         ringacc->rings[id].use_count++;
378         mutex_unlock(&ringacc->req_lock);
379         return &ringacc->rings[id];
380
381 error:
382         mutex_unlock(&ringacc->req_lock);
383         return NULL;
384 }
385 EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
386
387 static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
388                                         struct k3_ring **fwd_ring,
389                                         struct k3_ring **compl_ring)
390 {
391         int ret = 0;
392
393         /*
394          * DMA rings must be requested by ID, completion ring is the reverse
395          * side of the forward ring
396          */
397         if (fwd_id < 0)
398                 return -EINVAL;
399
400         mutex_lock(&ringacc->req_lock);
401
402         if (test_bit(fwd_id, ringacc->rings_inuse)) {
403                 ret = -EBUSY;
404                 goto error;
405         }
406
407         *fwd_ring = &ringacc->rings[fwd_id];
408         *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
409         set_bit(fwd_id, ringacc->rings_inuse);
410         ringacc->rings[fwd_id].use_count++;
411         dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
412
413         mutex_unlock(&ringacc->req_lock);
414         return 0;
415
416 error:
417         mutex_unlock(&ringacc->req_lock);
418         return ret;
419 }
420
421 int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
422                                   int fwd_id, int compl_id,
423                                   struct k3_ring **fwd_ring,
424                                   struct k3_ring **compl_ring)
425 {
426         int ret = 0;
427
428         if (!fwd_ring || !compl_ring)
429                 return -EINVAL;
430
431         if (ringacc->dma_rings)
432                 return k3_dmaring_request_dual_ring(ringacc, fwd_id,
433                                                     fwd_ring, compl_ring);
434
435         *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
436         if (!(*fwd_ring))
437                 return -ENODEV;
438
439         *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
440         if (!(*compl_ring)) {
441                 k3_ringacc_ring_free(*fwd_ring);
442                 ret = -ENODEV;
443         }
444
445         return ret;
446 }
447 EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
448
449 static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
450 {
451         struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
452         struct k3_ringacc *ringacc = ring->parent;
453         int ret;
454
455         ring_cfg.nav_id = ringacc->tisci_dev_id;
456         ring_cfg.index = ring->ring_id;
457         ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
458         ring_cfg.count = ring->size;
459
460         ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
461         if (ret)
462                 dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
463                         ret, ring->ring_id);
464 }
465
466 void k3_ringacc_ring_reset(struct k3_ring *ring)
467 {
468         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
469                 return;
470
471         memset(&ring->state, 0, sizeof(ring->state));
472
473         k3_ringacc_ring_reset_sci(ring);
474 }
475 EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
476
/* Switch the ring's queue mode via TI-SCI without touching other parameters */
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
					       enum k3_ring_mode mode)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
	ring_cfg.mode = mode;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
494
/*
 * Reset a ring used by a DMA channel. On SoCs with the DMA ring reset
 * quirk enabled, a ring reset while the ring still has occupancy leaves
 * the UDMAP's internal occupancy counter out of sync; it is brought back
 * to zero by ringing the doorbell the appropriate number of times in
 * ring/doorbell mode before the final reset.
 */
void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	/* Without the quirk a plain ring reset is sufficient */
	if (!ring->parent->dma_ring_reset_quirk)
		goto reset;

	/* Caller may pass occ == 0 to have it read from the hardware */
	if (!occ)
		occ = k3_ringacc_ring_read_occ(ring);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
			ring->ring_id, occ);
		/* TI-SCI ring reset */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * Setup the ring in ring/doorbell mode (if not already in this
		 * mode)
		 */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_RINGACC_RING_MODE_RING);
		/*
		 * Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * of writes
			 */
			if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

reset:
	/* Reset the ring */
	k3_ringacc_ring_reset(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
553
/* Tell TI-SCI to unconfigure the ring (all parameters reset, no ordering) */
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
569
/*
 * Release a ring obtained from k3_ringacc_request_ring(). Drops the use
 * count; only the last user actually unconfigures the ring via TI-SCI,
 * frees the ring memory and releases any proxy.
 */
int k3_ringacc_ring_free(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * DMA rings: rings shared memory and configuration, only forward ring
	 * is configured and reverse ring considered as slave.
	 */
	if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
		return 0;

	dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	/* Shared rings: only the final free tears the ring down */
	if (--ring->use_count)
		goto out;

	/* Ring was requested but never configured: nothing to undo but the bit */
	if (!(ring->flags & K3_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags = 0;
	ring->ops = NULL;
	ring->dma_dev = NULL;
	ring->asel = 0;

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

out:
	mutex_unlock(&ringacc->req_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
623
/*
 * Return the ring's hardware id.
 * NOTE: the return type is u32 but -EINVAL is returned for a NULL ring,
 * so it comes back as a large unsigned value (existing API contract).
 */
u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
632
/*
 * Return the TI-SCI device id of the ring's parent ring accelerator.
 * As with k3_ringacc_get_ring_id(), -EINVAL is returned through a u32.
 */
u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->parent->tisci_dev_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
641
642 int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
643 {
644         int irq_num;
645
646         if (!ring)
647                 return -EINVAL;
648
649         irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
650         if (irq_num <= 0)
651                 irq_num = -EINVAL;
652         return irq_num;
653 }
654 EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
655
/* Push the full ring configuration (memory, size, mode, asel) to TI-SCI */
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
	ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
	ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
	ring_cfg.count = ring->size;
	ring_cfg.mode = ring->mode;
	ring_cfg.size = ring->elm_size;
	ring_cfg.asel = ring->asel;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);

	return ret;
}
682
/*
 * Configure a BCDMA/PKTDMA dual ring. Only the forward ring is really
 * configured (memory allocation + TI-SCI); the reverse ring shares the
 * forward ring's memory and parameters and is set up as a SW-pop-only
 * mirror. DMA rings only support 8-byte elements in RING mode.
 */
static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	struct k3_ring *reverse_ring;
	int ret = 0;

	if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
	    cfg->mode != K3_RINGACC_RING_MODE_RING ||
	    cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * DMA rings: rings shared memory and configuration, only forward ring
	 * is configured and reverse ring considered as slave.
	 */
	if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
		return 0;

	/* The forward ring must have been requested first */
	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	ring->asel = cfg->asel;
	ring->dma_dev = cfg->dma_dev;
	if (!ring->dma_dev) {
		dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
			 ring->ring_id);
		ring->dma_dev = ringacc->dev;
	}

	memset(&ring->state, 0, sizeof(ring->state));

	ring->ops = &k3_dmaring_fwd_ops;

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;

	k3_ringacc_ring_dump(ring);

	/* DMA rings: configure reverse ring (at rings[ring_id + num_rings]) */
	reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
	reverse_ring->size = cfg->size;
	reverse_ring->elm_size = cfg->elm_size;
	reverse_ring->mode = cfg->mode;
	reverse_ring->asel = cfg->asel;
	memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
	reverse_ring->ops = &k3_dmaring_reverse_ops;

	/* reverse ring shares the forward ring's memory */
	reverse_ring->ring_mem_virt = ring->ring_mem_virt;
	reverse_ring->ring_mem_dma = ring->ring_mem_dma;
	reverse_ring->flags |= K3_RING_FLAG_BUSY;
	k3_ringacc_ring_dump(reverse_ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->proxy = NULL;
	ring->dma_dev = NULL;
	ring->asel = 0;
	return ret;
}
766
/*
 * Configure a previously requested ring: validate the requested geometry,
 * select the push/pop ops for the ring mode (RING, MESSAGE, or MESSAGE via
 * proxy), allocate the ring memory and program the hardware through TI-SCI.
 * Returns 0 on success or a negative errno; on failure all partially set
 * up state is unwound.
 */
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;

	ringacc = ring->parent;

	/* BCDMA/PKTDMA dual rings have their own constraints and setup path */
	if (ringacc->dma_rings)
		return k3_dmaring_cfg(ring, cfg);

	if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
	    cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	/* MESSAGE mode with elements > 8 bytes needs a proxy for the fifo window */
	if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
	    ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
	    cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
		dev_err(ringacc->dev,
			"Message mode must use proxy for %u element size\n",
			4 << ring->elm_size);
		return -EINVAL;
	}

	/*
	 * In case of shared ring only the first user (master user) can
	 * configure the ring. The sequence should be by the client:
	 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
	 * k3_ringacc_ring_cfg(ring, cfg); # master configuration
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 */
	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	memset(&ring->state, 0, sizeof(ring->state));

	/* Map the proxy datapath window if a proxy was allocated at request time */
	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_RINGACC_RING_MODE_RING:
		ring->ops = &k3_ring_mode_ring_ops;
		ring->dma_dev = cfg->dma_dev;
		if (!ring->dma_dev)
			ring->dma_dev = ringacc->dev;
		break;
	case K3_RINGACC_RING_MODE_MESSAGE:
		ring->dma_dev = ringacc->dev;
		if (ring->proxy)
			ring->ops = &k3_ring_mode_proxy_ops;
		else
			ring->ops = &k3_ring_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);

	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
			K3_RING_FLAG_SHARED : 0;

	k3_ringacc_ring_dump(ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->dma_dev = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
870
871 u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
872 {
873         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
874                 return -EINVAL;
875
876         return ring->size;
877 }
878 EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
879
880 u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
881 {
882         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
883                 return -EINVAL;
884
885         if (!ring->state.free)
886                 ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
887
888         return ring->state.free;
889 }
890 EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
891
892 u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
893 {
894         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
895                 return -EINVAL;
896
897         return k3_ringacc_ring_read_occ(ring);
898 }
899 EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
900
901 u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
902 {
903         return !k3_ringacc_ring_get_free(ring);
904 }
905 EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
906
/* Access direction selectors used by the proxy/FIFO element accessors */
enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};
915
#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)
/*
 * Program the proxy control register for @ring: ring id, head/tail access
 * mode and element size. Must be done before touching the proxy data window.
 */
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= K3_RINGACC_PROXY_MODE(mode);
	val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
	writel(val, &ring->proxy->control);
	return 0;
}
929
/*
 * k3_ringacc_ring_access_proxy - push/pop one element through the RA proxy
 * @ring: ring to access
 * @elem: element buffer of (4 << ring->elm_size) bytes
 * @access_mode: head/tail push or pop selector
 *
 * Configures the proxy for head or tail access, then copies one element
 * between @elem and the proxy data window and updates the cached free/occ
 * counters. Returns 0 on success, -EINVAL for an unsupported @access_mode.
 */
static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
					enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	/* First pass: select HEAD or TAIL access in the proxy control reg */
	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	/* Data window offset depends on the element size */
	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	/* Second pass: copy the element in the requested direction */
	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
		ring->state.occ);
	return 0;
}
977
/* Push one element to the ring head via the proxy (MESSAGE mode rings) */
static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}
983
/* Push one element to the ring tail via the proxy (MESSAGE mode rings) */
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}
989
/* Pop one element from the ring head via the proxy (MESSAGE mode rings) */
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
995
996 static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
997 {
998         return k3_ringacc_ring_access_proxy(ring, elem,
999                                             K3_RINGACC_ACCESS_MODE_POP_HEAD);
1000 }
1001
/*
 * k3_ringacc_ring_access_io - push/pop one element via the ring FIFO window
 * @ring: ring to access
 * @elem: element buffer of (4 << ring->elm_size) bytes
 * @access_mode: head/tail push or pop selector
 *
 * Selects the head_data or tail_data FIFO region, then copies one element
 * between @elem and the MMIO window and updates the cached free/occ
 * counters. Returns 0 on success, -EINVAL for an unsupported @access_mode.
 */
static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
				     enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	/* Pick the FIFO region matching the requested end of the ring */
	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	/* Window offset depends on the element size */
	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	/* Copy the element in the requested direction */
	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
		ring->state.free, ring->state.windex, ring->state.occ,
		ring->state.rindex);
	return 0;
}
1048
/* Push one element to the ring head through the FIFO window (RING mode) */
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}
1054
/* Push one element to the ring tail through the FIFO window (RING mode) */
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}
1060
/* Pop one element from the ring head through the FIFO window (RING mode) */
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
1066
1067 static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
1068 {
1069         return k3_ringacc_ring_access_io(ring, elem,
1070                                          K3_RINGACC_ACCESS_MODE_POP_HEAD);
1071 }
1072
1073 /*
1074  * The element is 48 bits of address + ASEL bits in the ring.
1075  * ASEL is used by the DMAs and should be removed for the kernel as it is not
1076  * part of the physical memory address.
1077  */
1078 static void k3_dmaring_remove_asel_from_elem(u64 *elem)
1079 {
1080         *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
1081 }
1082
/*
 * k3_dmaring_fwd_pop - pop one element from a DMA forward ring
 * @ring: forward DMA ring
 * @elem: destination buffer of (4 << ring->elm_size) bytes
 *
 * Recomputes the read position from HW occupancy (the forward ring keeps no
 * SW read index), copies the element out, strips the ASEL bits and rings the
 * doorbell to release one slot. Always returns 0.
 */
static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;
	u32 elem_idx;

	/*
	 * DMA rings: forward ring is always tied DMA channel and HW does not
	 * maintain any state data required for POP operation and its unknown
	 * how much elements were consumed by HW. So, to actually
	 * do POP, the read pointer has to be recalculated every time.
	 */
	ring->state.occ = k3_ringacc_ring_read_occ(ring);
	if (ring->state.windex >= ring->state.occ)
		elem_idx = ring->state.windex - ring->state.occ;
	else
		elem_idx = ring->size - (ring->state.occ - ring->state.windex);

	elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
	memcpy(elem, elem_ptr, (4 << ring->elm_size));
	k3_dmaring_remove_asel_from_elem(elem);

	ring->state.occ--;
	/* -1 in the doorbell decrements HW occupancy by one */
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.windex, elem_idx,
		elem_ptr);
	return 0;
}
1112
/*
 * k3_dmaring_reverse_pop - pop one element from a DMA reverse ring
 * @ring: reverse DMA ring
 * @elem: destination buffer of (4 << ring->elm_size) bytes
 *
 * Copies the next element out when occupancy is cached, strips ASEL bits and
 * acknowledges one entry via the doorbell. When the ring is empty but a
 * teardown completed, returns the CPPI5 teardown marker in *@elem and acks
 * the teardown instead. Always returns 0.
 */
static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	if (ring->state.occ) {
		memcpy(elem, elem_ptr, (4 << ring->elm_size));
		k3_dmaring_remove_asel_from_elem(elem);

		ring->state.rindex = (ring->state.rindex + 1) % ring->size;
		ring->state.occ--;
		/* Only the entry-count field may be written on DMA rings */
		writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
	} else if (ring->state.tdown_complete) {
		dma_addr_t *value = elem;

		/* Report teardown completion to the caller via a marker */
		*value = CPPI5_TDCM_MARKER;
		writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
		ring->state.tdown_complete = false;
	}

	dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}
1138
/*
 * k3_ringacc_ring_push_mem - push one element to a shared-memory (RING mode)
 * ring
 * @ring: ring to push to
 * @elem: source buffer of (4 << ring->elm_size) bytes
 *
 * Copies the element to the write index slot, injects the ring's ASEL bits
 * for DMA rings, advances the write index and rings the doorbell so HW sees
 * the new element. Always returns 0.
 */
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));
	if (ring->parent->dma_rings) {
		u64 *addr = elem_ptr;

		/* DMA rings carry the ASEL value in the element's top bits */
		*addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
	}

	ring->state.windex = (ring->state.windex + 1) % ring->size;
	ring->state.free--;
	/* +1 in the doorbell increments HW occupancy by one */
	writel(1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
		ring->state.free, ring->state.windex);

	return 0;
}
1161
/*
 * k3_ringacc_ring_pop_mem - pop one element from a shared-memory (RING mode)
 * ring
 * @ring: ring to pop from
 * @elem: destination buffer of (4 << ring->elm_size) bytes
 *
 * Copies the element at the read index, advances the read index and rings
 * the doorbell to release one slot to HW. Always returns 0.
 */
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.rindex = (ring->state.rindex + 1) % ring->size;
	ring->state.occ--;
	/* -1 in the doorbell decrements HW occupancy by one */
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
		ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}
1178
1179 int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
1180 {
1181         int ret = -EOPNOTSUPP;
1182
1183         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
1184                 return -EINVAL;
1185
1186         dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
1187                 ring->state.free, ring->state.windex);
1188
1189         if (k3_ringacc_ring_is_full(ring))
1190                 return -ENOMEM;
1191
1192         if (ring->ops && ring->ops->push_tail)
1193                 ret = ring->ops->push_tail(ring, elem);
1194
1195         return ret;
1196 }
1197 EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
1198
1199 int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
1200 {
1201         int ret = -EOPNOTSUPP;
1202
1203         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
1204                 return -EINVAL;
1205
1206         dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
1207                 ring->state.free, ring->state.windex);
1208
1209         if (k3_ringacc_ring_is_full(ring))
1210                 return -ENOMEM;
1211
1212         if (ring->ops && ring->ops->push_head)
1213                 ret = ring->ops->push_head(ring, elem);
1214
1215         return ret;
1216 }
1217 EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
1218
1219 int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
1220 {
1221         int ret = -EOPNOTSUPP;
1222
1223         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
1224                 return -EINVAL;
1225
1226         if (!ring->state.occ)
1227                 k3_ringacc_ring_update_occ(ring);
1228
1229         dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
1230                 ring->state.rindex);
1231
1232         if (!ring->state.occ && !ring->state.tdown_complete)
1233                 return -ENODATA;
1234
1235         if (ring->ops && ring->ops->pop_head)
1236                 ret = ring->ops->pop_head(ring, elem);
1237
1238         return ret;
1239 }
1240 EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
1241
1242 int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
1243 {
1244         int ret = -EOPNOTSUPP;
1245
1246         if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
1247                 return -EINVAL;
1248
1249         if (!ring->state.occ)
1250                 k3_ringacc_ring_update_occ(ring);
1251
1252         dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
1253                 ring->state.occ, ring->state.rindex);
1254
1255         if (!ring->state.occ)
1256                 return -ENODATA;
1257
1258         if (ring->ops && ring->ops->pop_tail)
1259                 ret = ring->ops->pop_tail(ring, elem);
1260
1261         return ret;
1262 }
1263 EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
1264
1265 struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
1266                                                 const char *property)
1267 {
1268         struct device_node *ringacc_np;
1269         struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
1270         struct k3_ringacc *entry;
1271
1272         ringacc_np = of_parse_phandle(np, property, 0);
1273         if (!ringacc_np)
1274                 return ERR_PTR(-ENODEV);
1275
1276         mutex_lock(&k3_ringacc_list_lock);
1277         list_for_each_entry(entry, &k3_ringacc_list, list)
1278                 if (entry->dev->of_node == ringacc_np) {
1279                         ringacc = entry;
1280                         break;
1281                 }
1282         mutex_unlock(&k3_ringacc_list_lock);
1283         of_node_put(ringacc_np);
1284
1285         return ringacc;
1286 }
1287 EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
1288
/*
 * k3_ringacc_probe_dt - parse DT properties and TISCI resources for the RA
 * @ringacc: partially initialized ringacc instance (dev must be set)
 *
 * Reads "ti,num-rings", the TISCI handle, and "ti,sci-dev-id"; reserves the
 * GP ring range from the resource manager and allocates the INTA MSI irqs.
 * Returns 0 on success or a negative errno.
 */
static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
{
	struct device_node *node = ringacc->dev->of_node;
	struct device *dev = ringacc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
	if (ret) {
		dev_err(dev, "ti,num-rings read failure %d\n", ret);
		return ret;
	}

	ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
	if (IS_ERR(ringacc->tisci)) {
		ret = PTR_ERR(ringacc->tisci);
		/* Defer quietly: the TISCI provider may not be up yet */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "ti,sci read fail %d\n", ret);
		ringacc->tisci = NULL;
		return ret;
	}

	ret = of_property_read_u32(node, "ti,sci-dev-id",
				   &ringacc->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
		return ret;
	}

	/* Use the TISCI device id as the platform device id */
	pdev->id = ringacc->tisci_dev_id;

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
						ringacc->tisci_dev_id,
						"ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range)) {
		dev_err(dev, "Failed to allocate MSI interrupts\n");
		return PTR_ERR(ringacc->rm_gp_range);
	}

	/* One MSI per GP ring in the granted range */
	return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
						 ringacc->rm_gp_range);
}
1336
/* AM65x SR1.0 needs the DMA ring reset workaround */
static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
	.dma_ring_reset_quirk = 1,
};

/* SoC revisions that require quirk handling; others use defaults */
static const struct soc_device_attribute k3_ringacc_socinfo[] = {
	{ .family = "AM65X",
	  .revision = "SR1.0",
	  .data = &k3_ringacc_soc_data_sr1
	},
	{/* sentinel */}
};
1348
1349 static int k3_ringacc_init(struct platform_device *pdev,
1350                            struct k3_ringacc *ringacc)
1351 {
1352         const struct soc_device_attribute *soc;
1353         void __iomem *base_fifo, *base_rt;
1354         struct device *dev = &pdev->dev;
1355         struct resource *res;
1356         int ret, i;
1357
1358         dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
1359                                             DOMAIN_BUS_TI_SCI_INTA_MSI);
1360         if (!dev->msi_domain) {
1361                 dev_err(dev, "Failed to get MSI domain\n");
1362                 return -EPROBE_DEFER;
1363         }
1364
1365         ret = k3_ringacc_probe_dt(ringacc);
1366         if (ret)
1367                 return ret;
1368
1369         soc = soc_device_match(k3_ringacc_socinfo);
1370         if (soc && soc->data) {
1371                 const struct k3_ringacc_soc_data *soc_data = soc->data;
1372
1373                 ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
1374         }
1375
1376         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
1377         base_rt = devm_ioremap_resource(dev, res);
1378         if (IS_ERR(base_rt))
1379                 return PTR_ERR(base_rt);
1380
1381         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
1382         base_fifo = devm_ioremap_resource(dev, res);
1383         if (IS_ERR(base_fifo))
1384                 return PTR_ERR(base_fifo);
1385
1386         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
1387         ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
1388         if (IS_ERR(ringacc->proxy_gcfg))
1389                 return PTR_ERR(ringacc->proxy_gcfg);
1390
1391         res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1392                                            "proxy_target");
1393         ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
1394         if (IS_ERR(ringacc->proxy_target_base))
1395                 return PTR_ERR(ringacc->proxy_target_base);
1396
1397         ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
1398                                      K3_RINGACC_PROXY_CFG_THREADS_MASK;
1399
1400         ringacc->rings = devm_kzalloc(dev,
1401                                       sizeof(*ringacc->rings) *
1402                                       ringacc->num_rings,
1403                                       GFP_KERNEL);
1404         ringacc->rings_inuse = devm_kcalloc(dev,
1405                                             BITS_TO_LONGS(ringacc->num_rings),
1406                                             sizeof(unsigned long), GFP_KERNEL);
1407         ringacc->proxy_inuse = devm_kcalloc(dev,
1408                                             BITS_TO_LONGS(ringacc->num_proxies),
1409                                             sizeof(unsigned long), GFP_KERNEL);
1410
1411         if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
1412                 return -ENOMEM;
1413
1414         for (i = 0; i < ringacc->num_rings; i++) {
1415                 ringacc->rings[i].rt = base_rt +
1416                                        K3_RINGACC_RT_REGS_STEP * i;
1417                 ringacc->rings[i].fifos = base_fifo +
1418                                           K3_RINGACC_FIFO_REGS_STEP * i;
1419                 ringacc->rings[i].parent = ringacc;
1420                 ringacc->rings[i].ring_id = i;
1421                 ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
1422         }
1423
1424         ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
1425
1426         dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
1427                  ringacc->num_rings,
1428                  ringacc->rm_gp_range->desc[0].start,
1429                  ringacc->rm_gp_range->desc[0].num,
1430                  ringacc->tisci_dev_id);
1431         dev_info(dev, "dma-ring-reset-quirk: %s\n",
1432                  ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
1433         dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
1434                  readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
1435
1436         return 0;
1437 }
1438
/* Per-compatible operations bundle selected via the OF match table */
struct ringacc_match_data {
	struct k3_ringacc_ops ops;
};

/* Full NAVSS ring accelerator variant */
static struct ringacc_match_data k3_ringacc_data = {
	.ops = {
		.init = k3_ringacc_init,
	},
};

/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
	{},
};
1454
1455 struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
1456                                             struct k3_ringacc_init_data *data)
1457 {
1458         struct device *dev = &pdev->dev;
1459         struct k3_ringacc *ringacc;
1460         void __iomem *base_rt;
1461         struct resource *res;
1462         int i;
1463
1464         ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
1465         if (!ringacc)
1466                 return ERR_PTR(-ENOMEM);
1467
1468         ringacc->dev = dev;
1469         ringacc->dma_rings = true;
1470         ringacc->num_rings = data->num_rings;
1471         ringacc->tisci = data->tisci;
1472         ringacc->tisci_dev_id = data->tisci_dev_id;
1473
1474         mutex_init(&ringacc->req_lock);
1475
1476         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
1477         base_rt = devm_ioremap_resource(dev, res);
1478         if (IS_ERR(base_rt))
1479                 return base_rt;
1480
1481         ringacc->rings = devm_kzalloc(dev,
1482                                       sizeof(*ringacc->rings) *
1483                                       ringacc->num_rings * 2,
1484                                       GFP_KERNEL);
1485         ringacc->rings_inuse = devm_kcalloc(dev,
1486                                             BITS_TO_LONGS(ringacc->num_rings),
1487                                             sizeof(unsigned long), GFP_KERNEL);
1488
1489         if (!ringacc->rings || !ringacc->rings_inuse)
1490                 return ERR_PTR(-ENOMEM);
1491
1492         for (i = 0; i < ringacc->num_rings; i++) {
1493                 struct k3_ring *ring = &ringacc->rings[i];
1494
1495                 ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
1496                 ring->parent = ringacc;
1497                 ring->ring_id = i;
1498                 ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
1499
1500                 ring = &ringacc->rings[ringacc->num_rings + i];
1501                 ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
1502                            K3_DMARING_RT_REGS_REVERSE_OFS;
1503                 ring->parent = ringacc;
1504                 ring->ring_id = i;
1505                 ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
1506                 ring->flags = K3_RING_FLAG_REVERSE;
1507         }
1508
1509         ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
1510
1511         dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
1512
1513         return ringacc;
1514 }
1515 EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
1516
/*
 * k3_ringacc_probe - platform driver probe
 * @pdev: the ring accelerator platform device
 *
 * Allocates the ringacc instance, runs the variant-specific init from the
 * OF match data and registers the instance on the global list used by
 * of_k3_ringacc_get_by_phandle(). Returns 0 on success or a negative errno.
 */
static int k3_ringacc_probe(struct platform_device *pdev)
{
	const struct ringacc_match_data *match_data;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct k3_ringacc *ringacc;
	int ret;

	match = of_match_node(k3_ringacc_of_match, dev->of_node);
	if (!match)
		return -ENODEV;
	match_data = match->data;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return -ENOMEM;

	ringacc->dev = dev;
	mutex_init(&ringacc->req_lock);
	ringacc->ops = &match_data->ops;

	ret = ringacc->ops->init(pdev, ringacc);
	if (ret)
		return ret;

	dev_set_drvdata(dev, ringacc);

	/* Publish the instance for phandle-based lookup by client drivers */
	mutex_lock(&k3_ringacc_list_lock);
	list_add_tail(&ringacc->list, &k3_ringacc_list);
	mutex_unlock(&k3_ringacc_list_lock);

	return 0;
}
1550
/* Built-in only: RA is core SoC infrastructure, so unbinding is suppressed */
static struct platform_driver k3_ringacc_driver = {
	.probe		= k3_ringacc_probe,
	.driver		= {
		.name	= "k3-ringacc",
		.of_match_table = k3_ringacc_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(k3_ringacc_driver);