// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of_device.h>

#define CMDQ_OP_CODE_MASK               (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)                 (t->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS            0x10
#define CMDQ_SYNC_TOKEN_UPDATE          0x68
#define CMDQ_THR_SLOT_CYCLES            0x30
#define CMDQ_THR_BASE                   0x100
#define CMDQ_THR_SIZE                   0x80
#define CMDQ_THR_WARM_RESET             0x00
#define CMDQ_THR_ENABLE_TASK            0x04
#define CMDQ_THR_SUSPEND_TASK           0x08
#define CMDQ_THR_CURR_STATUS            0x0c
#define CMDQ_THR_IRQ_STATUS             0x10
#define CMDQ_THR_IRQ_ENABLE             0x14
#define CMDQ_THR_CURR_ADDR              0x20
#define CMDQ_THR_END_ADDR               0x24
#define CMDQ_THR_WAIT_TOKEN             0x30
#define CMDQ_THR_PRIORITY               0x40

#define CMDQ_THR_ACTIVE_SLOT_CYCLES     0x3200
#define CMDQ_THR_ENABLED                0x1
#define CMDQ_THR_DISABLED               0x0
#define CMDQ_THR_SUSPEND                0x1
#define CMDQ_THR_RESUME                 0x0
#define CMDQ_THR_STATUS_SUSPENDED       BIT(1)
#define CMDQ_THR_DO_WARM_RESET          BIT(0)
#define CMDQ_THR_IRQ_DONE               0x1
#define CMDQ_THR_IRQ_ERROR              0x12
#define CMDQ_THR_IRQ_EN                 (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING             BIT(31)

#define CMDQ_JUMP_BY_OFFSET             0x10000000
#define CMDQ_JUMP_BY_PA                 0x10000001

struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
};

struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     thread_nr;
        u32                     irq_mask;
        struct cmdq_thread      *thread;
        struct clk              *clock;
        bool                    suspended;
        u8                      shift_pa;
};

struct gce_plat {
        u32 thread_nr;
        u8 shift;
};

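/*
 * cmdq_get_shift_pa() - exported helper returning the PA shift of the GCE
 * controller that owns @chan.  Thread PC/END addresses are programmed
 * right-shifted by this amount (see cmdq_mbox_send_data()).
 */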
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
        struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

        return cmdq->shift_pa;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

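/*
 * Ask the hardware to suspend a GCE thread and poll (atomically, up to 10us)
 * for CMDQ_THR_STATUS_SUSPENDED.  A thread that is already disabled is
 * treated as successfully suspended.
 */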
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If already disabled, treat it as successfully suspended. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

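/*
 * One-time controller init: program the active slot cycles and clear every
 * sync token by writing each event index to CMDQ_SYNC_TOKEN_UPDATE, with the
 * GCE clock enabled only for the duration of the writes.
 */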
static void cmdq_init(struct cmdq *cmdq)
{
        int i;

        WARN_ON(clk_enable(cmdq->clock) < 0);
        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        for (i = 0; i <= CMDQ_MAX_EVENT; i++)
                writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
        clk_disable(cmdq->clock);
}

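/*
 * Trigger a warm reset of a GCE thread and poll (up to 10us) for the hardware
 * to clear the reset bit; returns -EFAULT on timeout.
 */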
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

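/*
 * Append a task to a busy thread: rewrite the last instruction of the
 * previous packet into a "jump by physical address" to the new task's
 * buffer, then poke the thread PC so the GCE re-fetches the modified
 * commands.
 */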
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

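/*
 * Complete a task: run the packet's async callback (if any), report the
 * result to the mailbox client via mbox_chan_received_data() and unlink the
 * task from the thread's busy list.  The caller frees the task.
 */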
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
        struct cmdq_task_cb *cb = &task->pkt->async_cb;
        struct cmdq_cb_data data;

        WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
        data.sta = sta;
        data.data = cb->data;
        data.pkt = task->pkt;
        if (cb->cb)
                cb->cb(data);

        mbox_chan_received_data(task->thread->chan, &data);

        list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;
        struct cmdq *cmdq = task->cmdq;

        dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                        struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}

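/*
 * Per-thread interrupt handling, called with the channel lock held: ack the
 * thread IRQ flags, find the task the thread PC currently points into,
 * complete every task that already finished, and disable the thread (and
 * drop its clock reference) once the busy list is empty.
 */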
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        u32 curr_pa, irq_flag, task_end_pa;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * By the time the ISR calls this function, another CPU core may have
         * run "release task" right before we acquired the spin lock and thus
         * reset / disabled this GCE thread, so we need to check the enable
         * bit of this GCE thread.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, 0);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, -ENOEXEC);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list)) {
                cmdq_thread_disable(cmdq, thread);
                clk_disable(cmdq->clock);
        }
}

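/*
 * Top-level ISR.  CMDQ_CURR_IRQ_STATUS is active-low: a cleared bit marks a
 * thread with a pending interrupt, so walk the clear bits and run each
 * thread's handler under its channel lock.
 */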
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
        if (!(irq_status ^ cmdq->irq_mask))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        return IRQ_HANDLED;
}

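/*
 * System PM suspend: mark the controller suspended, warn if any GCE thread
 * still has queued tasks, then drop the clock prepare count.
 */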
static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "task(s) still running during suspend\n");

        clk_unprepare(cmdq->clock);

        return 0;
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(clk_prepare(cmdq->clock) < 0);
        cmdq->suspended = false;
        return 0;
}

static int cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        clk_unprepare(cmdq->clock);

        return 0;
}

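/*
 * Mailbox .send_data: queue a cmdq_pkt on the channel's GCE thread.  An idle
 * thread is reset, programmed with the packet's start/end PA and enabled; a
 * busy thread is briefly suspended so the new packet can either be jumped to
 * directly (when the previous one already ran to its end) or chained after
 * the last queued task, then resumed.
 */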
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;

        /* Client should not flush new tasks if suspended. */
        WARN_ON(cmdq->suspended);

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task)
                return -ENOMEM;

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                WARN_ON(clk_enable(cmdq->clock) < 0);
                /*
                 * The thread reset will clear the thread-related registers to
                 * 0, including pc, end, priority, irq, suspend and enable.
                 * Thus writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will
                 * enable the thread and start it running.
                 */
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);

                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                        cmdq->shift_pa;
                end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
                        cmdq->shift_pa;
                /* check boundary */
                if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                    curr_pa == end_pa) {
                        /* set to this task directly */
                        writel(task->pa_base >> cmdq->shift_pa,
                               thread->base + CMDQ_THR_CURR_ADDR);
                } else {
                        cmdq_task_insert_into_thread(task);
                        smp_mb(); /* modify jump before enable thread */
                }
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

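/*
 * Mailbox .shutdown: under the channel lock, let already-finished tasks
 * complete via the IRQ handler, abort whatever is still queued with
 * -ECONNABORTED, and disable the thread.
 */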
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

        /* make sure executed tasks have success callback */
        cmdq_thread_irq_handler(cmdq, thread);
        if (list_empty(&thread->task_busy_list))
                goto done;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cmdq_task_exec_done(task, -ECONNABORTED);
                kfree(task);
        }

        cmdq_thread_disable(cmdq, thread);
        clk_disable(cmdq->clock);
done:
        /*
         * An empty thread->task_busy_list means the thread is already
         * disabled. cmdq_mbox_send_data() always resets the thread, which
         * clears the disable and suspend status when the first packet is
         * sent to the channel, so there is no need to do anything here;
         * just unlock and leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);
}

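/*
 * Mailbox .flush: if the thread is parked in a wait-for-event instruction,
 * abort every queued task with -ECONNABORTED and disable the thread;
 * otherwise resume it and poll (bounded by @timeout) for the thread to be
 * disabled by the IRQ handler once its busy list drains.
 */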
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq_task_cb *cb;
        struct cmdq_cb_data data;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto out;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        if (!cmdq_thread_is_in_wfe(thread))
                goto wait;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cb = &task->pkt->async_cb;
                data.sta = -ECONNABORTED;
                data.data = cb->data;
                data.pkt = task->pkt;
                if (cb->cb)
                        cb->cb(data);

                mbox_chan_received_data(task->thread->chan, &data);
                list_del(&task->list_entry);
                kfree(task);
        }

        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);
        clk_disable(cmdq->clock);

out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        return 0;

wait:
        cmdq_thread_resume(thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                                      enable, enable == 0, 1, timeout)) {
                dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
                        (u32)(thread->base - cmdq->base));

                return -EFAULT;
        }
        return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
        .flush = cmdq_mbox_flush,
};

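/*
 * of_xlate: the mbox specifier carries the GCE thread index in args[0] and
 * the thread priority in args[1]; bind the matching channel to its thread.
 */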
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}

static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct cmdq *cmdq;
        int err, i;
        struct gce_plat *plat_data;

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cmdq->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(cmdq->base))
                return PTR_ERR(cmdq->base);

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0)
                return cmdq->irq;

        plat_data = (struct gce_plat *)of_device_get_match_data(dev);
        if (!plat_data) {
                dev_err(dev, "failed to get match data\n");
                return -EINVAL;
        }

        cmdq->thread_nr = plat_data->thread_nr;
        cmdq->shift_pa = plat_data->shift;
        cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        cmdq->clock = devm_clk_get(dev, "gce");
        if (IS_ERR(cmdq->clock)) {
                dev_err(dev, "failed to get gce clk\n");
                return PTR_ERR(cmdq->clock);
        }

        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        platform_set_drvdata(pdev, cmdq);
        WARN_ON(clk_prepare(cmdq->clock) < 0);

        cmdq_init(cmdq);

        return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
};

static const struct gce_plat gce_plat_v2 = {.thread_nr = 16};
static const struct gce_plat gce_plat_v3 = {.thread_nr = 24};
static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3};

static const struct of_device_id cmdq_of_ids[] = {
        {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
        {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
        {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
        {}
};

static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
        .remove = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
                .of_match_table = cmdq_of_ids,
        }
};

static int __init cmdq_drv_init(void)
{
        return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
        platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");