drivers/remoteproc/qcom_q6v5_mss.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/devcoredump.h>
13 #include <linux/dma-map-ops.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/mfd/syscon.h>
18 #include <linux/module.h>
19 #include <linux/of_address.h>
20 #include <linux/of_device.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_domain.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/regmap.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/remoteproc.h>
27 #include <linux/reset.h>
28 #include <linux/soc/qcom/mdt_loader.h>
29 #include <linux/iopoll.h>
30 #include <linux/slab.h>
31
32 #include "remoteproc_internal.h"
33 #include "qcom_common.h"
34 #include "qcom_pil_info.h"
35 #include "qcom_q6v5.h"
36
37 #include <linux/qcom_scm.h>
38
39 #define MPSS_CRASH_REASON_SMEM          421
40
41 #define MBA_LOG_SIZE                    SZ_4K
42
43 /* RMB Status Register Values */
44 #define RMB_PBL_SUCCESS                 0x1
45
46 #define RMB_MBA_XPU_UNLOCKED            0x1
47 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED  0x2
48 #define RMB_MBA_META_DATA_AUTH_SUCCESS  0x3
49 #define RMB_MBA_AUTH_COMPLETE           0x4
50
51 /* PBL/MBA interface registers */
52 #define RMB_MBA_IMAGE_REG               0x00
53 #define RMB_PBL_STATUS_REG              0x04
54 #define RMB_MBA_COMMAND_REG             0x08
55 #define RMB_MBA_STATUS_REG              0x0C
56 #define RMB_PMI_META_DATA_REG           0x10
57 #define RMB_PMI_CODE_START_REG          0x14
58 #define RMB_PMI_CODE_LENGTH_REG         0x18
59 #define RMB_MBA_MSS_STATUS              0x40
60 #define RMB_MBA_ALT_RESET               0x44
61
62 #define RMB_CMD_META_DATA_READY         0x1
63 #define RMB_CMD_LOAD_READY              0x2
64
65 /* QDSP6SS Register Offsets */
66 #define QDSP6SS_RESET_REG               0x014
67 #define QDSP6SS_GFMUX_CTL_REG           0x020
68 #define QDSP6SS_PWR_CTL_REG             0x030
69 #define QDSP6SS_MEM_PWR_CTL             0x0B0
70 #define QDSP6V6SS_MEM_PWR_CTL           0x034
71 #define QDSP6SS_STRAP_ACC               0x110
72
73 /* AXI Halt Register Offsets */
74 #define AXI_HALTREQ_REG                 0x0
75 #define AXI_HALTACK_REG                 0x4
76 #define AXI_IDLE_REG                    0x8
77 #define AXI_GATING_VALID_OVERRIDE       BIT(0)
78
79 #define HALT_ACK_TIMEOUT_US             100000
80
81 /* QACCEPT Register Offsets */
82 #define QACCEPT_ACCEPT_REG              0x0
83 #define QACCEPT_ACTIVE_REG              0x4
84 #define QACCEPT_DENY_REG                0x8
85 #define QACCEPT_REQ_REG                 0xC
86
87 #define QACCEPT_TIMEOUT_US              50
88
89 /* QDSP6SS_RESET */
90 #define Q6SS_STOP_CORE                  BIT(0)
91 #define Q6SS_CORE_ARES                  BIT(1)
92 #define Q6SS_BUS_ARES_ENABLE            BIT(2)
93
94 /* QDSP6SS CBCR */
95 #define Q6SS_CBCR_CLKEN                 BIT(0)
96 #define Q6SS_CBCR_CLKOFF                BIT(31)
97 #define Q6SS_CBCR_TIMEOUT_US            200
98
99 /* QDSP6SS_GFMUX_CTL */
100 #define Q6SS_CLK_ENABLE                 BIT(1)
101
102 /* QDSP6SS_PWR_CTL */
103 #define Q6SS_L2DATA_SLP_NRET_N_0        BIT(0)
104 #define Q6SS_L2DATA_SLP_NRET_N_1        BIT(1)
105 #define Q6SS_L2DATA_SLP_NRET_N_2        BIT(2)
106 #define Q6SS_L2TAG_SLP_NRET_N           BIT(16)
107 #define Q6SS_ETB_SLP_NRET_N             BIT(17)
108 #define Q6SS_L2DATA_STBY_N              BIT(18)
109 #define Q6SS_SLP_RET_N                  BIT(19)
110 #define Q6SS_CLAMP_IO                   BIT(20)
111 #define QDSS_BHS_ON                     BIT(21)
112 #define QDSS_LDO_BYP                    BIT(22)
113
114 /* QDSP6v56 parameters */
115 #define QDSP6v56_LDO_BYP                BIT(25)
 116 #define QDSP6v56_BHS_ON                 BIT(24)
 117 #define QDSP6v56_CLAMP_WL               BIT(21)
 118 #define QDSP6v56_CLAMP_QMC_MEM          BIT(22)
 119 #define QDSP6SS_XO_CBCR                 0x0038
 120 #define QDSP6SS_ACC_OVERRIDE_VAL        0x20
121
122 /* QDSP6v65 parameters */
123 #define QDSP6SS_CORE_CBCR               0x20
124 #define QDSP6SS_SLEEP                   0x3C
125 #define QDSP6SS_BOOT_CORE_START         0x400
126 #define QDSP6SS_BOOT_CMD                0x404
127 #define BOOT_FSM_TIMEOUT                10000
128
129 struct reg_info {
130         struct regulator *reg;
131         int uV;
132         int uA;
133 };
134
135 struct qcom_mss_reg_res {
136         const char *supply;
137         int uV;
138         int uA;
139 };
140
141 struct rproc_hexagon_res {
142         const char *hexagon_mba_image;
143         struct qcom_mss_reg_res *proxy_supply;
144         struct qcom_mss_reg_res *fallback_proxy_supply;
145         struct qcom_mss_reg_res *active_supply;
146         char **proxy_clk_names;
147         char **reset_clk_names;
148         char **active_clk_names;
149         char **proxy_pd_names;
150         int version;
151         bool need_mem_protection;
152         bool has_alt_reset;
153         bool has_mba_logs;
154         bool has_spare_reg;
155         bool has_qaccept_regs;
156         bool has_ext_cntl_regs;
157         bool has_vq6;
158 };
159
160 struct q6v5 {
161         struct device *dev;
162         struct rproc *rproc;
163
164         void __iomem *reg_base;
165         void __iomem *rmb_base;
166
167         struct regmap *halt_map;
168         struct regmap *conn_map;
169
170         u32 halt_q6;
171         u32 halt_modem;
172         u32 halt_nc;
173         u32 halt_vq6;
174         u32 conn_box;
175
176         u32 qaccept_mdm;
177         u32 qaccept_cx;
178         u32 qaccept_axi;
179
180         u32 axim1_clk_off;
181         u32 crypto_clk_off;
182         u32 force_clk_on;
183         u32 rscc_disable;
184
185         struct reset_control *mss_restart;
186         struct reset_control *pdc_reset;
187
188         struct qcom_q6v5 q6v5;
189
190         struct clk *active_clks[8];
191         struct clk *reset_clks[4];
192         struct clk *proxy_clks[4];
193         struct device *proxy_pds[3];
194         int active_clk_count;
195         int reset_clk_count;
196         int proxy_clk_count;
197         int proxy_pd_count;
198
199         struct reg_info active_regs[1];
200         struct reg_info proxy_regs[1];
201         struct reg_info fallback_proxy_regs[2];
202         int active_reg_count;
203         int proxy_reg_count;
204         int fallback_proxy_reg_count;
205
206         bool dump_mba_loaded;
207         size_t current_dump_size;
208         size_t total_dump_size;
209
210         phys_addr_t mba_phys;
211         size_t mba_size;
212         size_t dp_size;
213
214         phys_addr_t mpss_phys;
215         phys_addr_t mpss_reloc;
216         size_t mpss_size;
217
218         struct qcom_rproc_glink glink_subdev;
219         struct qcom_rproc_subdev smd_subdev;
220         struct qcom_rproc_ssr ssr_subdev;
221         struct qcom_sysmon *sysmon;
222         struct platform_device *bam_dmux;
223         bool need_mem_protection;
224         bool has_alt_reset;
225         bool has_mba_logs;
226         bool has_spare_reg;
227         bool has_qaccept_regs;
228         bool has_ext_cntl_regs;
229         bool has_vq6;
230         int mpss_perm;
231         int mba_perm;
232         const char *hexagon_mdt_image;
233         int version;
234 };
235
236 enum {
237         MSS_MSM8916,
238         MSS_MSM8974,
239         MSS_MSM8996,
240         MSS_MSM8998,
241         MSS_SC7180,
242         MSS_SC7280,
243         MSS_SDM845,
244 };
245
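/*
 * Look up the regulators listed in @reg_res (a table terminated by an entry
 * with no supply name) and record the voltage and load votes to apply while
 * they are enabled. Returns the number of regulators acquired, or a negative
 * errno if any lookup fails.
 */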
246 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
247                                const struct qcom_mss_reg_res *reg_res)
248 {
249         int rc;
250         int i;
251
252         if (!reg_res)
253                 return 0;
254
255         for (i = 0; reg_res[i].supply; i++) {
256                 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
257                 if (IS_ERR(regs[i].reg)) {
258                         rc = PTR_ERR(regs[i].reg);
259                         if (rc != -EPROBE_DEFER)
 260                                 dev_err(dev, "Failed to get %s regulator\n",
 261                                         reg_res[i].supply);
262                         return rc;
263                 }
264
265                 regs[i].uV = reg_res[i].uV;
266                 regs[i].uA = reg_res[i].uA;
267         }
268
269         return i;
270 }
271
272 static int q6v5_regulator_enable(struct q6v5 *qproc,
273                                  struct reg_info *regs, int count)
274 {
275         int ret;
276         int i;
277
278         for (i = 0; i < count; i++) {
279                 if (regs[i].uV > 0) {
280                         ret = regulator_set_voltage(regs[i].reg,
281                                         regs[i].uV, INT_MAX);
282                         if (ret) {
283                                 dev_err(qproc->dev,
284                                         "Failed to request voltage for %d.\n",
 285                                         i);
286                                 goto err;
287                         }
288                 }
289
290                 if (regs[i].uA > 0) {
291                         ret = regulator_set_load(regs[i].reg,
292                                                  regs[i].uA);
293                         if (ret < 0) {
294                                 dev_err(qproc->dev,
295                                         "Failed to set regulator mode\n");
296                                 goto err;
297                         }
298                 }
299
300                 ret = regulator_enable(regs[i].reg);
301                 if (ret) {
302                         dev_err(qproc->dev, "Regulator enable failed\n");
303                         goto err;
304                 }
305         }
306
307         return 0;
308 err:
309         for (; i >= 0; i--) {
310                 if (regs[i].uV > 0)
311                         regulator_set_voltage(regs[i].reg, 0, INT_MAX);
312
313                 if (regs[i].uA > 0)
314                         regulator_set_load(regs[i].reg, 0);
315
316                 regulator_disable(regs[i].reg);
317         }
318
319         return ret;
320 }
321
322 static void q6v5_regulator_disable(struct q6v5 *qproc,
323                                    struct reg_info *regs, int count)
324 {
325         int i;
326
327         for (i = 0; i < count; i++) {
328                 if (regs[i].uV > 0)
329                         regulator_set_voltage(regs[i].reg, 0, INT_MAX);
330
331                 if (regs[i].uA > 0)
332                         regulator_set_load(regs[i].reg, 0);
333
334                 regulator_disable(regs[i].reg);
335         }
336 }
337
338 static int q6v5_clk_enable(struct device *dev,
339                            struct clk **clks, int count)
340 {
341         int rc;
342         int i;
343
344         for (i = 0; i < count; i++) {
345                 rc = clk_prepare_enable(clks[i]);
346                 if (rc) {
347                         dev_err(dev, "Clock enable failed\n");
348                         goto err;
349                 }
350         }
351
352         return 0;
353 err:
354         for (i--; i >= 0; i--)
355                 clk_disable_unprepare(clks[i]);
356
357         return rc;
358 }
359
360 static void q6v5_clk_disable(struct device *dev,
361                              struct clk **clks, int count)
362 {
363         int i;
364
365         for (i = 0; i < count; i++)
366                 clk_disable_unprepare(clks[i]);
367 }
368
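/*
 * Vote each proxy power domain to its maximum performance state and take a
 * runtime PM reference on it; q6v5_pds_disable() drops the votes again.
 */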
369 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
370                            size_t pd_count)
371 {
372         int ret;
373         int i;
374
375         for (i = 0; i < pd_count; i++) {
376                 dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
377                 ret = pm_runtime_get_sync(pds[i]);
378                 if (ret < 0) {
379                         pm_runtime_put_noidle(pds[i]);
380                         dev_pm_genpd_set_performance_state(pds[i], 0);
381                         goto unroll_pd_votes;
382                 }
383         }
384
385         return 0;
386
387 unroll_pd_votes:
388         for (i--; i >= 0; i--) {
389                 dev_pm_genpd_set_performance_state(pds[i], 0);
390                 pm_runtime_put(pds[i]);
391         }
392
393         return ret;
394 }
395
396 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
397                              size_t pd_count)
398 {
399         int i;
400
401         for (i = 0; i < pd_count; i++) {
402                 dev_pm_genpd_set_performance_state(pds[i], 0);
403                 pm_runtime_put(pds[i]);
404         }
405 }
406
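/*
 * Reassign ownership of the memory region at @addr/@size between Linux (HLOS)
 * and the modem (MSS MSA) via a secure call. @current_perm tracks the current
 * VMID bitmap and is updated on success; this is a no-op when memory
 * protection is not required or the requested split is already in place.
 */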
407 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
408                                    bool local, bool remote, phys_addr_t addr,
409                                    size_t size)
410 {
411         struct qcom_scm_vmperm next[2];
412         int perms = 0;
413
414         if (!qproc->need_mem_protection)
415                 return 0;
416
417         if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
418             remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
419                 return 0;
420
421         if (local) {
422                 next[perms].vmid = QCOM_SCM_VMID_HLOS;
423                 next[perms].perm = QCOM_SCM_PERM_RWX;
424                 perms++;
425         }
426
427         if (remote) {
428                 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
429                 next[perms].perm = QCOM_SCM_PERM_RW;
430                 perms++;
431         }
432
433         return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
434                                    current_perm, next, perms);
435 }
436
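/*
 * Optionally append the "msadp" debug policy blob at a 1M offset into the MBA
 * region; the blob being absent is not an error.
 */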
437 static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
438 {
439         const struct firmware *dp_fw;
440
441         if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
442                 return;
443
444         if (SZ_1M + dp_fw->size <= qproc->mba_size) {
445                 memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
446                 qproc->dp_size = dp_fw->size;
447         }
448
449         release_firmware(dp_fw);
450 }
451
452 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
453 {
454         struct q6v5 *qproc = rproc->priv;
455         void *mba_region;
456
457         /* MBA is restricted to a maximum size of 1M */
458         if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
459                 dev_err(qproc->dev, "MBA firmware load failed\n");
460                 return -EINVAL;
461         }
462
463         mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
464         if (!mba_region) {
465                 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
466                         &qproc->mba_phys, qproc->mba_size);
467                 return -EBUSY;
468         }
469
470         memcpy(mba_region, fw->data, fw->size);
471         q6v5_debug_policy_load(qproc, mba_region);
472         memunmap(mba_region);
473
474         return 0;
475 }
476
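/*
 * Put the modem subsystem into reset using the SoC-specific sequence selected
 * at probe time: alt reset, the spare-register AXI workaround, the external
 * control registers, or a plain mss_restart assert.
 */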
477 static int q6v5_reset_assert(struct q6v5 *qproc)
478 {
479         int ret;
480
481         if (qproc->has_alt_reset) {
482                 reset_control_assert(qproc->pdc_reset);
483                 ret = reset_control_reset(qproc->mss_restart);
484                 reset_control_deassert(qproc->pdc_reset);
485         } else if (qproc->has_spare_reg) {
 486                 /*
 487                  * When the AXI pipeline is reset while the Q6 modem is partly
 488                  * operational, the AXI valid signal may glitch, leading to
 489                  * spurious transactions and Q6 hangs. As a workaround, the
 490                  * AXI_GATING_VALID_OVERRIDE bit is asserted before triggering
 491                  * the Q6 MSS reset. AXI_GATING_VALID_OVERRIDE is withdrawn
 492                  * after the MSS assert, followed by an MSS deassert, while
 493                  * holding the PDC reset.
 494                  */
495                 reset_control_assert(qproc->pdc_reset);
496                 regmap_update_bits(qproc->conn_map, qproc->conn_box,
497                                    AXI_GATING_VALID_OVERRIDE, 1);
498                 reset_control_assert(qproc->mss_restart);
499                 reset_control_deassert(qproc->pdc_reset);
500                 regmap_update_bits(qproc->conn_map, qproc->conn_box,
501                                    AXI_GATING_VALID_OVERRIDE, 0);
502                 ret = reset_control_deassert(qproc->mss_restart);
503         } else if (qproc->has_ext_cntl_regs) {
504                 regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
505                 reset_control_assert(qproc->pdc_reset);
506                 reset_control_assert(qproc->mss_restart);
507                 reset_control_deassert(qproc->pdc_reset);
508                 ret = reset_control_deassert(qproc->mss_restart);
509         } else {
510                 ret = reset_control_assert(qproc->mss_restart);
511         }
512
513         return ret;
514 }
515
516 static int q6v5_reset_deassert(struct q6v5 *qproc)
517 {
518         int ret;
519
520         if (qproc->has_alt_reset) {
521                 reset_control_assert(qproc->pdc_reset);
522                 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
523                 ret = reset_control_reset(qproc->mss_restart);
524                 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
525                 reset_control_deassert(qproc->pdc_reset);
526         } else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
527                 ret = reset_control_reset(qproc->mss_restart);
528         } else {
529                 ret = reset_control_deassert(qproc->mss_restart);
530         }
531
532         return ret;
533 }
534
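/*
 * Poll the PBL status register until it reports a non-zero value or @ms
 * milliseconds have elapsed. Returns the status value or -ETIMEDOUT.
 */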
535 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
536 {
537         unsigned long timeout;
538         s32 val;
539
540         timeout = jiffies + msecs_to_jiffies(ms);
541         for (;;) {
542                 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
543                 if (val)
544                         break;
545
546                 if (time_after(jiffies, timeout))
547                         return -ETIMEDOUT;
548
549                 msleep(1);
550         }
551
552         return val;
553 }
554
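/*
 * Poll the MBA status register until it matches @status (or becomes non-zero
 * when @status is 0), turns negative (error) or @ms milliseconds have elapsed.
 * Returns the status value or -ETIMEDOUT.
 */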
555 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
556 {
558         unsigned long timeout;
559         s32 val;
560
561         timeout = jiffies + msecs_to_jiffies(ms);
562         for (;;) {
563                 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
564                 if (val < 0)
565                         break;
566
567                 if (!status && val)
568                         break;
569                 else if (status && val == status)
570                         break;
571
572                 if (time_after(jiffies, timeout))
573                         return -ETIMEDOUT;
574
575                 msleep(1);
576         }
577
578         return val;
579 }
580
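/*
 * On platforms with MBA logs, reclaim the MBA region for Linux and hand the
 * first 4K of it to devcoredump for post-mortem analysis.
 */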
581 static void q6v5_dump_mba_logs(struct q6v5 *qproc)
582 {
583         struct rproc *rproc = qproc->rproc;
584         void *data;
585         void *mba_region;
586
587         if (!qproc->has_mba_logs)
588                 return;
589
590         if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
591                                     qproc->mba_size))
592                 return;
593
594         mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
595         if (!mba_region)
596                 return;
597
598         data = vmalloc(MBA_LOG_SIZE);
599         if (data) {
600                 memcpy(data, mba_region, MBA_LOG_SIZE);
601                 dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
602         }
603         memunmap(mba_region);
604 }
605
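/*
 * Bring the Hexagon core out of reset and wait for the PBL to report success.
 * Newer parts (SDM845, SC7180, SC7280) only need the boot FSM to be kicked,
 * while older parts go through the manual head switch, memory power-up and
 * clamp release sequence.
 */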
606 static int q6v5proc_reset(struct q6v5 *qproc)
607 {
608         u32 val;
609         int ret;
610         int i;
611
612         if (qproc->version == MSS_SDM845) {
613                 val = readl(qproc->reg_base + QDSP6SS_SLEEP);
614                 val |= Q6SS_CBCR_CLKEN;
615                 writel(val, qproc->reg_base + QDSP6SS_SLEEP);
616
617                 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
618                                          val, !(val & Q6SS_CBCR_CLKOFF), 1,
619                                          Q6SS_CBCR_TIMEOUT_US);
620                 if (ret) {
621                         dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
622                         return -ETIMEDOUT;
623                 }
624
625                 /* De-assert QDSP6 stop core */
626                 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
627                 /* Trigger boot FSM */
628                 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
629
630                 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
631                                 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
632                 if (ret) {
633                         dev_err(qproc->dev, "Boot FSM failed to complete.\n");
634                         /* Reset the modem so that boot FSM is in reset state */
635                         q6v5_reset_deassert(qproc);
636                         return ret;
637                 }
638
639                 goto pbl_wait;
640         } else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
641                 val = readl(qproc->reg_base + QDSP6SS_SLEEP);
642                 val |= Q6SS_CBCR_CLKEN;
643                 writel(val, qproc->reg_base + QDSP6SS_SLEEP);
644
645                 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
646                                          val, !(val & Q6SS_CBCR_CLKOFF), 1,
647                                          Q6SS_CBCR_TIMEOUT_US);
648                 if (ret) {
649                         dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
650                         return -ETIMEDOUT;
651                 }
652
653                 /* Turn on the XO clock needed for PLL setup */
654                 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
655                 val |= Q6SS_CBCR_CLKEN;
656                 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
657
658                 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
659                                          val, !(val & Q6SS_CBCR_CLKOFF), 1,
660                                          Q6SS_CBCR_TIMEOUT_US);
661                 if (ret) {
662                         dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
663                         return -ETIMEDOUT;
664                 }
665
666                 /* Configure Q6 core CBCR to auto-enable after reset sequence */
667                 val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
668                 val |= Q6SS_CBCR_CLKEN;
669                 writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
670
671                 /* De-assert the Q6 stop core signal */
672                 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
673
674                 /* Wait for 10 us for any staggering logic to settle */
675                 usleep_range(10, 20);
676
677                 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
678                 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
679
680                 /* Poll the MSS_STATUS for FSM completion */
681                 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
682                                          val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
683                 if (ret) {
684                         dev_err(qproc->dev, "Boot FSM failed to complete.\n");
685                         /* Reset the modem so that boot FSM is in reset state */
686                         q6v5_reset_deassert(qproc);
687                         return ret;
688                 }
689                 goto pbl_wait;
690         } else if (qproc->version == MSS_MSM8996 ||
691                    qproc->version == MSS_MSM8998) {
692                 int mem_pwr_ctl;
693
694                 /* Override the ACC value if required */
695                 writel(QDSP6SS_ACC_OVERRIDE_VAL,
696                        qproc->reg_base + QDSP6SS_STRAP_ACC);
697
698                 /* Assert resets, stop core */
699                 val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
700                 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
701                 writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
702
 703                 /* BHS requires the XO CBCR to be enabled */
704                 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
705                 val |= Q6SS_CBCR_CLKEN;
706                 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
707
 708                 /* Wait for CLKOFF bit to go low, indicating CLK is enabled */
709                 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
710                                          val, !(val & Q6SS_CBCR_CLKOFF), 1,
711                                          Q6SS_CBCR_TIMEOUT_US);
712                 if (ret) {
713                         dev_err(qproc->dev,
714                                 "xo cbcr enabling timed out (rc:%d)\n", ret);
715                         return ret;
716                 }
717                 /* Enable power block headswitch and wait for it to stabilize */
718                 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
719                 val |= QDSP6v56_BHS_ON;
720                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
721                 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
722                 udelay(1);
723
724                 /* Put LDO in bypass mode */
725                 val |= QDSP6v56_LDO_BYP;
726                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
727
728                 /* Deassert QDSP6 compiler memory clamp */
729                 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
730                 val &= ~QDSP6v56_CLAMP_QMC_MEM;
731                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
732
733                 /* Deassert memory peripheral sleep and L2 memory standby */
734                 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
735                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
736
 737                 /* Turn on L1, L2, ETB and JU memories one at a time */
738                 if (qproc->version == MSS_MSM8996) {
739                         mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
740                         i = 19;
741                 } else {
742                         /* MSS_MSM8998 */
743                         mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
744                         i = 28;
745                 }
746                 val = readl(qproc->reg_base + mem_pwr_ctl);
747                 for (; i >= 0; i--) {
748                         val |= BIT(i);
749                         writel(val, qproc->reg_base + mem_pwr_ctl);
750                         /*
751                          * Read back value to ensure the write is done then
752                          * wait for 1us for both memory peripheral and data
753                          * array to turn on.
754                          */
755                         val |= readl(qproc->reg_base + mem_pwr_ctl);
756                         udelay(1);
757                 }
758                 /* Remove word line clamp */
759                 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
760                 val &= ~QDSP6v56_CLAMP_WL;
761                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
762         } else {
763                 /* Assert resets, stop core */
764                 val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
765                 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
766                 writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
767
768                 /* Enable power block headswitch and wait for it to stabilize */
769                 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
770                 val |= QDSS_BHS_ON | QDSS_LDO_BYP;
771                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
772                 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
773                 udelay(1);
774                 /*
775                  * Turn on memories. L2 banks should be done individually
776                  * to minimize inrush current.
777                  */
778                 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
779                 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
780                         Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
781                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
782                 val |= Q6SS_L2DATA_SLP_NRET_N_2;
783                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
784                 val |= Q6SS_L2DATA_SLP_NRET_N_1;
785                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
786                 val |= Q6SS_L2DATA_SLP_NRET_N_0;
787                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
788         }
789         /* Remove IO clamp */
790         val &= ~Q6SS_CLAMP_IO;
791         writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
792
793         /* Bring core out of reset */
794         val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
795         val &= ~Q6SS_CORE_ARES;
796         writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
797
798         /* Turn on core clock */
799         val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
800         val |= Q6SS_CLK_ENABLE;
801         writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
802
803         /* Start core execution */
804         val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
805         val &= ~Q6SS_STOP_CORE;
806         writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
807
808 pbl_wait:
809         /* Wait for PBL status */
810         ret = q6v5_rmb_pbl_wait(qproc, 1000);
811         if (ret == -ETIMEDOUT) {
812                 dev_err(qproc->dev, "PBL boot timed out\n");
813         } else if (ret != RMB_PBL_SUCCESS) {
814                 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
815                 ret = -EINVAL;
816         } else {
817                 ret = 0;
818         }
819
820         return ret;
821 }
822
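/*
 * Request a Q-channel handshake at @offset and wait for it to be accepted,
 * forcing the relevant clocks on first when external control registers are
 * present. This is a no-op on platforms without QACCEPT registers.
 */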
823 static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
824 {
825         unsigned int val;
826         int ret;
827
828         if (!qproc->has_qaccept_regs)
829                 return 0;
830
831         if (qproc->has_ext_cntl_regs) {
832                 regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
833                 regmap_write(qproc->conn_map, qproc->force_clk_on, 1);
834
835                 ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
836                                                !val, 1, Q6SS_CBCR_TIMEOUT_US);
837                 if (ret) {
838                         dev_err(qproc->dev, "failed to enable axim1 clock\n");
839                         return -ETIMEDOUT;
840                 }
841         }
842
843         regmap_write(map, offset + QACCEPT_REQ_REG, 1);
844
845         /* Wait for accept */
846         ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
847                                        QACCEPT_TIMEOUT_US);
848         if (ret) {
849                 dev_err(qproc->dev, "qchannel enable failed\n");
850                 return -ETIMEDOUT;
851         }
852
853         return 0;
854 }
855
856 static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
857 {
858         int ret;
859         unsigned int val, retry;
860         unsigned int nretry = 10;
861         bool takedown_complete = false;
862
863         if (!qproc->has_qaccept_regs)
864                 return;
865
866         while (!takedown_complete && nretry) {
867                 nretry--;
868
869                 /* Wait for active transactions to complete */
870                 regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
871                                          QACCEPT_TIMEOUT_US);
872
873                 /* Request Q-channel transaction takedown */
874                 regmap_write(map, offset + QACCEPT_REQ_REG, 0);
875
876                 /*
877                  * If the request is denied, reset the Q-channel takedown request,
878                  * wait for active transactions to complete and retry takedown.
879                  */
880                 retry = 10;
881                 while (retry) {
882                         usleep_range(5, 10);
883                         retry--;
884                         ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
885                         if (!ret && val) {
886                                 regmap_write(map, offset + QACCEPT_REQ_REG, 1);
887                                 break;
888                         }
889
890                         ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
891                         if (!ret && !val) {
892                                 takedown_complete = true;
893                                 break;
894                         }
895                 }
896
897                 if (!retry)
898                         break;
899         }
900
901         /* Rely on mss_restart to clear out pending transactions on takedown failure */
902         if (!takedown_complete)
903                 dev_err(qproc->dev, "qchannel takedown failed\n");
904 }
905
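/*
 * Halt the AXI port behind @halt_map/@offset and wait for it to acknowledge
 * and go idle; the port stays halted until the next subsystem reset.
 */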
906 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
907                                    struct regmap *halt_map,
908                                    u32 offset)
909 {
910         unsigned int val;
911         int ret;
912
913         /* Check if we're already idle */
914         ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
915         if (!ret && val)
916                 return;
917
918         /* Assert halt request */
919         regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
920
921         /* Wait for halt */
922         regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
923                                  val, 1000, HALT_ACK_TIMEOUT_US);
924
925         ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
926         if (ret || !val)
927                 dev_err(qproc->dev, "port failed halt\n");
928
929         /* Clear halt request (port will remain halted until reset) */
930         regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
931 }
932
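/*
 * Copy the MPSS metadata (ELF headers and hash segment) into a DMA buffer,
 * grant the modem access to it, ask the MBA to authenticate it and reclaim
 * the buffer once authentication has finished.
 */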
933 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
934                                 const char *fw_name)
935 {
936         unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
937         unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
938         struct page **pages;
939         struct page *page;
940         dma_addr_t phys;
941         void *metadata;
942         int mdata_perm;
943         int xferop_ret;
944         size_t size;
945         void *vaddr;
946         int count;
947         int ret;
948         int i;
949
950         metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
951         if (IS_ERR(metadata))
952                 return PTR_ERR(metadata);
953
954         page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
955         if (!page) {
956                 kfree(metadata);
957                 dev_err(qproc->dev, "failed to allocate mdt buffer\n");
958                 return -ENOMEM;
959         }
960
961         count = PAGE_ALIGN(size) >> PAGE_SHIFT;
962         pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
963         if (!pages) {
964                 ret = -ENOMEM;
965                 goto free_dma_attrs;
966         }
967
968         for (i = 0; i < count; i++)
969                 pages[i] = nth_page(page, i);
970
971         vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
972         kfree(pages);
973         if (!vaddr) {
974                 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
975                 ret = -EBUSY;
976                 goto free_dma_attrs;
977         }
978
979         memcpy(vaddr, metadata, size);
980
981         vunmap(vaddr);
982
983         /* Hypervisor mapping to access metadata by modem */
984         mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
985         ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
986                                       phys, size);
987         if (ret) {
988                 dev_err(qproc->dev,
989                         "assigning Q6 access to metadata failed: %d\n", ret);
990                 ret = -EAGAIN;
991                 goto free_dma_attrs;
992         }
993
994         writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
995         writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
996
997         ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
998         if (ret == -ETIMEDOUT)
999                 dev_err(qproc->dev, "MPSS header authentication timed out\n");
1000         else if (ret < 0)
1001                 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
1002
1003         /* Metadata authentication done, remove modem access */
1004         xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
1005                                              phys, size);
1006         if (xferop_ret)
1007                 dev_warn(qproc->dev,
1008                          "mdt buffer not reclaimed, system may become unstable\n");
1009
1010 free_dma_attrs:
1011         dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
1012         kfree(metadata);
1013
1014         return ret < 0 ? ret : 0;
1015 }
1016
1017 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
1018 {
1019         if (phdr->p_type != PT_LOAD)
1020                 return false;
1021
1022         if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
1023                 return false;
1024
1025         if (!phdr->p_memsz)
1026                 return false;
1027
1028         return true;
1029 }
1030
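/*
 * Power up the modem subsystem and boot the Modem Boot Authenticator: enable
 * the proxy and active resources, release the resets, hand the MBA and MPSS
 * carveouts to the Q6 and wait for the MBA to report its XPU as unlocked.
 * Everything is unwound in reverse order on failure.
 */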
1031 static int q6v5_mba_load(struct q6v5 *qproc)
1032 {
1033         int ret;
1034         int xfermemop_ret;
1035         bool mba_load_err = false;
1036
1037         ret = qcom_q6v5_prepare(&qproc->q6v5);
1038         if (ret)
1039                 return ret;
1040
1041         ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1042         if (ret < 0) {
1043                 dev_err(qproc->dev, "failed to enable proxy power domains\n");
1044                 goto disable_irqs;
1045         }
1046
1047         ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
1048                                     qproc->fallback_proxy_reg_count);
1049         if (ret) {
1050                 dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
1051                 goto disable_proxy_pds;
1052         }
1053
1054         ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
1055                                     qproc->proxy_reg_count);
1056         if (ret) {
1057                 dev_err(qproc->dev, "failed to enable proxy supplies\n");
1058                 goto disable_fallback_proxy_reg;
1059         }
1060
1061         ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
1062                               qproc->proxy_clk_count);
1063         if (ret) {
1064                 dev_err(qproc->dev, "failed to enable proxy clocks\n");
1065                 goto disable_proxy_reg;
1066         }
1067
1068         ret = q6v5_regulator_enable(qproc, qproc->active_regs,
1069                                     qproc->active_reg_count);
1070         if (ret) {
1071                 dev_err(qproc->dev, "failed to enable supplies\n");
1072                 goto disable_proxy_clk;
1073         }
1074
1075         ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
1076                               qproc->reset_clk_count);
1077         if (ret) {
1078                 dev_err(qproc->dev, "failed to enable reset clocks\n");
1079                 goto disable_vdd;
1080         }
1081
1082         ret = q6v5_reset_deassert(qproc);
1083         if (ret) {
1084                 dev_err(qproc->dev, "failed to deassert mss restart\n");
1085                 goto disable_reset_clks;
1086         }
1087
1088         ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
1089                               qproc->active_clk_count);
1090         if (ret) {
1091                 dev_err(qproc->dev, "failed to enable clocks\n");
1092                 goto assert_reset;
1093         }
1094
1095         ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
1096         if (ret) {
1097                 dev_err(qproc->dev, "failed to enable axi bridge\n");
1098                 goto disable_active_clks;
1099         }
1100
1101         /*
1102          * Some versions of the MBA firmware will also wipe the MPSS region
1103          * upon boot, so provide the Q6 with access to that region as well.
1104          */
1105         ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1106                                       qproc->mpss_phys, qproc->mpss_size);
1107         if (ret) {
1108                 dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
1109                 goto disable_active_clks;
1110         }
1111
1112         /* Assign MBA image access in DDR to q6 */
1113         ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
1114                                       qproc->mba_phys, qproc->mba_size);
1115         if (ret) {
1116                 dev_err(qproc->dev,
1117                         "assigning Q6 access to mba memory failed: %d\n", ret);
1118                 goto disable_active_clks;
1119         }
1120
1121         writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
1122         if (qproc->dp_size) {
1123                 writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1124                 writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1125         }
1126
1127         ret = q6v5proc_reset(qproc);
1128         if (ret)
1129                 goto reclaim_mba;
1130
1131         if (qproc->has_mba_logs)
1132                 qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
1133
1134         ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
1135         if (ret == -ETIMEDOUT) {
1136                 dev_err(qproc->dev, "MBA boot timed out\n");
1137                 goto halt_axi_ports;
1138         } else if (ret != RMB_MBA_XPU_UNLOCKED &&
1139                    ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
1140                 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
1141                 ret = -EINVAL;
1142                 goto halt_axi_ports;
1143         }
1144
1145         qproc->dump_mba_loaded = true;
1146         return 0;
1147
1148 halt_axi_ports:
1149         q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
1150         if (qproc->has_vq6)
1151                 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
1152         q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
1153         q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
1154         q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
1155         q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
1156         q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
1157         mba_load_err = true;
1158 reclaim_mba:
1159         xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1160                                                 false, qproc->mba_phys,
1161                                                 qproc->mba_size);
1162         if (xfermemop_ret) {
1163                 dev_err(qproc->dev,
1164                         "Failed to reclaim mba buffer, system may become unstable\n");
1165         } else if (mba_load_err) {
1166                 q6v5_dump_mba_logs(qproc);
1167         }
1168
1169 disable_active_clks:
1170         q6v5_clk_disable(qproc->dev, qproc->active_clks,
1171                          qproc->active_clk_count);
1172 assert_reset:
1173         q6v5_reset_assert(qproc);
1174 disable_reset_clks:
1175         q6v5_clk_disable(qproc->dev, qproc->reset_clks,
1176                          qproc->reset_clk_count);
1177 disable_vdd:
1178         q6v5_regulator_disable(qproc, qproc->active_regs,
1179                                qproc->active_reg_count);
1180 disable_proxy_clk:
1181         q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1182                          qproc->proxy_clk_count);
1183 disable_proxy_reg:
1184         q6v5_regulator_disable(qproc, qproc->proxy_regs,
1185                                qproc->proxy_reg_count);
1186 disable_fallback_proxy_reg:
1187         q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
1188                                qproc->fallback_proxy_reg_count);
1189 disable_proxy_pds:
1190         q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1191 disable_irqs:
1192         qcom_q6v5_unprepare(&qproc->q6v5);
1193
1194         return ret;
1195 }
1196
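/*
 * Undo q6v5_mba_load(): halt the AXI ports and Q-channels, assert the resets,
 * drop the active resources and reclaim the MBA region for Linux. The proxy
 * resources are released here only when qcom_q6v5_unprepare() reports that
 * they are still held (i.e. no handover took place).
 */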
1197 static void q6v5_mba_reclaim(struct q6v5 *qproc)
1198 {
1199         int ret;
1200         u32 val;
1201
1202         qproc->dump_mba_loaded = false;
1203         qproc->dp_size = 0;
1204
1205         q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
1206         if (qproc->has_vq6)
1207                 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
1208         q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
1209         q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
1210         if (qproc->version == MSS_MSM8996) {
1211                 /*
1212                  * Assert clamps to avoid high MX current during LPASS/MSS restart.
1213                  */
1214                 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
1215                 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
1216                         QDSP6v56_CLAMP_QMC_MEM;
1217                 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
1218         }
1219
1220         if (qproc->has_ext_cntl_regs) {
1221                 regmap_write(qproc->conn_map, qproc->rscc_disable, 1);
1222
1223                 ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
1224                                                !val, 1, Q6SS_CBCR_TIMEOUT_US);
1225                 if (ret)
1226                         dev_err(qproc->dev, "failed to enable axim1 clock\n");
1227
1228                 ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
1229                                                !val, 1, Q6SS_CBCR_TIMEOUT_US);
1230                 if (ret)
1231                         dev_err(qproc->dev, "failed to enable crypto clock\n");
1232         }
1233
1234         q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
1235         q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
1236         q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
1237
1238         q6v5_reset_assert(qproc);
1239
1240         q6v5_clk_disable(qproc->dev, qproc->reset_clks,
1241                          qproc->reset_clk_count);
1242         q6v5_clk_disable(qproc->dev, qproc->active_clks,
1243                          qproc->active_clk_count);
1244         q6v5_regulator_disable(qproc, qproc->active_regs,
1245                                qproc->active_reg_count);
1246
1247         /* In case of a failure or coredump scenario where reclaiming the MBA
1248          * memory could not happen earlier, reclaim it here.
1249          */
1250         ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
1251                                       qproc->mba_phys,
1252                                       qproc->mba_size);
1253         WARN_ON(ret);
1254
1255         ret = qcom_q6v5_unprepare(&qproc->q6v5);
1256         if (ret) {
1257                 q6v5_pds_disable(qproc, qproc->proxy_pds,
1258                                  qproc->proxy_pd_count);
1259                 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1260                                  qproc->proxy_clk_count);
1261                 q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
1262                                        qproc->fallback_proxy_reg_count);
1263                 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1264                                        qproc->proxy_reg_count);
1265         }
1266 }
1267
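/* Reload and boot the MBA so modem memory can be dumped after a crash. */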
1268 static int q6v5_reload_mba(struct rproc *rproc)
1269 {
1270         struct q6v5 *qproc = rproc->priv;
1271         const struct firmware *fw;
1272         int ret;
1273
1274         ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1275         if (ret < 0)
1276                 return ret;
1277
1278         q6v5_load(rproc, fw);
1279         ret = q6v5_mba_load(qproc);
1280         release_firmware(fw);
1281
1282         return ret;
1283 }
1284
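/*
 * Stream the MPSS firmware segments into the modem carveout while the MBA
 * authenticates them piece by piece via the RMB registers, then hand the
 * whole region to the Q6 and wait for authentication to complete.
 */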
1285 static int q6v5_mpss_load(struct q6v5 *qproc)
1286 {
1287         const struct elf32_phdr *phdrs;
1288         const struct elf32_phdr *phdr;
1289         const struct firmware *seg_fw;
1290         const struct firmware *fw;
1291         struct elf32_hdr *ehdr;
1292         phys_addr_t mpss_reloc;
1293         phys_addr_t boot_addr;
1294         phys_addr_t min_addr = PHYS_ADDR_MAX;
1295         phys_addr_t max_addr = 0;
1296         u32 code_length;
1297         bool relocate = false;
1298         char *fw_name;
1299         size_t fw_name_len;
1300         ssize_t offset;
1301         size_t size = 0;
1302         void *ptr;
1303         int ret;
1304         int i;
1305
1306         fw_name_len = strlen(qproc->hexagon_mdt_image);
1307         if (fw_name_len <= 4)
1308                 return -EINVAL;
1309
1310         fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1311         if (!fw_name)
1312                 return -ENOMEM;
1313
1314         ret = request_firmware(&fw, fw_name, qproc->dev);
1315         if (ret < 0) {
1316                 dev_err(qproc->dev, "unable to load %s\n", fw_name);
1317                 goto out;
1318         }
1319
1320         /* Initialize the RMB validator */
1321         writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1322
1323         ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
1324         if (ret)
1325                 goto release_firmware;
1326
1327         ehdr = (struct elf32_hdr *)fw->data;
1328         phdrs = (struct elf32_phdr *)(ehdr + 1);
1329
1330         for (i = 0; i < ehdr->e_phnum; i++) {
1331                 phdr = &phdrs[i];
1332
1333                 if (!q6v5_phdr_valid(phdr))
1334                         continue;
1335
1336                 if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1337                         relocate = true;
1338
1339                 if (phdr->p_paddr < min_addr)
1340                         min_addr = phdr->p_paddr;
1341
1342                 if (phdr->p_paddr + phdr->p_memsz > max_addr)
1343                         max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1344         }
1345
1346         /*
1347          * In case of a modem subsystem restart on secure devices, the modem
1348          * memory can be reclaimed only after MBA is loaded.
1349          */
1350         q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1351                                 qproc->mpss_phys, qproc->mpss_size);
1352
1353         /* Share ownership between Linux and MSS, during segment loading */
1354         ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1355                                       qproc->mpss_phys, qproc->mpss_size);
1356         if (ret) {
1357                 dev_err(qproc->dev,
1358                         "assigning Q6 access to mpss memory failed: %d\n", ret);
1359                 ret = -EAGAIN;
1360                 goto release_firmware;
1361         }
1362
1363         mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1364         qproc->mpss_reloc = mpss_reloc;
1365         /* Load firmware segments */
1366         for (i = 0; i < ehdr->e_phnum; i++) {
1367                 phdr = &phdrs[i];
1368
1369                 if (!q6v5_phdr_valid(phdr))
1370                         continue;
1371
1372                 offset = phdr->p_paddr - mpss_reloc;
1373                 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1374                         dev_err(qproc->dev, "segment outside memory range\n");
1375                         ret = -EINVAL;
1376                         goto release_firmware;
1377                 }
1378
1379                 if (phdr->p_filesz > phdr->p_memsz) {
1380                         dev_err(qproc->dev,
1381                                 "refusing to load segment %d with p_filesz > p_memsz\n",
1382                                 i);
1383                         ret = -EINVAL;
1384                         goto release_firmware;
1385                 }
1386
1387                 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
1388                 if (!ptr) {
1389                         dev_err(qproc->dev,
1390                                 "unable to map memory region: %pa+%zx-%x\n",
1391                                 &qproc->mpss_phys, offset, phdr->p_memsz);
1392                         goto release_firmware;
1393                 }
1394
1395                 if (phdr->p_filesz && phdr->p_offset < fw->size) {
1396                         /* Firmware is large enough to be non-split */
1397                         if (phdr->p_offset + phdr->p_filesz > fw->size) {
1398                                 dev_err(qproc->dev,
1399                                         "failed to load segment %d from truncated file %s\n",
1400                                         i, fw_name);
1401                                 ret = -EINVAL;
1402                                 memunmap(ptr);
1403                                 goto release_firmware;
1404                         }
1405
1406                         memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1407                 } else if (phdr->p_filesz) {
1408                         /* Replace "xxx.xxx" with "xxx.bxx" */
1409                         sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1410                         ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
1411                                                         ptr, phdr->p_filesz);
1412                         if (ret) {
1413                                 dev_err(qproc->dev, "failed to load %s\n", fw_name);
1414                                 memunmap(ptr);
1415                                 goto release_firmware;
1416                         }
1417
1418                         if (seg_fw->size != phdr->p_filesz) {
1419                                 dev_err(qproc->dev,
1420                                         "failed to load segment %d from truncated file %s\n",
1421                                         i, fw_name);
1422                                 ret = -EINVAL;
1423                                 release_firmware(seg_fw);
1424                                 memunmap(ptr);
1425                                 goto release_firmware;
1426                         }
1427
1428                         release_firmware(seg_fw);
1429                 }
1430
1431                 if (phdr->p_memsz > phdr->p_filesz) {
1432                         memset(ptr + phdr->p_filesz, 0,
1433                                phdr->p_memsz - phdr->p_filesz);
1434                 }
1435                 memunmap(ptr);
1436                 size += phdr->p_memsz;
1437
1438                 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1439                 if (!code_length) {
1440                         boot_addr = relocate ? qproc->mpss_phys : min_addr;
1441                         writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1442                         writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1443                 }
1444                 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1445
1446                 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1447                 if (ret < 0) {
1448                         dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1449                                 ret);
1450                         goto release_firmware;
1451                 }
1452         }
1453
1454         /* Transfer ownership of modem DDR region to Q6 */
1455         ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1456                                       qproc->mpss_phys, qproc->mpss_size);
1457         if (ret) {
1458                 dev_err(qproc->dev,
1459                         "assigning Q6 access to mpss memory failed: %d\n", ret);
1460                 ret = -EAGAIN;
1461                 goto release_firmware;
1462         }
1463
1464         ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1465         if (ret == -ETIMEDOUT)
1466                 dev_err(qproc->dev, "MPSS authentication timed out\n");
1467         else if (ret < 0)
1468                 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1469
1470         qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1471
1472 release_firmware:
1473         release_firmware(fw);
1474 out:
1475         kfree(fw_name);
1476
1477         return ret < 0 ? ret : 0;
1478 }
1479
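/*
 * Coredump callback, invoked once per registered segment. If the MBA is not
 * already loaded, reload it and return mpss memory ownership to Linux so the
 * region can be read; a segment that cannot be mapped is filled with 0xff.
 * Once the last segment has been copied, hand the memory back to Q6 and
 * reclaim the MBA resources.
 */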
1480 static void qcom_q6v5_dump_segment(struct rproc *rproc,
1481                                    struct rproc_dump_segment *segment,
1482                                    void *dest, size_t cp_offset, size_t size)
1483 {
1484         int ret = 0;
1485         struct q6v5 *qproc = rproc->priv;
1486         int offset = segment->da - qproc->mpss_reloc;
1487         void *ptr = NULL;
1488
1489         /* Unlock mba before copying segments */
1490         if (!qproc->dump_mba_loaded) {
1491                 ret = q6v5_reload_mba(rproc);
1492                 if (!ret) {
1493                         /* Reset ownership back to Linux to copy segments */
1494                         ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1495                                                       true, false,
1496                                                       qproc->mpss_phys,
1497                                                       qproc->mpss_size);
1498                 }
1499         }
1500
1501         if (!ret)
1502                 ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
1503
1504         if (ptr) {
1505                 memcpy(dest, ptr, size);
1506                 memunmap(ptr);
1507         } else {
1508                 memset(dest, 0xff, size);
1509         }
1510
1511         qproc->current_dump_size += size;
1512
1513         /* Reclaim mba after copying segments */
1514         if (qproc->current_dump_size == qproc->total_dump_size) {
1515                 if (qproc->dump_mba_loaded) {
1516                         /* Try to reset ownership back to Q6 */
1517                         q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1518                                                 false, true,
1519                                                 qproc->mpss_phys,
1520                                                 qproc->mpss_size);
1521                         q6v5_mba_reclaim(qproc);
1522                 }
1523         }
1524 }
1525
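/*
 * Boot the modem: bring up the MBA, load and authenticate the mpss firmware,
 * then wait for the remote to signal that it has started. On success the MBA
 * buffer is reclaimed for Linux; on failure the MBA is torn down and its logs
 * are dumped.
 */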
1526 static int q6v5_start(struct rproc *rproc)
1527 {
1528         struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1529         int xfermemop_ret;
1530         int ret;
1531
1532         ret = q6v5_mba_load(qproc);
1533         if (ret)
1534                 return ret;
1535
1536         dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
1537                  qproc->dp_size ? "" : "out");
1538
1539         ret = q6v5_mpss_load(qproc);
1540         if (ret)
1541                 goto reclaim_mpss;
1542
1543         ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1544         if (ret == -ETIMEDOUT) {
1545                 dev_err(qproc->dev, "start timed out\n");
1546                 goto reclaim_mpss;
1547         }
1548
1549         xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1550                                                 false, qproc->mba_phys,
1551                                                 qproc->mba_size);
1552         if (xfermemop_ret)
1553                 dev_err(qproc->dev,
1554                         "Failed to reclaim mba buffer, system may become unstable\n");
1555
1556         /* Reset the accumulated coredump size */
1557         qproc->current_dump_size = 0;
1558
1559         return 0;
1560
1561 reclaim_mpss:
1562         q6v5_mba_reclaim(qproc);
1563         q6v5_dump_mba_logs(qproc);
1564
1565         return ret;
1566 }
1567
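/*
 * Request a graceful stop of the modem and reclaim the MBA resources; a
 * timeout on the stop request is logged but does not fail the shutdown.
 */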
1568 static int q6v5_stop(struct rproc *rproc)
1569 {
1570         struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1571         int ret;
1572
1573         ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
1574         if (ret == -ETIMEDOUT)
1575                 dev_err(qproc->dev, "timed out waiting for stop acknowledgment\n");
1576
1577         q6v5_mba_reclaim(qproc);
1578
1579         return 0;
1580 }
1581
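/*
 * Fetch the mpss .mdt image again to walk its program headers and register
 * one custom coredump segment per valid phdr, accumulating total_dump_size
 * for qcom_q6v5_dump_segment().
 */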
1582 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1583                                             const struct firmware *mba_fw)
1584 {
1585         const struct firmware *fw;
1586         const struct elf32_phdr *phdrs;
1587         const struct elf32_phdr *phdr;
1588         const struct elf32_hdr *ehdr;
1589         struct q6v5 *qproc = rproc->priv;
1590         unsigned long i;
1591         int ret;
1592
1593         ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1594         if (ret < 0) {
1595                 dev_err(qproc->dev, "unable to load %s\n",
1596                         qproc->hexagon_mdt_image);
1597                 return ret;
1598         }
1599
1600         rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1601
1602         ehdr = (struct elf32_hdr *)fw->data;
1603         phdrs = (struct elf32_phdr *)(ehdr + 1);
1604         qproc->total_dump_size = 0;
1605
1606         for (i = 0; i < ehdr->e_phnum; i++) {
1607                 phdr = &phdrs[i];
1608
1609                 if (!q6v5_phdr_valid(phdr))
1610                         continue;
1611
1612                 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1613                                                         phdr->p_memsz,
1614                                                         qcom_q6v5_dump_segment,
1615                                                         NULL);
1616                 if (ret)
1617                         break;
1618
1619                 qproc->total_dump_size += phdr->p_memsz;
1620         }
1621
1622         release_firmware(fw);
1623         return ret;
1624 }
1625
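/* rproc panic handler; delegates to the shared qcom_q6v5 helper. */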
1626 static unsigned long q6v5_panic(struct rproc *rproc)
1627 {
1628         struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1629
1630         return qcom_q6v5_panic(&qproc->q6v5);
1631 }
1632
1633 static const struct rproc_ops q6v5_ops = {
1634         .start = q6v5_start,
1635         .stop = q6v5_stop,
1636         .parse_fw = qcom_q6v5_register_dump_segments,
1637         .load = q6v5_load,
1638         .panic = q6v5_panic,
1639 };
1640
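/*
 * MSA handover callback: once the modem has taken over, drop the proxy votes
 * on clocks, regulators and power domains that were only needed until then.
 */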
1641 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1642 {
1643         struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1644
1645         q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1646                          qproc->proxy_clk_count);
1647         q6v5_regulator_disable(qproc, qproc->proxy_regs,
1648                                qproc->proxy_reg_count);
1649         q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
1650                                qproc->fallback_proxy_reg_count);
1651         q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1652 }
1653
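/*
 * Map the qdsp6 and rmb register regions and look up the syscon regmaps and
 * register offsets described by the qcom,halt-regs, qcom,qaccept-regs,
 * qcom,ext-regs and qcom,spare-regs properties, as applicable for this SoC.
 */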
1654 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1655 {
1656         struct of_phandle_args args;
1657         int halt_cell_cnt = 3;
1658         int ret;
1659
1660         qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
1661         if (IS_ERR(qproc->reg_base))
1662                 return PTR_ERR(qproc->reg_base);
1663
1664         qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
1665         if (IS_ERR(qproc->rmb_base))
1666                 return PTR_ERR(qproc->rmb_base);
1667
1668         if (qproc->has_vq6)
1669                 halt_cell_cnt++;
1670
1671         ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1672                                                "qcom,halt-regs", halt_cell_cnt, 0, &args);
1673         if (ret < 0) {
1674                 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1675                 return -EINVAL;
1676         }
1677
1678         qproc->halt_map = syscon_node_to_regmap(args.np);
1679         of_node_put(args.np);
1680         if (IS_ERR(qproc->halt_map))
1681                 return PTR_ERR(qproc->halt_map);
1682
1683         qproc->halt_q6 = args.args[0];
1684         qproc->halt_modem = args.args[1];
1685         qproc->halt_nc = args.args[2];
1686
1687         if (qproc->has_vq6)
1688                 qproc->halt_vq6 = args.args[3];
1689
1690         if (qproc->has_qaccept_regs) {
1691                 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1692                                                        "qcom,qaccept-regs",
1693                                                        3, 0, &args);
1694                 if (ret < 0) {
1695                         dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
1696                         return -EINVAL;
1697                 }
1698
1699                 qproc->qaccept_mdm = args.args[0];
1700                 qproc->qaccept_cx = args.args[1];
1701                 qproc->qaccept_axi = args.args[2];
1702         }
1703
1704         if (qproc->has_ext_cntl_regs) {
1705                 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1706                                                        "qcom,ext-regs",
1707                                                        2, 0, &args);
1708                 if (ret < 0) {
1709                         dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
1710                         return -EINVAL;
1711                 }
1712
1713                 qproc->conn_map = syscon_node_to_regmap(args.np);
1714                 of_node_put(args.np);
1715                 if (IS_ERR(qproc->conn_map))
1716                         return PTR_ERR(qproc->conn_map);
1717
1718                 qproc->force_clk_on = args.args[0];
1719                 qproc->rscc_disable = args.args[1];
1720
1721                 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1722                                                        "qcom,ext-regs",
1723                                                        2, 1, &args);
1724                 if (ret < 0) {
1725                         dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
1726                         return -EINVAL;
1727                 }
1728
1729                 qproc->axim1_clk_off = args.args[0];
1730                 qproc->crypto_clk_off = args.args[1];
1731         }
1732
1733         if (qproc->has_spare_reg) {
1734                 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1735                                                        "qcom,spare-regs",
1736                                                        1, 0, &args);
1737                 if (ret < 0) {
1738                         dev_err(&pdev->dev, "failed to parse spare-regs\n");
1739                         return -EINVAL;
1740                 }
1741
1742                 qproc->conn_map = syscon_node_to_regmap(args.np);
1743                 of_node_put(args.np);
1744                 if (IS_ERR(qproc->conn_map))
1745                         return PTR_ERR(qproc->conn_map);
1746
1747                 qproc->conn_box = args.args[0];
1748         }
1749
1750         return 0;
1751 }
1752
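/*
 * Look up the clocks named in the NULL-terminated clk_names array. Returns
 * the number of clocks obtained or a negative errno.
 */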
1753 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1754                 char **clk_names)
1755 {
1756         int i;
1757
1758         if (!clk_names)
1759                 return 0;
1760
1761         for (i = 0; clk_names[i]; i++) {
1762                 clks[i] = devm_clk_get(dev, clk_names[i]);
1763                 if (IS_ERR(clks[i])) {
1764                         int rc = PTR_ERR(clks[i]);
1765
1766                         if (rc != -EPROBE_DEFER)
1767                                 dev_err(dev, "Failed to get %s clock\n",
1768                                         clk_names[i]);
1769                         return rc;
1770                 }
1771         }
1772
1773         return i;
1774 }
1775
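/*
 * Attach the power domains named in the NULL-terminated pd_names array.
 * Returns the number of domains attached or a negative errno, detaching any
 * domains already attached on failure.
 */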
1776 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1777                            char **pd_names)
1778 {
1779         size_t num_pds = 0;
1780         int ret;
1781         int i;
1782
1783         if (!pd_names)
1784                 return 0;
1785
1786         while (pd_names[num_pds])
1787                 num_pds++;
1788
1789         for (i = 0; i < num_pds; i++) {
1790                 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1791                 if (IS_ERR_OR_NULL(devs[i])) {
1792                         ret = PTR_ERR(devs[i]) ? : -ENODATA;
1793                         goto unroll_attach;
1794                 }
1795         }
1796
1797         return num_pds;
1798
1799 unroll_attach:
1800         for (i--; i >= 0; i--)
1801                 dev_pm_domain_detach(devs[i], false);
1802
1803         return ret;
1804 }
1805
1806 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1807                             size_t pd_count)
1808 {
1809         int i;
1810
1811         for (i = 0; i < pd_count; i++)
1812                 dev_pm_domain_detach(pds[i], false);
1813 }
1814
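/*
 * Acquire the mss_restart reset control and, on platforms that need it, the
 * pdc_reset control as well.
 */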
1815 static int q6v5_init_reset(struct q6v5 *qproc)
1816 {
1817         qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1818                                                               "mss_restart");
1819         if (IS_ERR(qproc->mss_restart)) {
1820                 dev_err(qproc->dev, "failed to acquire mss restart\n");
1821                 return PTR_ERR(qproc->mss_restart);
1822         }
1823
1824         if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
1825                 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1826                                                                     "pdc_reset");
1827                 if (IS_ERR(qproc->pdc_reset)) {
1828                         dev_err(qproc->dev, "failed to acquire pdc reset\n");
1829                         return PTR_ERR(qproc->pdc_reset);
1830                 }
1831         }
1832
1833         return 0;
1834 }
1835
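/*
 * Resolve the reserved memory regions used for the MBA and mpss firmware,
 * either from mba/mpss sub-nodes or from the first two entries of the
 * device's memory-region property.
 */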
1836 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1837 {
1838         struct device_node *child;
1839         struct device_node *node;
1840         struct resource r;
1841         int ret;
1842
1843         /*
1844          * In the absence of mba/mpss sub-nodes, extract the mba and mpss
1845          * reserved memory regions from the device's memory-region property.
1846          */
1847         child = of_get_child_by_name(qproc->dev->of_node, "mba");
1848         if (!child) {
1849                 node = of_parse_phandle(qproc->dev->of_node,
1850                                         "memory-region", 0);
1851         } else {
1852                 node = of_parse_phandle(child, "memory-region", 0);
1853                 of_node_put(child);
1854         }
1855
1856         ret = of_address_to_resource(node, 0, &r);
1857         of_node_put(node);
1858         if (ret) {
1859                 dev_err(qproc->dev, "unable to resolve mba region\n");
1860                 return ret;
1861         }
1862
1863         qproc->mba_phys = r.start;
1864         qproc->mba_size = resource_size(&r);
1865
1866         if (!child) {
1867                 node = of_parse_phandle(qproc->dev->of_node,
1868                                         "memory-region", 1);
1869         } else {
1870                 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1871                 node = of_parse_phandle(child, "memory-region", 0);
1872                 of_node_put(child);
1873         }
1874
1875         ret = of_address_to_resource(node, 0, &r);
1876         of_node_put(node);
1877         if (ret) {
1878                 dev_err(qproc->dev, "unable to resolve mpss region\n");
1879                 return ret;
1880         }
1881
1882         qproc->mpss_phys = qproc->mpss_reloc = r.start;
1883         qproc->mpss_size = resource_size(&r);
1884
1885         return 0;
1886 }
1887
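/*
 * Probe: resolve the per-SoC resource description, map registers and memory
 * regions, acquire clocks, regulators, power domains and resets, then register
 * the remoteproc along with its glink, smd, ssr and sysmon subdevices.
 */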
1888 static int q6v5_probe(struct platform_device *pdev)
1889 {
1890         const struct rproc_hexagon_res *desc;
1891         struct device_node *node;
1892         struct q6v5 *qproc;
1893         struct rproc *rproc;
1894         const char *mba_image;
1895         int ret;
1896
1897         desc = of_device_get_match_data(&pdev->dev);
1898         if (!desc)
1899                 return -EINVAL;
1900
1901         if (desc->need_mem_protection && !qcom_scm_is_available())
1902                 return -EPROBE_DEFER;
1903
1904         mba_image = desc->hexagon_mba_image;
1905         ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1906                                             0, &mba_image);
1907         if (ret < 0 && ret != -EINVAL) {
1908                 dev_err(&pdev->dev, "unable to read mba firmware-name\n");
1909                 return ret;
1910         }
1911
1912         rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1913                             mba_image, sizeof(*qproc));
1914         if (!rproc) {
1915                 dev_err(&pdev->dev, "failed to allocate rproc\n");
1916                 return -ENOMEM;
1917         }
1918
1919         rproc->auto_boot = false;
1920         rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1921
1922         qproc = (struct q6v5 *)rproc->priv;
1923         qproc->dev = &pdev->dev;
1924         qproc->rproc = rproc;
1925         qproc->hexagon_mdt_image = "modem.mdt";
1926         ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1927                                             1, &qproc->hexagon_mdt_image);
1928         if (ret < 0 && ret != -EINVAL) {
1929                 dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
1930                 goto free_rproc;
1931         }
1932
1933         platform_set_drvdata(pdev, qproc);
1934
1935         qproc->has_qaccept_regs = desc->has_qaccept_regs;
1936         qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
1937         qproc->has_vq6 = desc->has_vq6;
1938         qproc->has_spare_reg = desc->has_spare_reg;
1939         ret = q6v5_init_mem(qproc, pdev);
1940         if (ret)
1941                 goto free_rproc;
1942
1943         ret = q6v5_alloc_memory_region(qproc);
1944         if (ret)
1945                 goto free_rproc;
1946
1947         ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1948                                desc->proxy_clk_names);
1949         if (ret < 0) {
1950                 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1951                 goto free_rproc;
1952         }
1953         qproc->proxy_clk_count = ret;
1954
1955         ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1956                                desc->reset_clk_names);
1957         if (ret < 0) {
1958                 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1959                 goto free_rproc;
1960         }
1961         qproc->reset_clk_count = ret;
1962
1963         ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1964                                desc->active_clk_names);
1965         if (ret < 0) {
1966                 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1967                 goto free_rproc;
1968         }
1969         qproc->active_clk_count = ret;
1970
1971         ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1972                                   desc->proxy_supply);
1973         if (ret < 0) {
1974                 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1975                 goto free_rproc;
1976         }
1977         qproc->proxy_reg_count = ret;
1978
1979         ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1980                                   desc->active_supply);
1981         if (ret < 0) {
1982                 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1983                 goto free_rproc;
1984         }
1985         qproc->active_reg_count = ret;
1986
1987         ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1988                               desc->proxy_pd_names);
1989         /* Fall back to regulators for old device trees */
1990         if (ret == -ENODATA && desc->fallback_proxy_supply) {
1991                 ret = q6v5_regulator_init(&pdev->dev,
1992                                           qproc->fallback_proxy_regs,
1993                                           desc->fallback_proxy_supply);
1994                 if (ret < 0) {
1995                         dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
1996                         goto free_rproc;
1997                 }
1998                 qproc->fallback_proxy_reg_count = ret;
1999         } else if (ret < 0) {
2000                 dev_err(&pdev->dev, "Failed to init power domains\n");
2001                 goto free_rproc;
2002         } else {
2003                 qproc->proxy_pd_count = ret;
2004         }
2005
2006         qproc->has_alt_reset = desc->has_alt_reset;
2007         ret = q6v5_init_reset(qproc);
2008         if (ret)
2009                 goto detach_proxy_pds;
2010
2011         qproc->version = desc->version;
2012         qproc->need_mem_protection = desc->need_mem_protection;
2013         qproc->has_mba_logs = desc->has_mba_logs;
2014
2015         ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
2016                              qcom_msa_handover);
2017         if (ret)
2018                 goto detach_proxy_pds;
2019
2020         qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
2021         qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
2022         qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
2023         qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
2024         qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
2025         qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
2026         if (IS_ERR(qproc->sysmon)) {
2027                 ret = PTR_ERR(qproc->sysmon);
2028                 goto remove_subdevs;
2029         }
2030
2031         ret = rproc_add(rproc);
2032         if (ret)
2033                 goto remove_sysmon_subdev;
2034
2035         node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
2036         qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
2037         of_node_put(node);
2038
2039         return 0;
2040
2041 remove_sysmon_subdev:
2042         qcom_remove_sysmon_subdev(qproc->sysmon);
2043 remove_subdevs:
2044         qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
2045         qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
2046         qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
2047 detach_proxy_pds:
2048         q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
2049 free_rproc:
2050         rproc_free(rproc);
2051
2052         return ret;
2053 }
2054
2055 static int q6v5_remove(struct platform_device *pdev)
2056 {
2057         struct q6v5 *qproc = platform_get_drvdata(pdev);
2058         struct rproc *rproc = qproc->rproc;
2059
2060         if (qproc->bam_dmux)
2061                 of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
2062         rproc_del(rproc);
2063
2064         qcom_q6v5_deinit(&qproc->q6v5);
2065         qcom_remove_sysmon_subdev(qproc->sysmon);
2066         qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
2067         qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
2068         qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
2069
2070         q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
2071
2072         rproc_free(rproc);
2073
2074         return 0;
2075 }
2076
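/* Per-SoC resource descriptions, referenced by q6v5_of_match below. */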
2077 static const struct rproc_hexagon_res sc7180_mss = {
2078         .hexagon_mba_image = "mba.mbn",
2079         .proxy_clk_names = (char*[]){
2080                 "xo",
2081                 NULL
2082         },
2083         .reset_clk_names = (char*[]){
2084                 "iface",
2085                 "bus",
2086                 "snoc_axi",
2087                 NULL
2088         },
2089         .active_clk_names = (char*[]){
2090                 "mnoc_axi",
2091                 "nav",
2092                 NULL
2093         },
2094         .proxy_pd_names = (char*[]){
2095                 "cx",
2096                 "mx",
2097                 "mss",
2098                 NULL
2099         },
2100         .need_mem_protection = true,
2101         .has_alt_reset = false,
2102         .has_mba_logs = true,
2103         .has_spare_reg = true,
2104         .has_qaccept_regs = false,
2105         .has_ext_cntl_regs = false,
2106         .has_vq6 = false,
2107         .version = MSS_SC7180,
2108 };
2109
2110 static const struct rproc_hexagon_res sc7280_mss = {
2111         .hexagon_mba_image = "mba.mbn",
2112         .proxy_clk_names = (char*[]){
2113                 "xo",
2114                 "pka",
2115                 NULL
2116         },
2117         .active_clk_names = (char*[]){
2118                 "iface",
2119                 "offline",
2120                 "snoc_axi",
2121                 NULL
2122         },
2123         .proxy_pd_names = (char*[]){
2124                 "cx",
2125                 "mss",
2126                 NULL
2127         },
2128         .need_mem_protection = true,
2129         .has_alt_reset = false,
2130         .has_mba_logs = true,
2131         .has_spare_reg = false,
2132         .has_qaccept_regs = true,
2133         .has_ext_cntl_regs = true,
2134         .has_vq6 = true,
2135         .version = MSS_SC7280,
2136 };
2137
2138 static const struct rproc_hexagon_res sdm845_mss = {
2139         .hexagon_mba_image = "mba.mbn",
2140         .proxy_clk_names = (char*[]){
2141                         "xo",
2142                         "prng",
2143                         NULL
2144         },
2145         .reset_clk_names = (char*[]){
2146                         "iface",
2147                         "snoc_axi",
2148                         NULL
2149         },
2150         .active_clk_names = (char*[]){
2151                         "bus",
2152                         "mem",
2153                         "gpll0_mss",
2154                         "mnoc_axi",
2155                         NULL
2156         },
2157         .proxy_pd_names = (char*[]){
2158                         "cx",
2159                         "mx",
2160                         "mss",
2161                         NULL
2162         },
2163         .need_mem_protection = true,
2164         .has_alt_reset = true,
2165         .has_mba_logs = false,
2166         .has_spare_reg = false,
2167         .has_qaccept_regs = false,
2168         .has_ext_cntl_regs = false,
2169         .has_vq6 = false,
2170         .version = MSS_SDM845,
2171 };
2172
2173 static const struct rproc_hexagon_res msm8998_mss = {
2174         .hexagon_mba_image = "mba.mbn",
2175         .proxy_clk_names = (char*[]){
2176                         "xo",
2177                         "qdss",
2178                         "mem",
2179                         NULL
2180         },
2181         .active_clk_names = (char*[]){
2182                         "iface",
2183                         "bus",
2184                         "gpll0_mss",
2185                         "mnoc_axi",
2186                         "snoc_axi",
2187                         NULL
2188         },
2189         .proxy_pd_names = (char*[]){
2190                         "cx",
2191                         "mx",
2192                         NULL
2193         },
2194         .need_mem_protection = true,
2195         .has_alt_reset = false,
2196         .has_mba_logs = false,
2197         .has_spare_reg = false,
2198         .has_qaccept_regs = false,
2199         .has_ext_cntl_regs = false,
2200         .has_vq6 = false,
2201         .version = MSS_MSM8998,
2202 };
2203
2204 static const struct rproc_hexagon_res msm8996_mss = {
2205         .hexagon_mba_image = "mba.mbn",
2206         .proxy_supply = (struct qcom_mss_reg_res[]) {
2207                 {
2208                         .supply = "pll",
2209                         .uA = 100000,
2210                 },
2211                 {}
2212         },
2213         .proxy_clk_names = (char*[]){
2214                         "xo",
2215                         "pnoc",
2216                         "qdss",
2217                         NULL
2218         },
2219         .active_clk_names = (char*[]){
2220                         "iface",
2221                         "bus",
2222                         "mem",
2223                         "gpll0_mss",
2224                         "snoc_axi",
2225                         "mnoc_axi",
2226                         NULL
2227         },
2228         .proxy_pd_names = (char*[]){
2229                         "mx",
2230                         "cx",
2231                         NULL
2232         },
2233         .need_mem_protection = true,
2234         .has_alt_reset = false,
2235         .has_mba_logs = false,
2236         .has_spare_reg = false,
2237         .has_qaccept_regs = false,
2238         .has_ext_cntl_regs = false,
2239         .has_vq6 = false,
2240         .version = MSS_MSM8996,
2241 };
2242
2243 static const struct rproc_hexagon_res msm8916_mss = {
2244         .hexagon_mba_image = "mba.mbn",
2245         .proxy_supply = (struct qcom_mss_reg_res[]) {
2246                 {
2247                         .supply = "pll",
2248                         .uA = 100000,
2249                 },
2250                 {}
2251         },
2252         .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2253                 {
2254                         .supply = "mx",
2255                         .uV = 1050000,
2256                 },
2257                 {
2258                         .supply = "cx",
2259                         .uA = 100000,
2260                 },
2261                 {}
2262         },
2263         .proxy_clk_names = (char*[]){
2264                 "xo",
2265                 NULL
2266         },
2267         .active_clk_names = (char*[]){
2268                 "iface",
2269                 "bus",
2270                 "mem",
2271                 NULL
2272         },
2273         .proxy_pd_names = (char*[]){
2274                 "mx",
2275                 "cx",
2276                 NULL
2277         },
2278         .need_mem_protection = false,
2279         .has_alt_reset = false,
2280         .has_mba_logs = false,
2281         .has_spare_reg = false,
2282         .has_qaccept_regs = false,
2283         .has_ext_cntl_regs = false,
2284         .has_vq6 = false,
2285         .version = MSS_MSM8916,
2286 };
2287
2288 static const struct rproc_hexagon_res msm8974_mss = {
2289         .hexagon_mba_image = "mba.b00",
2290         .proxy_supply = (struct qcom_mss_reg_res[]) {
2291                 {
2292                         .supply = "pll",
2293                         .uA = 100000,
2294                 },
2295                 {}
2296         },
2297         .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2298                 {
2299                         .supply = "mx",
2300                         .uV = 1050000,
2301                 },
2302                 {
2303                         .supply = "cx",
2304                         .uA = 100000,
2305                 },
2306                 {}
2307         },
2308         .active_supply = (struct qcom_mss_reg_res[]) {
2309                 {
2310                         .supply = "mss",
2311                         .uV = 1050000,
2312                         .uA = 100000,
2313                 },
2314                 {}
2315         },
2316         .proxy_clk_names = (char*[]){
2317                 "xo",
2318                 NULL
2319         },
2320         .active_clk_names = (char*[]){
2321                 "iface",
2322                 "bus",
2323                 "mem",
2324                 NULL
2325         },
2326         .proxy_pd_names = (char*[]){
2327                 "mx",
2328                 "cx",
2329                 NULL
2330         },
2331         .need_mem_protection = false,
2332         .has_alt_reset = false,
2333         .has_mba_logs = false,
2334         .has_spare_reg = false,
2335         .has_qaccept_regs = false,
2336         .has_ext_cntl_regs = false,
2337         .has_vq6 = false,
2338         .version = MSS_MSM8974,
2339 };
2340
2341 static const struct of_device_id q6v5_of_match[] = {
2342         { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
2343         { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
2344         { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
2345         { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
2346         { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
2347         { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
2348         { .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
2349         { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
2350         { },
2351 };
2352 MODULE_DEVICE_TABLE(of, q6v5_of_match);
2353
2354 static struct platform_driver q6v5_driver = {
2355         .probe = q6v5_probe,
2356         .remove = q6v5_remove,
2357         .driver = {
2358                 .name = "qcom-q6v5-mss",
2359                 .of_match_table = q6v5_of_match,
2360         },
2361 };
2362 module_platform_driver(q6v5_driver);
2363
2364 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
2365 MODULE_LICENSE("GPL v2");