// SPDX-License-Identifier: GPL-2.0
/*
 * ZynqMP R5 Remote Processor driver
 */

#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"
/*
 * Settings for RPU cluster mode which
 * reflect possible values of the xlnx,cluster-mode dt-property
 */
enum zynqmp_r5_cluster_mode {
	SPLIT_MODE = 0, /* When cores run as separate processors */
	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep, clk-for-clk */
	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};
/**
 * struct mem_bank_data - Memory Bank description
 *
 * @addr: Start address of memory bank
 * @size: Size of memory bank
 * @pm_domain_id: Power-domain id of memory bank for firmware to turn on/off
 * @bank_name: name of the bank for remoteproc framework
 */
struct mem_bank_data {
	phys_addr_t addr;
	size_t size;
	u32 pm_domain_id;
	char *bank_name;
};
/*
 * Hardcoded TCM bank values. This will be removed once TCM bindings are
 * accepted for system-dt specifications and upstreamed in the Linux kernel.
 */
static const struct mem_bank_data zynqmp_tcm_banks[] = {
	{0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};
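
/*
 * Note: the first two entries (PD_R5_0_*) are the banks owned by R5 core0 and
 * the last two (PD_R5_1_*) by core1; each bank is 64KB. The R5-view device
 * addresses for these banks are derived later in tcm_mem_map().
 */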
/**
 * struct zynqmp_r5_core
 *
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number of TCM banks accessible to this RPU
 * @tcm_banks: array of each TCM bank data
 * @rmem_count: Number of reserved mem regions
 * @rmem: reserved memory region nodes from device tree
 * @rproc: rproc handle
 * @pm_domain_id: RPU CPU power domain id
 */
struct zynqmp_r5_core {
	struct device *dev;
	struct device_node *np;
	int tcm_bank_count;
	struct mem_bank_data **tcm_banks;
	int rmem_count;
	struct reserved_mem **rmem;
	struct rproc *rproc;
	u32 pm_domain_id;
};
/**
 * struct zynqmp_r5_cluster
 *
 * @dev: r5f subsystem cluster device node
 * @mode: cluster mode of type zynqmp_r5_cluster_mode
 * @core_count: number of r5 cores used for this cluster mode
 * @r5_cores: Array of pointers pointing to r5 cores
 */
struct zynqmp_r5_cluster {
	struct device *dev;
	enum zynqmp_r5_cluster_mode mode;
	int core_count;
	struct zynqmp_r5_core **r5_cores;
};
/*
 * zynqmp_r5_set_mode()
 *
 * set RPU cluster and TCM operation mode
 *
 * @r5_core: pointer to zynqmp_r5_core type object
 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 *
 * Return: 0 for success and < 0 for failure
 */
static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
			      enum rpu_oper_mode fw_reg_val,
			      enum rpu_tcm_comb tcm_mode)
{
	int ret;

	ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
	if (ret < 0) {
		dev_err(r5_core->dev, "failed to set RPU mode\n");
		return ret;
	}

	ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
	if (ret < 0)
		dev_err(r5_core->dev, "failed to configure TCM\n");

	return ret;
}
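
/*
 * Context note: the zynqmp_pm_*() helpers used above are EEMI calls serviced
 * by the platform management firmware, so the RPU cluster and TCM mode are
 * already configured at probe time, before any core is woken up in
 * zynqmp_r5_rproc_start() below.
 */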
/*
 * zynqmp_r5_rproc_start()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Start R5 Core from designated boot address.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_rproc_start(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	enum rpu_boot_mem bootmem;
	int ret;

	/*
	 * The exception vector pointers (EVP) refer to the base-address of
	 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
	 * starts at the base-address and subsequent vectors are on 4-byte
	 * boundaries.
	 *
	 * Exception vectors can start either from 0x0000_0000 (LOVEC) or
	 * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory).
	 *
	 * Usually firmware will put exception vectors at LOVEC.
	 *
	 * It is not recommended to change the exception vector.
	 * Changing the EVP to HIVEC will result in increased interrupt latency
	 * and jitter. Also, if the OCM is secured and the Cortex-R5F processor
	 * is non-secured, then the Cortex-R5F processor cannot access the
	 * HIVEC exception vectors in the OCM.
	 */
	bootmem = (rproc->bootaddr >= 0xFFFC0000) ?
		   PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;

	dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
		bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");

	ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
				     bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
	if (ret)
		dev_err(r5_core->dev,
			"failed to start RPU = 0x%x\n", r5_core->pm_domain_id);

	return ret;
}
/*
 * zynqmp_r5_rproc_stop()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Power down R5 Core.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_rproc_stop(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	int ret;

	ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret)
		dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);

	return ret;
}
/*
 * zynqmp_r5_mem_region_map()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: mem descriptor to map reserved memory-regions
 *
 * Callback to map va for memory-region's carveout.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_mem_region_map(struct rproc *rproc,
				    struct rproc_mem_entry *mem)
{
	void __iomem *va;

	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va))
		return -ENOMEM;

	mem->va = (void *)va;

	return 0;
}
/*
 * zynqmp_r5_mem_region_unmap()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: mem entry to unmap
 *
 * Unmap memory-region carveout
 *
 * return: always returns 0
 */
static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
				      struct rproc_mem_entry *mem)
{
	iounmap((void __iomem *)mem->va);

	return 0;
}
/*
 * add_mem_regions_carveout()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Construct rproc mem carveouts from memory-region property nodes
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_mem_regions_carveout(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	struct reserved_mem *rmem;
	int i, num_mem_regions;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;
	num_mem_regions = r5_core->rmem_count;

	for (i = 0; i < num_mem_regions; i++) {
		rmem = r5_core->rmem[i];

		if (!strncmp(rmem->name, "vdev0buffer", strlen("vdev0buffer"))) {
			/* Init reserved memory for vdev buffer */
			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
								 rmem->size,
								 rmem->base,
								 rmem->name);
		} else {
			/* Register associated reserved memory regions */
			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
							 (dma_addr_t)rmem->base,
							 rmem->size, rmem->base,
							 zynqmp_r5_mem_region_map,
							 zynqmp_r5_mem_region_unmap,
							 rmem->name);
		}

		if (!rproc_mem)
			return -ENOMEM;

		rproc_add_carveout(rproc, rproc_mem);

		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
			rmem->name, rmem->base, rmem->size);
	}

	return 0;
}
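
/*
 * Naming note: by the usual remoteproc/virtio convention, reserved-memory
 * nodes named "vdev0buffer" and "vdev0vring*" back the virtio IPC transport
 * (the vring carveouts are matched by name by the remoteproc core); any other
 * named region is simply mapped for firmware use via the map/unmap callbacks
 * above.
 */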
/*
 * tcm_mem_unmap()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: tcm mem entry to unmap
 *
 * Unmap TCM banks when powering down R5 core.
 *
 * return: always returns 0
 */
static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	iounmap((void __iomem *)mem->va);

	return 0;
}
/*
 * tcm_mem_map()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: tcm memory entry descriptor
 *
 * Given a TCM bank entry, this function sets up the virtual address for the
 * TCM bank remoteproc carveout. It also takes care of va to da address
 * translation.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int tcm_mem_map(struct rproc *rproc,
		       struct rproc_mem_entry *mem)
{
	void __iomem *va;

	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va))
		return -ENOMEM;

	/* Update memory entry va */
	mem->va = (void *)va;

	/* clear TCMs */
	memset_io(va, 0, mem->len);

	/*
	 * The R5s expect their TCM banks to be at address 0x0 and 0x20000,
	 * while on the Linux side they are at 0xffexxxxx.
	 *
	 * Zero out the high 12 bits of the address. This will give
	 * expected values for TCM Banks 0A and 0B (0x0 and 0x20000).
	 */
	mem->da &= 0x000fffff;

	/*
	 * TCM Banks 1A and 1B still have to be translated.
	 *
	 * Below handle these two banks' absolute addresses (0xffe90000 and
	 * 0xffeb0000) and convert to the expected relative addresses
	 * (0x0 and 0x20000).
	 */
	if (mem->da == 0x90000 || mem->da == 0xB0000)
		mem->da -= 0x90000;

	/* if translated TCM bank address is not valid report error */
	if (mem->da != 0x0 && mem->da != 0x20000) {
		dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da);
		return -EINVAL;
	}

	return 0;
}
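
/*
 * Worked example of the translation above: atcm0 at 0xffe00000 and btcm0 at
 * 0xffe20000 mask down to da 0x0 and 0x20000; atcm1 (0xffe90000) and btcm1
 * (0xffeb0000) first mask to 0x90000/0xb0000 and are then shifted down by
 * 0x90000 to the same R5-view addresses 0x0 and 0x20000.
 */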
/*
 * add_tcm_carveout_split_mode()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory in split mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_carveout_split_mode(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	struct device *dev;
	u32 pm_domain_id;
	size_t bank_size;
	char *bank_name;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;
	dev = r5_core->dev;
	num_banks = r5_core->tcm_bank_count;

	/*
	 * Power on each 64KB TCM,
	 * register its address space, map and unmap functions
	 * and add carveouts accordingly
	 */
	for (i = 0; i < num_banks; i++) {
		bank_addr = r5_core->tcm_banks[i]->addr;
		bank_name = r5_core->tcm_banks[i]->bank_name;
		bank_size = r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm_split;
		}

		dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx",
			bank_name, bank_addr, bank_size);

		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
						 bank_size, bank_addr,
						 tcm_mem_map, tcm_mem_unmap,
						 bank_name);
		if (!rproc_mem) {
			ret = -ENOMEM;
			zynqmp_pm_release_node(pm_domain_id);
			goto release_tcm_split;
		}

		rproc_add_carveout(rproc, rproc_mem);
	}

	return 0;

release_tcm_split:
	/* If failed, turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
/*
 * add_tcm_carveout_lockstep_mode()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory in lockstep mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	size_t bank_size = 0;
	struct device *dev;
	u32 pm_domain_id;
	char *bank_name;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;
	dev = r5_core->dev;

	/* Go through zynqmp banks for r5 node */
	num_banks = r5_core->tcm_bank_count;

	/*
	 * In lockstep mode, TCM is a contiguous memory block.
	 * However, each TCM block still needs to be enabled individually.
	 * So, enable each TCM block individually, but add their sizes
	 * to create one contiguous memory region.
	 */
	bank_addr = r5_core->tcm_banks[0]->addr;
	bank_name = r5_core->tcm_banks[0]->bank_name;

	for (i = 0; i < num_banks; i++) {
		bank_size += r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		/* Turn on each TCM bank individually */
		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm_lockstep;
		}
	}

	dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx",
		bank_name, bank_addr, bank_size);

	/* Register TCM address range, TCM map and unmap functions */
	rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
					 bank_size, bank_addr,
					 tcm_mem_map, tcm_mem_unmap,
					 bank_name);
	if (!rproc_mem) {
		ret = -ENOMEM;
		goto release_tcm_lockstep;
	}

	/* If registration is successful, add the carveout */
	rproc_add_carveout(rproc, rproc_mem);

	return 0;

release_tcm_lockstep:
	/* If failed, turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
/*
 * add_tcm_banks()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveouts for TCM memory based on cluster mode
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_banks(struct rproc *rproc)
{
	struct zynqmp_r5_cluster *cluster;
	struct zynqmp_r5_core *r5_core;
	struct device *dev;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;
	dev = r5_core->dev;

	cluster = dev_get_drvdata(dev->parent);
	if (!cluster) {
		dev_err(dev->parent, "Invalid driver data\n");
		return -EINVAL;
	}

	/*
	 * In lockstep mode TCM banks are one contiguous memory region of 256KB.
	 * In split mode, each TCM bank is 64KB and not contiguous.
	 * We add memory carveouts accordingly.
	 */
	if (cluster->mode == SPLIT_MODE)
		return add_tcm_carveout_split_mode(rproc);
	else if (cluster->mode == LOCKSTEP_MODE)
		return add_tcm_carveout_lockstep_mode(rproc);

	return -EINVAL;
}
/*
 * zynqmp_r5_parse_fw()
 * @rproc: single R5 core's corresponding rproc instance
 * @fw: ptr to firmware to be loaded onto r5 core
 *
 * get resource table if available
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	int ret;

	ret = rproc_elf_load_rsc_table(rproc, fw);
	if (ret == -EINVAL) {
		/*
		 * resource table is only required for IPC.
		 * if not present, this is not necessarily an error;
		 * for example, a simple r5 hello world application
		 * may not carry one, so simply inform the user and keep going.
		 */
		dev_info(&rproc->dev, "no resource table found.\n");
		ret = 0;
	}
	return ret;
}
/**
 * zynqmp_r5_rproc_prepare()
 * adds carveouts for TCM banks and reserved memory regions
 *
 * @rproc: rproc instance of each R5 core
 *
 * Return: 0 for success else < 0 error code
 */
static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
{
	int ret;

	ret = add_tcm_banks(rproc);
	if (ret) {
		dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
		return ret;
	}

	ret = add_mem_regions_carveout(rproc);
	if (ret) {
		dev_err(&rproc->dev, "failed to get reserved mem regions %d\n", ret);
		return ret;
	}

	return 0;
}
/**
 * zynqmp_r5_rproc_unprepare()
 * Turns off TCM banks using power-domain id
 *
 * @rproc: rproc instance of each R5 core
 *
 * Return: always 0
 */
static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core;
	u32 pm_domain_id;
	int i;

	r5_core = (struct zynqmp_r5_core *)rproc->priv;

	for (i = 0; i < r5_core->tcm_bank_count; i++) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		if (zynqmp_pm_release_node(pm_domain_id))
			dev_warn(r5_core->dev,
				 "can't turn off TCM bank 0x%x", pm_domain_id);
	}

	return 0;
}
static const struct rproc_ops zynqmp_r5_rproc_ops = {
	.prepare	= zynqmp_r5_rproc_prepare,
	.unprepare	= zynqmp_r5_rproc_unprepare,
	.start		= zynqmp_r5_rproc_start,
	.stop		= zynqmp_r5_rproc_stop,
	.load		= rproc_elf_load_segments,
	.parse_fw	= zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check	= rproc_elf_sanity_check,
	.get_boot_addr	= rproc_elf_get_boot_addr,
};
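
/*
 * Only prepare/unprepare, start/stop and parse_fw are ZynqMP specific; the
 * remaining callbacks reuse the generic ELF loader helpers provided by the
 * remoteproc core.
 */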
/**
 * zynqmp_r5_add_rproc_core()
 * Allocate and add struct rproc object for each r5f core
 * This is called for each individual r5f core
 *
 * @cdev: Device of each r5 core
 *
 * Return: zynqmp_r5_core object for success else error code pointer
 */
static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
	struct zynqmp_r5_core *r5_core;
	struct rproc *r5_rproc;
	int ret;

	/* Set up DMA mask */
	ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
	if (ret)
		return ERR_PTR(ret);

	/* Allocate remoteproc instance */
	r5_rproc = rproc_alloc(cdev, dev_name(cdev),
			       &zynqmp_r5_rproc_ops,
			       NULL, sizeof(struct zynqmp_r5_core));
	if (!r5_rproc) {
		dev_err(cdev, "failed to allocate memory for rproc instance\n");
		return ERR_PTR(-ENOMEM);
	}

	r5_rproc->auto_boot = false;
	r5_core = (struct zynqmp_r5_core *)r5_rproc->priv;
	r5_core->dev = cdev;
	r5_core->np = dev_of_node(cdev);
	if (!r5_core->np) {
		dev_err(cdev, "can't get device node for r5 core\n");
		ret = -EINVAL;
		goto free_rproc;
	}

	/* Add R5 remoteproc core */
	ret = rproc_add(r5_rproc);
	if (ret) {
		dev_err(cdev, "failed to add r5 remoteproc\n");
		goto free_rproc;
	}

	r5_core->rproc = r5_rproc;
	return r5_core;

free_rproc:
	rproc_free(r5_rproc);
	return ERR_PTR(ret);
}
/**
 * zynqmp_r5_get_tcm_node()
 * Ideally this function should parse the tcm node and store the information
 * in the r5_core instance. For now, hardcoded TCM information is used.
 * This approach is used as TCM bindings for system-dt are still being
 * developed.
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and < 0 error code for failure.
 */
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int tcm_bank_count, tcm_node;
	int i, j;

	tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks);

	/* count per core tcm banks */
	tcm_bank_count = tcm_bank_count / cluster->core_count;

	/*
	 * r5 core 0 will use all of the TCM banks in lockstep mode.
	 * In split mode, r5 core0 will use 128k and r5 core1 will use another
	 * 128k. Assign TCM banks to each core accordingly.
	 */
	tcm_node = 0;
	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		for (j = 0; j < tcm_bank_count; j++) {
			/*
			 * Use pre-defined TCM reg values.
			 * Eventually this should be replaced by values
			 * parsed from dts.
			 */
			r5_core->tcm_banks[j] =
				(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
			tcm_node++;
		}

		r5_core->tcm_bank_count = tcm_bank_count;
	}

	return 0;
}
/**
 * zynqmp_r5_get_mem_region_node()
 * parse memory-region property and get reserved mem regions
 *
 * @r5_core: pointer to zynqmp_r5_core type object
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_get_mem_region_node(struct zynqmp_r5_core *r5_core)
{
	struct device_node *np, *rmem_np;
	struct reserved_mem **rmem;
	int res_mem_count, i;
	struct device *dev;

	dev = r5_core->dev;
	np = r5_core->np;

	res_mem_count = of_property_count_elems_of_size(np, "memory-region",
							sizeof(phandle));
	if (res_mem_count <= 0) {
		dev_warn(dev, "failed to get memory-region property %d\n",
			 res_mem_count);
		return 0;
	}

	rmem = devm_kcalloc(dev, res_mem_count,
			    sizeof(struct reserved_mem *), GFP_KERNEL);
	if (!rmem)
		return -ENOMEM;

	for (i = 0; i < res_mem_count; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i);
		if (!rmem_np)
			return -EINVAL;

		rmem[i] = of_reserved_mem_lookup(rmem_np);
		if (!rmem[i]) {
			of_node_put(rmem_np);
			return -EINVAL;
		}

		of_node_put(rmem_np);
	}

	r5_core->rmem_count = res_mem_count;
	r5_core->rmem = rmem;
	return 0;
}
/*
 * zynqmp_r5_core_init()
 * Create and initialize zynqmp_r5_core type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
			       enum rpu_oper_mode fw_reg_val,
			       enum rpu_tcm_comb tcm_mode)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int ret, i;

	ret = zynqmp_r5_get_tcm_node(cluster);
	if (ret < 0) {
		dev_err(dev, "can't get tcm node, err %d\n", ret);
		return ret;
	}

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];

		ret = zynqmp_r5_get_mem_region_node(r5_core);
		if (ret)
			dev_warn(dev, "memory-region prop failed %d\n", ret);

		/* Initialize r5 cores with power-domains parsed from dts */
		ret = of_property_read_u32_index(r5_core->np, "power-domains",
						 1, &r5_core->pm_domain_id);
		if (ret) {
			dev_err(dev, "failed to get power-domains property\n");
			return ret;
		}

		ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode);
		if (ret) {
			dev_err(dev, "failed to set r5 cluster mode %d, err %d\n",
				cluster->mode, ret);
			return ret;
		}
	}

	return 0;
}
/*
 * zynqmp_r5_cluster_init()
 * Create and initialize zynqmp_r5_cluster type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
{
	enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
	struct device *dev = cluster->dev;
	struct device_node *dev_node = dev_of_node(dev);
	struct platform_device *child_pdev;
	struct zynqmp_r5_core **r5_cores;
	enum rpu_oper_mode fw_reg_val;
	struct device **child_devs;
	struct device_node *child;
	enum rpu_tcm_comb tcm_mode;
	int core_count, ret, i;

	ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);

	/*
	 * on success returns 0, if not defined then returns -EINVAL,
	 * in that case, the default is LOCKSTEP mode. Anything else is
	 * a genuine error code < 0.
	 */
	if (ret != -EINVAL && ret != 0) {
		dev_err(dev, "Invalid xlnx,cluster-mode property\n");
		return ret;
	}

	/*
	 * For now the driver only supports split mode and lockstep mode.
	 * Fail driver probe if neither of them is set in dts.
	 */
	if (cluster_mode == LOCKSTEP_MODE) {
		tcm_mode = PM_RPU_TCM_COMB;
		fw_reg_val = PM_RPU_MODE_LOCKSTEP;
	} else if (cluster_mode == SPLIT_MODE) {
		tcm_mode = PM_RPU_TCM_SPLIT;
		fw_reg_val = PM_RPU_MODE_SPLIT;
	} else {
		dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
		return -EINVAL;
	}

	/*
	 * Number of cores is decided by number of child nodes of the
	 * r5f subsystem node in dts. If split mode is used in dts,
	 * 2 child nodes are expected.
	 * In lockstep mode, if two child nodes are available,
	 * only use the first child node, consider it as core0
	 * and ignore the core1 dt node.
	 */
	core_count = of_get_available_child_count(dev_node);
	if (core_count == 0) {
		dev_err(dev, "Invalid number of r5 cores %d", core_count);
		return -EINVAL;
	} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
		dev_err(dev, "Invalid number of r5 cores for split mode\n");
		return -EINVAL;
	} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
		dev_warn(dev, "Only r5 core0 will be used\n");
		core_count = 1;
	}

	child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
	if (!child_devs)
		return -ENOMEM;

	r5_cores = kcalloc(core_count,
			   sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
	if (!r5_cores) {
		kfree(child_devs);
		return -ENOMEM;
	}

	i = 0;
	for_each_available_child_of_node(dev_node, child) {
		child_pdev = of_find_device_by_node(child);
		if (!child_pdev) {
			of_node_put(child);
			ret = -ENODEV;
			goto release_r5_cores;
		}

		child_devs[i] = &child_pdev->dev;

		/* create and add remoteproc instance of type struct rproc */
		r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
		if (IS_ERR(r5_cores[i])) {
			of_node_put(child);
			ret = PTR_ERR(r5_cores[i]);
			r5_cores[i] = NULL;
			goto release_r5_cores;
		}

		/*
		 * If two child nodes are available in dts in lockstep mode,
		 * then ignore the second child node.
		 */
		if (cluster_mode == LOCKSTEP_MODE) {
			of_node_put(child);
			break;
		}

		i++;
	}

	cluster->mode = cluster_mode;
	cluster->core_count = core_count;
	cluster->r5_cores = r5_cores;

	ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
	if (ret < 0) {
		dev_err(dev, "failed to init r5 core err %d\n", ret);
		cluster->core_count = 0;
		cluster->r5_cores = NULL;

		/*
		 * at this point rproc resources for each core are allocated.
		 * adjust index to free resources in reverse order
		 */
		i = core_count - 1;
		goto release_r5_cores;
	}

	return 0;

release_r5_cores:
	while (i >= 0) {
		put_device(child_devs[i]);
		if (r5_cores[i]) {
			of_reserved_mem_device_release(r5_cores[i]->dev);
			rproc_del(r5_cores[i]->rproc);
			rproc_free(r5_cores[i]->rproc);
		}
		i--;
	}
	kfree(r5_cores);
	kfree(child_devs);
	return ret;
}
static void zynqmp_r5_cluster_exit(void *data)
{
	struct platform_device *pdev = (struct platform_device *)data;
	struct zynqmp_r5_cluster *cluster;
	struct zynqmp_r5_core *r5_core;
	int i;

	cluster = (struct zynqmp_r5_cluster *)platform_get_drvdata(pdev);
	if (!cluster)
		return;

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		of_reserved_mem_device_release(r5_core->dev);
		put_device(r5_core->dev);
		rproc_del(r5_core->rproc);
		rproc_free(r5_core->rproc);
	}

	kfree(cluster->r5_cores);
	kfree(cluster);
	platform_set_drvdata(pdev, NULL);
}
/*
 * zynqmp_r5_remoteproc_probe()
 * parse device-tree, initialize hardware and allocate required resources
 * and remoteproc ops
 *
 * @pdev: domain platform device for R5 cluster
 *
 * Return: 0 for success and < 0 for failure.
 */
static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
{
	struct zynqmp_r5_cluster *cluster;
	struct device *dev = &pdev->dev;
	int ret;

	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return -ENOMEM;

	cluster->dev = dev;

	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err_probe(dev, ret, "failed to populate platform dev\n");
		kfree(cluster);
		return ret;
	}

	/* wire in so each core can be cleaned up at driver remove */
	platform_set_drvdata(pdev, cluster);

	ret = zynqmp_r5_cluster_init(cluster);
	if (ret) {
		kfree(cluster);
		platform_set_drvdata(pdev, NULL);
		dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev);
	if (ret)
		return ret;

	return 0;
}
/* Match table for OF platform binding */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
	{ .compatible = "xlnx,zynqmp-r5fss", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
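
/*
 * Illustrative (not authoritative) device tree fragment this driver binds
 * against; node and label names here are examples only, see the upstream
 * xlnx,zynqmp-r5fss devicetree binding for the exact schema:
 *
 *	rf5ss@ff9a0000 {
 *		compatible = "xlnx,zynqmp-r5fss";
 *		xlnx,cluster-mode = <1>;	// 1 = lockstep, 0 = split
 *
 *		r5f-0 {
 *			compatible = "xlnx,zynqmp-r5f";
 *			power-domains = <&zynqmp_firmware PD_RPU_0>;
 *			memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>;
 *		};
 *	};
 */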
static struct platform_driver zynqmp_r5_remoteproc_driver = {
	.probe = zynqmp_r5_remoteproc_probe,
	.driver = {
		.name = "zynqmp_r5_remoteproc",
		.of_match_table = zynqmp_r5_remoteproc_match,
	},
};
module_platform_driver(zynqmp_r5_remoteproc_driver);

MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL");