/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW  1
#define CONFIG_FSL_DPA_PIRQ_FAST  1

static struct cpumask portal_cpus;
static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);

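/*
 * Bind a portal to @cpu: with CONFIG_FSL_PAMU, allocate an IOMMU domain,
 * configure PAMU stashing into the CPU's L1 cache, open a DMA window and
 * attach the portal device (falling back to "no IOMMU" on failure). In all
 * cases, program the stash destination (SDEST) for the portal's channel.
 */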
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
        struct device *dev = pcfg->dev;
        int window_count = 1;
        struct pamu_stash_attribute stash_attr;
        int ret;

        pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
        if (!pcfg->iommu_domain) {
                dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
                goto no_iommu;
        }
        stash_attr.cpu = cpu;
        stash_attr.cache = PAMU_ATTR_CACHE_L1;
        ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                    DOMAIN_ATTR_FSL_PAMU_STASH,
                                    &stash_attr);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
                        __func__, ret);
                goto out_domain_free;
        }
        ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
                                         IOMMU_READ | IOMMU_WRITE);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
                        __func__, ret);
                goto out_domain_free;
        }
        ret = iommu_attach_device(pcfg->iommu_domain, dev);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_attach_device() = %d", __func__,
                        ret);
                goto out_domain_free;
        }
        ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                    DOMAIN_ATTR_FSL_PAMU_ENABLE,
                                    &window_count);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
                        ret);
                goto out_detach_device;
        }

no_iommu:
#endif
        qman_set_sdest(pcfg->channel, cpu);

        return;

#ifdef CONFIG_FSL_PAMU
out_detach_device:
        iommu_detach_device(pcfg->iommu_domain, dev);
out_domain_free:
        iommu_domain_free(pcfg->iommu_domain);
        pcfg->iommu_domain = NULL;
#endif
}

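/*
 * One-time initialisation of a portal: apply the LIODN fixup, stash to the
 * assigned CPU, create the affine portal and choose which interrupt sources
 * are IRQ-driven rather than polled. Once every possible CPU has a portal,
 * initialise the CGRs; the first portal created also becomes qman_dma_portal.
 */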
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
        struct qman_portal *p;
        u32 irq_sources = 0;

        /* We need the same LIODN offset for all portals */
        qman_liodn_fixup(pcfg->channel);

        pcfg->iommu_domain = NULL;
        portal_set_cpu(pcfg, pcfg->cpu);

        p = qman_create_affine_portal(pcfg, NULL);
        if (!p) {
                dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
                         __func__, pcfg->cpu);
                return NULL;
        }

        /* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
        irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
                       QM_PIRQ_CSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
        irq_sources |= QM_PIRQ_DQRI;
#endif
        qman_p_irqsource_add(p, irq_sources);

        spin_lock(&qman_lock);
        if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
                /* all assigned portals are initialized now */
                qman_init_cgr_all();
        }

        if (!qman_dma_portal)
                qman_dma_portal = p;

        spin_unlock(&qman_lock);

        dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);

        return p;
}

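/*
 * Re-target a portal's stash destination (and its PAMU stash attribute, when
 * an IOMMU domain is in use) to @cpu; used by the CPU hotplug callbacks below.
 */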
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
                                                        unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
        struct pamu_stash_attribute stash_attr;
        int ret;

        if (pcfg->iommu_domain) {
                stash_attr.cpu = cpu;
                stash_attr.cache = PAMU_ATTR_CACHE_L1;
                ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
                if (ret < 0) {
                        dev_err(pcfg->dev,
                                "Failed to update pamu stash setting\n");
                        return;
                }
        }
#endif
        qman_set_sdest(pcfg->channel, cpu);
}

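/*
 * CPU hotplug "offline" callback: move the portal's IRQ affinity and stash
 * destination to any other online CPU.
 */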
static int qman_offline_cpu(unsigned int cpu)
{
        struct qman_portal *p;
        const struct qm_portal_config *pcfg;

        p = affine_portals[cpu];
        if (p) {
                pcfg = qman_get_qm_portal_config(p);
                if (pcfg) {
                        /* select any other online CPU */
                        cpu = cpumask_any_but(cpu_online_mask, cpu);
                        irq_set_affinity(pcfg->irq, cpumask_of(cpu));
                        qman_portal_update_sdest(pcfg, cpu);
                }
        }
        return 0;
}

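/*
 * CPU hotplug "online" callback: restore the portal's IRQ affinity and stash
 * destination to the CPU that owns it.
 */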
static int qman_online_cpu(unsigned int cpu)
{
        struct qman_portal *p;
        const struct qm_portal_config *pcfg;

        p = affine_portals[cpu];
        if (p) {
                pcfg = qman_get_qm_portal_config(p);
                if (pcfg) {
                        irq_set_affinity(pcfg->irq, cpumask_of(cpu));
                        qman_portal_update_sdest(pcfg, cpu);
                }
        }
        return 0;
}

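/*
 * Report portal probe status to dependent drivers: 0 if the portals have not
 * been probed yet, 1 if all portals are probed, -1 if a probe error occurred.
 */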
int qman_portals_probed(void)
{
        return __qman_portals_probed;
}
EXPORT_SYMBOL_GPL(qman_portals_probed);

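/*
 * Platform driver probe: map the cache-enabled (CE) and cache-inhibited (CI)
 * portal regions, read the channel from the "cell-index" property, assign the
 * portal to the first CPU that does not have one yet and initialise it. If
 * QMan was not reset before boot (e.g. after kexec), also shut down all frame
 * queues so they return to their reset state.
 */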
static int qman_portal_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct qm_portal_config *pcfg;
        struct resource *addr_phys[2];
        int irq, cpu, err, i;
        u32 val;

        err = qman_is_probed();
        if (!err)
                return -EPROBE_DEFER;
        if (err < 0) {
                dev_err(&pdev->dev, "failing probe due to qman probe error\n");
                return -ENODEV;
        }

        pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
        if (!pcfg) {
                __qman_portals_probed = -1;
                return -ENOMEM;
        }

        pcfg->dev = dev;

        addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
                                             DPAA_PORTAL_CE);
        if (!addr_phys[0]) {
                dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
                goto err_ioremap1;
        }

        addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
                                             DPAA_PORTAL_CI);
        if (!addr_phys[1]) {
                dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
                goto err_ioremap1;
        }

        err = of_property_read_u32(node, "cell-index", &val);
        if (err) {
                dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
                __qman_portals_probed = -1;
                return err;
        }
        pcfg->channel = val;
        pcfg->cpu = -1;
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0)
                goto err_ioremap1;
        pcfg->irq = irq;

        pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
                                        resource_size(addr_phys[0]),
                                        QBMAN_MEMREMAP_ATTR);
        if (!pcfg->addr_virt_ce) {
                dev_err(dev, "memremap::CE failed\n");
                goto err_ioremap1;
        }

        pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
                                resource_size(addr_phys[1]));
        if (!pcfg->addr_virt_ci) {
                dev_err(dev, "ioremap::CI failed\n");
                goto err_ioremap2;
        }

        pcfg->pools = qm_get_pools_sdqcr();

        spin_lock(&qman_lock);
        cpu = cpumask_next_zero(-1, &portal_cpus);
        if (cpu >= nr_cpu_ids) {
                __qman_portals_probed = 1;
                /* unassigned portal, skip init */
                spin_unlock(&qman_lock);
                return 0;
        }

        cpumask_set_cpu(cpu, &portal_cpus);
        spin_unlock(&qman_lock);
        pcfg->cpu = cpu;

        if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
                dev_err(dev, "dma_set_mask() failed\n");
                goto err_portal_init;
        }

        if (!init_pcfg(pcfg)) {
                dev_err(dev, "portal init failed\n");
                goto err_portal_init;
        }

        /* clear irq affinity if assigned cpu is offline */
        if (!cpu_online(cpu))
                qman_offline_cpu(cpu);

        if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
                /*
                 * QMan wasn't reset prior to boot (Kexec for example)
                 * Empty all the frame queues so they are in reset state
                 */
                for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
                        err = qman_shutdown_fq(i);
                        if (err) {
                                dev_err(dev, "Failed to shutdown frame queue %d\n",
                                        i);
                                goto err_portal_init;
                        }
                }
                qman_done_cleanup();
        }

        return 0;

err_portal_init:
        iounmap(pcfg->addr_virt_ci);
err_ioremap2:
        memunmap(pcfg->addr_virt_ce);
err_ioremap1:
        __qman_portals_probed = -1;

        return -ENXIO;
}

static const struct of_device_id qman_portal_ids[] = {
        {
                .compatible = "fsl,qman-portal",
        },
        {}
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);

static struct platform_driver qman_portal_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = qman_portal_ids,
        },
        .probe = qman_portal_probe,
};

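/*
 * Module init: register the platform driver, then hook qman_online_cpu() and
 * qman_offline_cpu() into the CPU hotplug state machine so that portal IRQ
 * affinity and stashing follow CPUs going offline and coming back online.
 */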
static int __init qman_portal_driver_register(struct platform_driver *drv)
{
        int ret;

        ret = platform_driver_register(drv);
        if (ret < 0)
                return ret;

        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "soc/qman_portal:online",
                                        qman_online_cpu, qman_offline_cpu);
        if (ret < 0) {
                pr_err("qman: failed to register hotplug callbacks.\n");
                platform_driver_unregister(drv);
                return ret;
        }
        return 0;
}

module_driver(qman_portal_driver,
              qman_portal_driver_register, platform_driver_unregister);