1 /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "qman_priv.h"
33 #define DQRR_MAXFILL 15
34 #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
35 #define IRQNAME "QMan portal %d"
36 #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
37 #define QMAN_POLL_LIMIT 32
38 #define QMAN_PIRQ_DQRR_ITHRESH 12
39 #define QMAN_DQRR_IT_MAX 15
40 #define QMAN_ITP_MAX 0xFFF
41 #define QMAN_PIRQ_MR_ITHRESH 4
42 #define QMAN_PIRQ_IPERIOD 100
44 /* Portal register assists */
46 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
47 /* Cache-inhibited register offsets */
48 #define QM_REG_EQCR_PI_CINH 0x3000
49 #define QM_REG_EQCR_CI_CINH 0x3040
50 #define QM_REG_EQCR_ITR 0x3080
51 #define QM_REG_DQRR_PI_CINH 0x3100
52 #define QM_REG_DQRR_CI_CINH 0x3140
53 #define QM_REG_DQRR_ITR 0x3180
54 #define QM_REG_DQRR_DCAP 0x31C0
55 #define QM_REG_DQRR_SDQCR 0x3200
56 #define QM_REG_DQRR_VDQCR 0x3240
57 #define QM_REG_DQRR_PDQCR 0x3280
58 #define QM_REG_MR_PI_CINH 0x3300
59 #define QM_REG_MR_CI_CINH 0x3340
60 #define QM_REG_MR_ITR 0x3380
61 #define QM_REG_CFG 0x3500
62 #define QM_REG_ISR 0x3600
63 #define QM_REG_IER 0x3640
64 #define QM_REG_ISDR 0x3680
65 #define QM_REG_IIR 0x36C0
66 #define QM_REG_ITPR 0x3740
68 /* Cache-enabled register offsets */
69 #define QM_CL_EQCR 0x0000
70 #define QM_CL_DQRR 0x1000
71 #define QM_CL_MR 0x2000
72 #define QM_CL_EQCR_PI_CENA 0x3000
73 #define QM_CL_EQCR_CI_CENA 0x3040
74 #define QM_CL_DQRR_PI_CENA 0x3100
75 #define QM_CL_DQRR_CI_CENA 0x3140
76 #define QM_CL_MR_PI_CENA 0x3300
77 #define QM_CL_MR_CI_CENA 0x3340
78 #define QM_CL_CR 0x3800
79 #define QM_CL_RR0 0x3900
80 #define QM_CL_RR1 0x3940
#else
/* Cache-inhibited register offsets */
84 #define QM_REG_EQCR_PI_CINH 0x0000
85 #define QM_REG_EQCR_CI_CINH 0x0004
86 #define QM_REG_EQCR_ITR 0x0008
87 #define QM_REG_DQRR_PI_CINH 0x0040
88 #define QM_REG_DQRR_CI_CINH 0x0044
89 #define QM_REG_DQRR_ITR 0x0048
90 #define QM_REG_DQRR_DCAP 0x0050
91 #define QM_REG_DQRR_SDQCR 0x0054
92 #define QM_REG_DQRR_VDQCR 0x0058
93 #define QM_REG_DQRR_PDQCR 0x005c
94 #define QM_REG_MR_PI_CINH 0x0080
95 #define QM_REG_MR_CI_CINH 0x0084
96 #define QM_REG_MR_ITR 0x0088
97 #define QM_REG_CFG 0x0100
98 #define QM_REG_ISR 0x0e00
99 #define QM_REG_IER 0x0e04
100 #define QM_REG_ISDR 0x0e08
101 #define QM_REG_IIR 0x0e0c
102 #define QM_REG_ITPR 0x0e14
104 /* Cache-enabled register offsets */
105 #define QM_CL_EQCR 0x0000
106 #define QM_CL_DQRR 0x1000
107 #define QM_CL_MR 0x2000
108 #define QM_CL_EQCR_PI_CENA 0x3000
109 #define QM_CL_EQCR_CI_CENA 0x3100
110 #define QM_CL_DQRR_PI_CENA 0x3200
111 #define QM_CL_DQRR_CI_CENA 0x3300
112 #define QM_CL_MR_PI_CENA 0x3400
113 #define QM_CL_MR_CI_CENA 0x3500
114 #define QM_CL_CR 0x3800
115 #define QM_CL_RR0 0x3900
116 #define QM_CL_RR1 0x3940
120 * BTW, the drivers (and h/w programming model) already obtain the required
121 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
* or other order-preserving primitives simply degrades performance. Hence the
* use of the __raw_*() interfaces, which simply ensure that the compiler treats
* the portal registers as volatile.
127 /* Cache-enabled ring access */
128 #define qm_cl(base, idx) ((void *)base + ((idx) << 6))
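/*
 * Illustrative note: ring entries are laid out one per 64-byte cacheline,
 * so qm_cl() turns an index into a byte offset with a shift of 6; e.g.
 * qm_cl(dqrr->ring, 3) addresses entry 3 at ring base + 192.
 */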
133 * pmode == production mode
* cmode == consumption mode
* dmode == h/w dequeue mode
* Enum values use 3 letter codes. First letter matches the portal mode,
* remaining two letters indicate:
138 * ci == cache-inhibited portal register
139 * ce == cache-enabled portal register
140 * vb == in-band valid-bit (cache-enabled)
141 * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
142 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
144 enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
145 qm_eqcr_pci = 0, /* PI index, cache-inhibited */
146 qm_eqcr_pce = 1, /* PI index, cache-enabled */
147 qm_eqcr_pvb = 2 /* valid-bit */
149 enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
150 qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
151 qm_dqrr_dpull = 1 /* PDQCR */
153 enum qm_dqrr_pmode { /* s/w-only */
154 qm_dqrr_pci, /* reads DQRR_PI_CINH */
155 qm_dqrr_pce, /* reads DQRR_PI_CENA */
156 qm_dqrr_pvb /* reads valid-bit */
158 enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
159 qm_dqrr_cci = 0, /* CI index, cache-inhibited */
160 qm_dqrr_cce = 1, /* CI index, cache-enabled */
161 qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
163 enum qm_mr_pmode { /* s/w-only */
164 qm_mr_pci, /* reads MR_PI_CINH */
165 qm_mr_pce, /* reads MR_PI_CENA */
166 qm_mr_pvb /* reads valid-bit */
168 enum qm_mr_cmode { /* matches QCSP_CFG::MM */
169 qm_mr_cci = 0, /* CI index, cache-inhibited */
170 qm_mr_cce = 1 /* CI index, cache-enabled */
173 /* --- Portal structures --- */
175 #define QM_EQCR_SIZE 8
176 #define QM_DQRR_SIZE 16
179 /* "Enqueue Command" */
180 struct qm_eqcr_entry {
181 u8 _ncw_verb; /* writes to this are non-coherent */
185 __be32 fqid; /* 24-bit */
190 #define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value: */
192 #define QM_EQCR_VERB_CMD_ENQUEUE 0x01
193 #define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
194 #define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
195 #define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
198 struct qm_eqcr_entry *ring, *cursor;
199 u8 ci, available, ithresh, vbit;
200 #ifdef CONFIG_FSL_DPAA_CHECKING
202 enum qm_eqcr_pmode pmode;
207 const struct qm_dqrr_entry *ring, *cursor;
208 u8 pi, ci, fill, ithresh, vbit;
209 #ifdef CONFIG_FSL_DPAA_CHECKING
210 enum qm_dqrr_dmode dmode;
211 enum qm_dqrr_pmode pmode;
212 enum qm_dqrr_cmode cmode;
217 union qm_mr_entry *ring, *cursor;
218 u8 pi, ci, fill, ithresh, vbit;
219 #ifdef CONFIG_FSL_DPAA_CHECKING
220 enum qm_mr_pmode pmode;
221 enum qm_mr_cmode cmode;
225 /* MC (Management Command) command */
226 /* "FQ" command layout */
230 __be32 fqid; /* 24-bit */
234 /* "CGR" command layout */
242 #define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains: */
244 #define QM_MCC_VERB_INITFQ_PARKED 0x40
245 #define QM_MCC_VERB_INITFQ_SCHED 0x41
246 #define QM_MCC_VERB_QUERYFQ 0x44
247 #define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
248 #define QM_MCC_VERB_QUERYWQ 0x46
249 #define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
250 #define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
251 #define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
252 #define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
253 #define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
254 #define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
255 #define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
256 #define QM_MCC_VERB_INITCGR 0x50
257 #define QM_MCC_VERB_MODIFYCGR 0x51
258 #define QM_MCC_VERB_CGRTESTWRITE 0x52
259 #define QM_MCC_VERB_QUERYCGR 0x58
260 #define QM_MCC_VERB_QUERYCONGESTION 0x59
261 union qm_mc_command {
263 u8 _ncw_verb; /* writes to this are non-coherent */
266 struct qm_mcc_initfq initfq;
267 struct qm_mcc_initcgr initcgr;
269 struct qm_mcc_cgr cgr;
272 /* MC (Management Command) result */
274 struct qm_mcr_queryfq {
278 struct qm_fqd fqd; /* the FQD fields are here */
282 /* "Alter FQ State Commands" */
283 struct qm_mcr_alterfq {
286 u8 fqs; /* Frame Queue Status */
289 #define QM_MCR_VERB_RRID 0x80
290 #define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
291 #define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
292 #define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
293 #define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
294 #define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
295 #define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
296 #define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
297 #define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
298 #define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
299 #define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
300 #define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
301 #define QM_MCR_RESULT_NULL 0x00
302 #define QM_MCR_RESULT_OK 0xf0
303 #define QM_MCR_RESULT_ERR_FQID 0xf1
304 #define QM_MCR_RESULT_ERR_FQSTATE 0xf2
305 #define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
306 #define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
307 #define QM_MCR_RESULT_PENDING 0xf8
308 #define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
309 #define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
310 #define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
311 #define QM_MCR_TIMEOUT 10000 /* us */
318 struct qm_mcr_queryfq queryfq;
319 struct qm_mcr_alterfq alterfq;
320 struct qm_mcr_querycgr querycgr;
321 struct qm_mcr_querycongestion querycongestion;
322 struct qm_mcr_querywq querywq;
323 struct qm_mcr_queryfq_np queryfq_np;
327 union qm_mc_command *cr;
328 union qm_mc_result *rr;
330 #ifdef CONFIG_FSL_DPAA_CHECKING
332 /* Can be _mc_start()ed */
334 /* Can be _mc_commit()ed or _mc_abort()ed */
336 /* Can only be _mc_retry()ed */
343 void *ce; /* cache-enabled */
344 __be32 *ce_be; /* same value as above but for direct access */
345 void __iomem *ci; /* cache-inhibited */
350 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
351 * and including 'mc' fits within a cacheline (yay!). The 'config' part
* is setup-only, so isn't a cause for concern. In other words, don't
353 * rearrange this structure on a whim, there be dragons ...
360 } ____cacheline_aligned;
362 /* Cache-inhibited register access. */
363 static inline u32 qm_in(struct qm_portal *p, u32 offset)
365 return ioread32be(p->addr.ci + offset);
368 static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
370 iowrite32be(val, p->addr.ci + offset);
373 /* Cache Enabled Portal Access */
374 static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
376 dpaa_invalidate(p->addr.ce + offset);
379 static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
381 dpaa_touch_ro(p->addr.ce + offset);
384 static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
386 return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
389 /* --- EQCR API --- */
391 #define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
392 #define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
394 /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
395 static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
397 uintptr_t addr = (uintptr_t)p;
addr &= ~EQCR_CARRY;
return (struct qm_eqcr_entry *)addr;
404 /* Bit-wise logic to convert a ring pointer to a ring index */
405 static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
407 return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
410 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
411 static inline void eqcr_inc(struct qm_eqcr *eqcr)
413 /* increment to the next EQCR pointer and handle overflow and 'vbit' */
414 struct qm_eqcr_entry *partial = eqcr->cursor + 1;
416 eqcr->cursor = eqcr_carryclear(partial);
417 if (partial != eqcr->cursor)
418 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
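/*
 * Worked example (a sketch, assuming the one-entry-per-cacheline layout
 * used by qm_cl() above, i.e. EQCR_SHIFT == 6): EQCR_CARRY == (8 << 6) ==
 * 0x200. With the ring base size-aligned, incrementing a cursor at index 7
 * sets that carry bit in the partial address; eqcr_carryclear() masks it
 * off, wrapping the cursor to index 0, and the resulting difference between
 * 'partial' and the wrapped cursor is what toggles the valid-bit above.
 */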
421 static inline int qm_eqcr_init(struct qm_portal *portal,
422 enum qm_eqcr_pmode pmode,
423 unsigned int eq_stash_thresh,
426 struct qm_eqcr *eqcr = &portal->eqcr;
430 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
431 eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
432 qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
433 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
434 eqcr->cursor = eqcr->ring + pi;
435 eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
436 QM_EQCR_VERB_VBIT : 0;
437 eqcr->available = QM_EQCR_SIZE - 1 -
438 dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
439 eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
440 #ifdef CONFIG_FSL_DPAA_CHECKING
444 cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
445 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
446 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
447 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
448 qm_out(portal, QM_REG_CFG, cfg);
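/*
 * Worked example for the 'available' computation above (illustrative
 * numbers): dpaa_cyc_diff(size, first, last) is the cyclic distance
 * (last - first) mod size. With QM_EQCR_SIZE == 8, ci == 6 and pi == 2,
 * the ring holds dpaa_cyc_diff(8, 6, 2) == 4 in-flight entries, so
 * available == 8 - 1 - 4 == 3 (one slot is held back so that a full ring
 * can be told apart from an empty one).
 */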
452 static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
454 return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
457 static inline void qm_eqcr_finish(struct qm_portal *portal)
459 struct qm_eqcr *eqcr = &portal->eqcr;
460 u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
461 u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
463 DPAA_ASSERT(!eqcr->busy);
464 if (pi != eqcr_ptr2idx(eqcr->cursor))
465 pr_crit("losing uncommitted EQCR entries\n");
467 pr_crit("missing existing EQCR completions\n");
468 if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
469 pr_crit("EQCR destroyed unquiesced\n");
472 static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
475 struct qm_eqcr *eqcr = &portal->eqcr;
477 DPAA_ASSERT(!eqcr->busy);
478 if (!eqcr->available)
481 #ifdef CONFIG_FSL_DPAA_CHECKING
484 dpaa_zero(eqcr->cursor);
488 static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
491 struct qm_eqcr *eqcr = &portal->eqcr;
494 DPAA_ASSERT(!eqcr->busy);
495 if (!eqcr->available) {
497 eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
499 diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
500 eqcr->available += diff;
504 #ifdef CONFIG_FSL_DPAA_CHECKING
507 dpaa_zero(eqcr->cursor);
511 static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
513 DPAA_ASSERT(eqcr->busy);
514 DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
515 DPAA_ASSERT(eqcr->available >= 1);
518 static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
520 struct qm_eqcr *eqcr = &portal->eqcr;
521 struct qm_eqcr_entry *eqcursor;
523 eqcr_commit_checks(eqcr);
524 DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
526 eqcursor = eqcr->cursor;
527 eqcursor->_ncw_verb = myverb | eqcr->vbit;
528 dpaa_flush(eqcursor);
531 #ifdef CONFIG_FSL_DPAA_CHECKING
536 static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
538 qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
541 static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
543 struct qm_eqcr *eqcr = &portal->eqcr;
544 u8 diff, old_ci = eqcr->ci;
546 eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
547 qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
548 diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
eqcr->available += diff;
return diff;
553 static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
555 struct qm_eqcr *eqcr = &portal->eqcr;
557 eqcr->ithresh = ithresh;
558 qm_out(portal, QM_REG_EQCR_ITR, ithresh);
561 static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
563 struct qm_eqcr *eqcr = &portal->eqcr;
565 return eqcr->available;
568 static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
570 struct qm_eqcr *eqcr = &portal->eqcr;
572 return QM_EQCR_SIZE - 1 - eqcr->available;
575 /* --- DQRR API --- */
577 #define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
578 #define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
580 static const struct qm_dqrr_entry *dqrr_carryclear(
581 const struct qm_dqrr_entry *p)
583 uintptr_t addr = (uintptr_t)p;
addr &= ~DQRR_CARRY;
return (const struct qm_dqrr_entry *)addr;
590 static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
592 return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
595 static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
597 return dqrr_carryclear(e + 1);
600 static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
602 qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
603 ((mf & (QM_DQRR_SIZE - 1)) << 20));
606 static inline int qm_dqrr_init(struct qm_portal *portal,
607 const struct qm_portal_config *config,
608 enum qm_dqrr_dmode dmode,
609 enum qm_dqrr_pmode pmode,
610 enum qm_dqrr_cmode cmode, u8 max_fill)
612 struct qm_dqrr *dqrr = &portal->dqrr;
615 /* Make sure the DQRR will be idle when we enable */
616 qm_out(portal, QM_REG_DQRR_SDQCR, 0);
617 qm_out(portal, QM_REG_DQRR_VDQCR, 0);
618 qm_out(portal, QM_REG_DQRR_PDQCR, 0);
619 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
620 dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
621 dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
622 dqrr->cursor = dqrr->ring + dqrr->ci;
623 dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
624 dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
625 QM_DQRR_VERB_VBIT : 0;
626 dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
627 #ifdef CONFIG_FSL_DPAA_CHECKING
632 /* Invalidate every ring entry before beginning */
633 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
634 dpaa_invalidate(qm_cl(dqrr->ring, cfg));
635 cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
636 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
637 ((dmode & 1) << 18) | /* DP */
638 ((cmode & 3) << 16) | /* DCM */
640 (0 ? 0x40 : 0) | /* Ignore RP */
641 (0 ? 0x10 : 0); /* Ignore SP */
642 qm_out(portal, QM_REG_CFG, cfg);
643 qm_dqrr_set_maxfill(portal, max_fill);
647 static inline void qm_dqrr_finish(struct qm_portal *portal)
649 #ifdef CONFIG_FSL_DPAA_CHECKING
650 struct qm_dqrr *dqrr = &portal->dqrr;
652 if (dqrr->cmode != qm_dqrr_cdc &&
653 dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
654 pr_crit("Ignoring completed DQRR entries\n");
658 static inline const struct qm_dqrr_entry *qm_dqrr_current(
659 struct qm_portal *portal)
661 struct qm_dqrr *dqrr = &portal->dqrr;
668 static inline u8 qm_dqrr_next(struct qm_portal *portal)
670 struct qm_dqrr *dqrr = &portal->dqrr;
672 DPAA_ASSERT(dqrr->fill);
dqrr->cursor = dqrr_inc(dqrr->cursor);
return --dqrr->fill;
677 static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
679 struct qm_dqrr *dqrr = &portal->dqrr;
680 struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
682 DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
683 #ifndef CONFIG_FSL_PAMU
685 * If PAMU is not available we need to invalidate the cache.
* When PAMU is available the cache is updated by stashing.
688 dpaa_invalidate_touch_ro(res);
690 if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
if (!dqrr->pi)
	dqrr->vbit ^= QM_DQRR_VERB_VBIT;
dqrr->fill++;
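/*
 * Valid-bit sketch: h/w flips the VB bit in each entry's verb every lap of
 * the ring, so s/w accepts an entry only while that bit matches its
 * expected polarity, and inverts the expectation when pi wraps back to 0.
 * An entry whose VB differs from dqrr->vbit is stale or not yet fully
 * produced, so the producer index is left where it is.
 */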
698 static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
699 const struct qm_dqrr_entry *dq,
702 __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
703 int idx = dqrr_ptr2idx(dq);
705 DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
706 DPAA_ASSERT((dqrr->ring + idx) == dq);
707 DPAA_ASSERT(idx < QM_DQRR_SIZE);
708 qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
709 ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
710 idx); /* DQRR_DCAP::DCAP_CI */
713 static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
715 __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
717 DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
718 qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
719 (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
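/*
 * Usage sketch (illustrative): acknowledge DQRR entries 0 and 3 in a
 * single DCAP write using the bitmask form above:
 *
 *	qm_dqrr_cdc_consume_n(portal, BIT(0) | BIT(3));
 */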
722 static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
724 qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
727 static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
729 qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
732 static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
if (ithresh > QMAN_DQRR_IT_MAX)
	return -EINVAL;
qm_out(portal, QM_REG_DQRR_ITR, ithresh);
return 0;
/* --- MR API --- */
#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
746 #define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
748 static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
750 uintptr_t addr = (uintptr_t)p;
addr &= ~MR_CARRY;
return (union qm_mr_entry *)addr;
757 static inline int mr_ptr2idx(const union qm_mr_entry *e)
759 return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
762 static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
764 return mr_carryclear(e + 1);
767 static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
768 enum qm_mr_cmode cmode)
770 struct qm_mr *mr = &portal->mr;
773 mr->ring = portal->addr.ce + QM_CL_MR;
774 mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
775 mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
776 mr->cursor = mr->ring + mr->ci;
777 mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
778 mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
779 ? QM_MR_VERB_VBIT : 0;
780 mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
781 #ifdef CONFIG_FSL_DPAA_CHECKING
785 cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
786 ((cmode & 1) << 8); /* QCSP_CFG:MM */
787 qm_out(portal, QM_REG_CFG, cfg);
791 static inline void qm_mr_finish(struct qm_portal *portal)
793 struct qm_mr *mr = &portal->mr;
795 if (mr->ci != mr_ptr2idx(mr->cursor))
796 pr_crit("Ignoring completed MR entries\n");
799 static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
801 struct qm_mr *mr = &portal->mr;
808 static inline int qm_mr_next(struct qm_portal *portal)
810 struct qm_mr *mr = &portal->mr;
812 DPAA_ASSERT(mr->fill);
mr->cursor = mr_inc(mr->cursor);
return --mr->fill;
817 static inline void qm_mr_pvb_update(struct qm_portal *portal)
819 struct qm_mr *mr = &portal->mr;
820 union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
822 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
824 if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi)
	mr->vbit ^= QM_MR_VERB_VBIT;
mr->fill++;
831 dpaa_invalidate_touch_ro(res);
834 static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
836 struct qm_mr *mr = &portal->mr;
838 DPAA_ASSERT(mr->cmode == qm_mr_cci);
839 mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
840 qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
843 static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
845 struct qm_mr *mr = &portal->mr;
847 DPAA_ASSERT(mr->cmode == qm_mr_cci);
848 mr->ci = mr_ptr2idx(mr->cursor);
849 qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
852 static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
854 qm_out(portal, QM_REG_MR_ITR, ithresh);
857 /* --- Management command API --- */
859 static inline int qm_mc_init(struct qm_portal *portal)
862 struct qm_mc *mc = &portal->mc;
864 mc->cr = portal->addr.ce + QM_CL_CR;
865 mc->rr = portal->addr.ce + QM_CL_RR0;
867 * The expected valid bit polarity for the next CR command is 0
868 * if RR1 contains a valid response, and is 1 if RR0 contains a
869 * valid response. If both RR contain all 0, this indicates either
870 * that no command has been executed since reset (in which case the
* expected valid bit polarity is 1).
rr0 = mc->rr->verb;
rr1 = (mc->rr+1)->verb;
if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
	mc->rridx = 1;
else
	mc->rridx = 0;
879 mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
880 #ifdef CONFIG_FSL_DPAA_CHECKING
881 mc->state = qman_mc_idle;
886 static inline void qm_mc_finish(struct qm_portal *portal)
888 #ifdef CONFIG_FSL_DPAA_CHECKING
889 struct qm_mc *mc = &portal->mc;
891 DPAA_ASSERT(mc->state == qman_mc_idle);
892 if (mc->state != qman_mc_idle)
893 pr_crit("Losing incomplete MC command\n");
897 static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
899 struct qm_mc *mc = &portal->mc;
901 DPAA_ASSERT(mc->state == qman_mc_idle);
902 #ifdef CONFIG_FSL_DPAA_CHECKING
903 mc->state = qman_mc_user;
909 static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
911 struct qm_mc *mc = &portal->mc;
912 union qm_mc_result *rr = mc->rr + mc->rridx;
914 DPAA_ASSERT(mc->state == qman_mc_user);
916 mc->cr->_ncw_verb = myverb | mc->vbit;
918 dpaa_invalidate_touch_ro(rr);
919 #ifdef CONFIG_FSL_DPAA_CHECKING
920 mc->state = qman_mc_hw;
924 static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
926 struct qm_mc *mc = &portal->mc;
927 union qm_mc_result *rr = mc->rr + mc->rridx;
929 DPAA_ASSERT(mc->state == qman_mc_hw);
931 * The inactive response register's verb byte always returns zero until
932 * its command is submitted and completed. This includes the valid-bit,
933 * in case you were wondering...
936 dpaa_invalidate_touch_ro(rr);
940 mc->vbit ^= QM_MCC_VERB_VBIT;
941 #ifdef CONFIG_FSL_DPAA_CHECKING
942 mc->state = qman_mc_idle;
947 static inline int qm_mc_result_timeout(struct qm_portal *portal,
948 union qm_mc_result **mcr)
950 int timeout = QM_MCR_TIMEOUT;
953 *mcr = qm_mc_result(portal);
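/*
 * Canonical management-command sequence (a sketch mirroring
 * qman_query_fq() further down):
 *
 *	mcc = qm_mc_start(&p->p);
 *	qm_fqid_set(&mcc->fq, fqid);
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		return -ETIMEDOUT;
 *	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
 */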
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	fq->flags |= mask;
}
static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	fq->flags &= ~mask;
}
972 static inline int fq_isset(struct qman_fq *fq, u32 mask)
974 return fq->flags & mask;
977 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
979 return !(fq->flags & mask);
984 /* PORTAL_BITS_*** - dynamic, strictly internal */
986 /* interrupt sources processed by portal_isr(), configurable */
987 unsigned long irq_sources;
988 u32 use_eqcr_ci_stashing;
989 /* only 1 volatile dequeue at a time */
990 struct qman_fq *vdqcr_owned;
992 /* probing time config params for cpu-affine portals */
993 const struct qm_portal_config *config;
994 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
995 struct qman_cgrs *cgrs;
996 /* linked-list of CSCN handlers. */
997 struct list_head cgr_cbs;
1000 struct work_struct congestion_work;
1001 struct work_struct mr_work;
1002 char irqname[MAX_IRQNAME];
1005 static cpumask_t affine_mask;
1006 static DEFINE_SPINLOCK(affine_mask_lock);
1007 static u16 affine_channels[NR_CPUS];
1008 static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
1009 struct qman_portal *affine_portals[NR_CPUS];
1011 static inline struct qman_portal *get_affine_portal(void)
1013 return &get_cpu_var(qman_affine_portal);
1016 static inline void put_affine_portal(void)
1018 put_cpu_var(qman_affine_portal);
1021 static struct workqueue_struct *qm_portal_wq;
1023 int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
res = qm_dqrr_set_ithresh(&portal->p, ithresh);
if (res)
	return res;
portal->p.dqrr.ithresh = ithresh;
return 0;
1038 EXPORT_SYMBOL(qman_dqrr_set_ithresh);
1040 void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
1042 if (portal && ithresh)
1043 *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
1045 EXPORT_SYMBOL(qman_dqrr_get_ithresh);
1047 void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
1049 if (portal && iperiod)
1050 *iperiod = qm_in(&portal->p, QM_REG_ITPR);
1052 EXPORT_SYMBOL(qman_portal_get_iperiod);
1054 int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
if (!portal || iperiod > QMAN_ITP_MAX)
	return -EINVAL;
qm_out(&portal->p, QM_REG_ITPR, iperiod);
return 0;
1063 EXPORT_SYMBOL(qman_portal_set_iperiod);
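/*
 * Tuning sketch (illustrative values): a caller adjusting interrupt
 * coalescing would typically pair the two knobs, batching dequeues up to a
 * threshold or until the inactivity period expires:
 *
 *	err = qman_dqrr_set_ithresh(portal, 8);
 *	if (!err)
 *		err = qman_portal_set_iperiod(portal, 2048);
 */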
1065 int qman_wq_alloc(void)
qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
if (!qm_portal_wq)
	return -ENOMEM;
return 0;
1074 * This is what everything can wait on, even if it migrates to a different cpu
1075 * to the one whose affine portal it is waiting on.
1077 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
1079 static struct qman_fq **fq_table;
1080 static u32 num_fqids;
1082 int qman_alloc_fq_table(u32 _num_fqids)
1084 num_fqids = _num_fqids;
fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
			       num_fqids, 2));
if (!fq_table)
	return -ENOMEM;
1091 pr_debug("Allocated fq lookup table at %p, entry count %u\n",
1092 fq_table, num_fqids * 2);
1096 static struct qman_fq *idx_to_fq(u32 idx)
1100 #ifdef CONFIG_FSL_DPAA_CHECKING
1101 if (WARN_ON(idx >= num_fqids * 2))
1105 DPAA_ASSERT(!fq || idx == fq->idx);
1111 * Only returns full-service fq objects, not enqueue-only
1112 * references (QMAN_FQ_FLAG_NO_MODIFY).
1114 static struct qman_fq *fqid_to_fq(u32 fqid)
1116 return idx_to_fq(fqid * 2);
1119 static struct qman_fq *tag_to_fq(u32 tag)
1121 #if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
1128 static u32 fq_to_tag(struct qman_fq *fq)
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
1137 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1138 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1139 unsigned int poll_limit);
1140 static void qm_congestion_task(struct work_struct *work);
1141 static void qm_mr_process_task(struct work_struct *work);
1143 static irqreturn_t portal_isr(int irq, void *ptr)
1145 struct qman_portal *p = ptr;
1147 u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
1148 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
1153 /* DQRR-handling if it's interrupt-driven */
1154 if (is & QM_PIRQ_DQRI)
1155 __poll_portal_fast(p, QMAN_POLL_LIMIT);
1156 /* Handling of anything else that's interrupt-driven */
1157 clear |= __poll_portal_slow(p, is);
1158 qm_out(&p->p, QM_REG_ISR, clear);
1162 static int drain_mr_fqrni(struct qm_portal *p)
1164 const union qm_mr_entry *msg;
1166 msg = qm_mr_current(p);
1169 * if MR was full and h/w had other FQRNI entries to produce, we
1170 * need to allow it time to produce those entries once the
1171 * existing entries are consumed. A worst-case situation
1172 * (fully-loaded system) means h/w sequencers may have to do 3-4
1173 * other things before servicing the portal's MR pump, each of
1174 * which (if slow) may take ~50 qman cycles (which is ~200
1175 * processor cycles). So rounding up and then multiplying this
1176 * worst-case estimate by a factor of 10, just to be
1177 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
1178 * one entry at a time, so h/w has an opportunity to produce new
1179 * entries well before the ring has been fully consumed, so
1180 * we're being *really* paranoid here.
1183 msg = qm_mr_current(p);
1187 if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
1188 /* We aren't draining anything but FQRNIs */
1189 pr_err("Found verb 0x%x in MR\n", msg->verb);
1193 qm_mr_cci_consume(p, 1);
1197 static int qman_create_portal(struct qman_portal *portal,
1198 const struct qm_portal_config *c,
1199 const struct qman_cgrs *cgrs)
1201 struct qm_portal *p;
1207 #ifdef CONFIG_FSL_PAMU
1208 /* PAMU is required for stashing */
1209 portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
1211 portal->use_eqcr_ci_stashing = 0;
1214 * prep the low-level portal struct with the mapped addresses from the
1215 * config, everything that follows depends on it and "config" is more
1218 p->addr.ce = c->addr_virt_ce;
1219 p->addr.ce_be = c->addr_virt_ce;
1220 p->addr.ci = c->addr_virt_ci;
1222 * If CI-stashing is used, the current defaults use a threshold of 3,
* and stash with higher-than-DQRR priority.
1225 if (qm_eqcr_init(p, qm_eqcr_pvb,
1226 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
1227 dev_err(c->dev, "EQCR initialisation failed\n");
1230 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
1231 qm_dqrr_cdc, DQRR_MAXFILL)) {
1232 dev_err(c->dev, "DQRR initialisation failed\n");
1235 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
1236 dev_err(c->dev, "MR initialisation failed\n");
1239 if (qm_mc_init(p)) {
1240 dev_err(c->dev, "MC initialisation failed\n");
1243 /* static interrupt-gating controls */
1244 qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
1245 qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
1246 qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
1247 portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
1250 /* initial snapshot is no-depletion */
1251 qman_cgrs_init(&portal->cgrs[1]);
1253 portal->cgrs[0] = *cgrs;
1255 /* if the given mask is NULL, assume all CGRs can be seen */
1256 qman_cgrs_fill(&portal->cgrs[0]);
1257 INIT_LIST_HEAD(&portal->cgr_cbs);
1258 spin_lock_init(&portal->cgr_lock);
1259 INIT_WORK(&portal->congestion_work, qm_congestion_task);
1260 INIT_WORK(&portal->mr_work, qm_mr_process_task);
1262 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
1263 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
1264 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
1266 qm_out(p, QM_REG_ISDR, isdr);
1267 portal->irq_sources = 0;
1268 qm_out(p, QM_REG_IER, 0);
1269 qm_out(p, QM_REG_ISR, 0xffffffff);
1270 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
1271 if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
1272 dev_err(c->dev, "request_irq() failed\n");
1276 if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
1279 /* Need EQCR to be empty before continuing */
1280 isdr &= ~QM_PIRQ_EQCI;
1281 qm_out(p, QM_REG_ISDR, isdr);
1282 ret = qm_eqcr_get_fill(p);
1284 dev_err(c->dev, "EQCR unclean\n");
1285 goto fail_eqcr_empty;
1287 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
1288 qm_out(p, QM_REG_ISDR, isdr);
1289 if (qm_dqrr_current(p)) {
1290 dev_err(c->dev, "DQRR unclean\n");
1291 qm_dqrr_cdc_consume_n(p, 0xffff);
1293 if (qm_mr_current(p) && drain_mr_fqrni(p)) {
1294 /* special handling, drain just in case it's a few FQRNIs */
1295 const union qm_mr_entry *e = qm_mr_current(p);
1297 dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
1298 e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
1299 goto fail_dqrr_mr_empty;
1303 qm_out(p, QM_REG_ISDR, 0);
1304 qm_out(p, QM_REG_IIR, 0);
1305 /* Write a sane SDQCR */
1306 qm_dqrr_sdqcr_set(p, portal->sdqcr);
1312 free_irq(c->irq, portal);
1314 kfree(portal->cgrs);
1327 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1328 const struct qman_cgrs *cgrs)
1330 struct qman_portal *portal;
1333 portal = &per_cpu(qman_affine_portal, c->cpu);
1334 err = qman_create_portal(portal, c, cgrs);
1338 spin_lock(&affine_mask_lock);
1339 cpumask_set_cpu(c->cpu, &affine_mask);
1340 affine_channels[c->cpu] = c->channel;
1341 affine_portals[c->cpu] = portal;
1342 spin_unlock(&affine_mask_lock);
1347 static void qman_destroy_portal(struct qman_portal *qm)
1349 const struct qm_portal_config *pcfg;
1351 /* Stop dequeues on the portal */
1352 qm_dqrr_sdqcr_set(&qm->p, 0);
1355 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1356 * something related to QM_PIRQ_EQCI, this may need fixing.
1357 * Also, due to the prefetching model used for CI updates in the enqueue
1358 * path, this update will only invalidate the CI cacheline *after*
1359 * working on it, so we need to call this twice to ensure a full update
1360 * irrespective of where the enqueue processing was at when the teardown
1363 qm_eqcr_cce_update(&qm->p);
1364 qm_eqcr_cce_update(&qm->p);
1367 free_irq(pcfg->irq, qm);
1370 qm_mc_finish(&qm->p);
1371 qm_mr_finish(&qm->p);
1372 qm_dqrr_finish(&qm->p);
1373 qm_eqcr_finish(&qm->p);
1378 const struct qm_portal_config *qman_destroy_affine_portal(void)
1380 struct qman_portal *qm = get_affine_portal();
1381 const struct qm_portal_config *pcfg;
1387 qman_destroy_portal(qm);
1389 spin_lock(&affine_mask_lock);
1390 cpumask_clear_cpu(cpu, &affine_mask);
1391 spin_unlock(&affine_mask_lock);
1392 put_affine_portal();
1396 /* Inline helper to reduce nesting in __poll_portal_slow() */
1397 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
1398 const union qm_mr_entry *msg, u8 verb)
1401 case QM_MR_VERB_FQRL:
1402 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
1403 fq_clear(fq, QMAN_FQ_STATE_ORL);
1405 case QM_MR_VERB_FQRN:
1406 DPAA_ASSERT(fq->state == qman_fq_state_parked ||
1407 fq->state == qman_fq_state_sched);
1408 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
1409 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
1410 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
1411 fq_set(fq, QMAN_FQ_STATE_NE);
1412 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
1413 fq_set(fq, QMAN_FQ_STATE_ORL);
1414 fq->state = qman_fq_state_retired;
1416 case QM_MR_VERB_FQPN:
1417 DPAA_ASSERT(fq->state == qman_fq_state_sched);
1418 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
1419 fq->state = qman_fq_state_parked;
1423 static void qm_congestion_task(struct work_struct *work)
1425 struct qman_portal *p = container_of(work, struct qman_portal,
1427 struct qman_cgrs rr, c;
1428 union qm_mc_result *mcr;
1429 struct qman_cgr *cgr;
1431 spin_lock(&p->cgr_lock);
1433 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1434 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1435 spin_unlock(&p->cgr_lock);
1436 dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1437 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1440 /* mask out the ones I'm not interested in */
1441 qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1443 /* check previous snapshot for delta, enter/exit congestion */
1444 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1445 /* update snapshot */
1446 qman_cgrs_cp(&p->cgrs[1], &rr);
1447 /* Invoke callback */
1448 list_for_each_entry(cgr, &p->cgr_cbs, node)
1449 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1450 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1451 spin_unlock(&p->cgr_lock);
1452 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1455 static void qm_mr_process_task(struct work_struct *work)
1457 struct qman_portal *p = container_of(work, struct qman_portal,
1459 const union qm_mr_entry *msg;
1466 qm_mr_pvb_update(&p->p);
1467 msg = qm_mr_current(&p->p);
1471 verb = msg->verb & QM_MR_VERB_TYPE_MASK;
1472 /* The message is a software ERN iff the 0x20 bit is clear */
1475 case QM_MR_VERB_FQRNI:
1476 /* nada, we drop FQRNIs on the floor */
1478 case QM_MR_VERB_FQRN:
1479 case QM_MR_VERB_FQRL:
1480 /* Lookup in the retirement table */
1481 fq = fqid_to_fq(qm_fqid_get(&msg->fq));
1484 fq_state_change(p, fq, msg, verb);
1486 fq->cb.fqs(p, fq, msg);
1488 case QM_MR_VERB_FQPN:
1490 fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
1491 fq_state_change(p, fq, msg, verb);
1493 fq->cb.fqs(p, fq, msg);
1495 case QM_MR_VERB_DC_ERN:
1497 pr_crit_once("Leaking DCP ERNs!\n");
1500 pr_crit("Invalid MR verb 0x%02x\n", verb);
/* It's a software ERN */
1504 fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
1505 fq->cb.ern(p, fq, msg);
1511 qm_mr_cci_consume(&p->p, num);
1512 qman_p_irqsource_add(p, QM_PIRQ_MRI);
1516 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
1518 if (is & QM_PIRQ_CSCI) {
1519 qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
1520 queue_work_on(smp_processor_id(), qm_portal_wq,
1521 &p->congestion_work);
1524 if (is & QM_PIRQ_EQRI) {
1525 qm_eqcr_cce_update(&p->p);
1526 qm_eqcr_set_ithresh(&p->p, 0);
1527 wake_up(&affine_queue);
1530 if (is & QM_PIRQ_MRI) {
1531 qman_p_irqsource_remove(p, QM_PIRQ_MRI);
1532 queue_work_on(smp_processor_id(), qm_portal_wq,
* remove some slowish-path stuff from the "fast path" and make sure it isn't
* inlined.
1543 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
1545 p->vdqcr_owned = NULL;
1546 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
1547 wake_up(&affine_queue);
1551 * The only states that would conflict with other things if they ran at the
1552 * same time on the same cpu are:
1554 * (i) setting/clearing vdqcr_owned, and
1555 * (ii) clearing the NE (Not Empty) flag.
* Both are safe, because:
1559 * (i) this clearing can only occur after qman_volatile_dequeue() has set the
1560 * vdqcr_owned field (which it does before setting VDQCR), and
1561 * qman_volatile_dequeue() blocks interrupts and preemption while this is
1562 * done so that we can't interfere.
1563 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
1564 * with (i) that API prevents us from interfering until it's safe.
1566 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
* less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
1568 * advantage comes from this function not having to "lock" anything at all.
1570 * Note also that the callbacks are invoked at points which are safe against the
1571 * above potential conflicts, but that this function itself is not re-entrant
1572 * (this is because the function tracks one end of each FIFO in the portal and
1573 * we do *not* want to lock that). So the consequence is that it is safe for
1574 * user callbacks to call into any QMan API.
1576 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1577 unsigned int poll_limit)
1579 const struct qm_dqrr_entry *dq;
1581 enum qman_cb_dqrr_result res;
1582 unsigned int limit = 0;
1585 qm_dqrr_pvb_update(&p->p);
1586 dq = qm_dqrr_current(&p->p);
1590 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1592 * VDQCR: don't trust context_b as the FQ may have
1593 * been configured for h/w consumption and we're
1594 * draining it post-retirement.
1596 fq = p->vdqcr_owned;
1598 * We only set QMAN_FQ_STATE_NE when retiring, so we
1599 * only need to check for clearing it when doing
1600 * volatile dequeues. It's one less thing to check
1601 * in the critical path (SDQCR).
1603 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1604 fq_clear(fq, QMAN_FQ_STATE_NE);
1606 * This is duplicated from the SDQCR code, but we
1607 * have stuff to do before *and* after this callback,
1608 * and we don't want multiple if()s in the critical
1611 res = fq->cb.dqrr(p, fq, dq);
1612 if (res == qman_cb_dqrr_stop)
1614 /* Check for VDQCR completion */
1615 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1618 /* SDQCR: context_b points to the FQ */
1619 fq = tag_to_fq(be32_to_cpu(dq->context_b));
1620 /* Now let the callback do its stuff */
1621 res = fq->cb.dqrr(p, fq, dq);
1623 * The callback can request that we exit without
* consuming this entry or advancing;
1626 if (res == qman_cb_dqrr_stop)
1629 /* Interpret 'dq' from a driver perspective. */
1631 * Parking isn't possible unless HELDACTIVE was set. NB,
1632 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1633 * check for HELDACTIVE to cover both.
1635 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1636 (res != qman_cb_dqrr_park));
1637 /* just means "skip it, I'll consume it myself later on" */
1638 if (res != qman_cb_dqrr_defer)
1639 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1640 res == qman_cb_dqrr_park);
1642 qm_dqrr_next(&p->p);
1644 * Entry processed and consumed, increment our counter. The
1645 * callback can request that we exit after consuming the
1646 * entry, and we also exit if we reach our processing limit,
1647 * so loop back only if neither of these conditions is met.
1649 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1654 void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1656 unsigned long irqflags;
1658 local_irq_save(irqflags);
1659 p->irq_sources |= bits & QM_PIRQ_VISIBLE;
1660 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1661 local_irq_restore(irqflags);
1663 EXPORT_SYMBOL(qman_p_irqsource_add);
1665 void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
1667 unsigned long irqflags;
1671 * Our interrupt handler only processes+clears status register bits that
1672 * are in p->irq_sources. As we're trimming that mask, if one of them
1673 * were to assert in the status register just before we remove it from
1674 * the enable register, there would be an interrupt-storm when we
1675 * release the IRQ lock. So we wait for the enable register update to
1676 * take effect in h/w (by reading it back) and then clear all other bits
1677 * in the status register. Ie. we clear them from ISR once it's certain
1678 * IER won't allow them to reassert.
1680 local_irq_save(irqflags);
1681 bits &= QM_PIRQ_VISIBLE;
1682 p->irq_sources &= ~bits;
1683 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1684 ier = qm_in(&p->p, QM_REG_IER);
1686 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1687 * data-dependency, ie. to protect against re-ordering.
1689 qm_out(&p->p, QM_REG_ISR, ~ier);
1690 local_irq_restore(irqflags);
1692 EXPORT_SYMBOL(qman_p_irqsource_remove);
1694 const cpumask_t *qman_affine_cpus(void)
1696 return &affine_mask;
1698 EXPORT_SYMBOL(qman_affine_cpus);
1700 u16 qman_affine_channel(int cpu)
if (cpu < 0) {
	struct qman_portal *portal = get_affine_portal();

	cpu = portal->config->cpu;
	put_affine_portal();
}
1708 WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
1709 return affine_channels[cpu];
1711 EXPORT_SYMBOL(qman_affine_channel);
1713 struct qman_portal *qman_get_affine_portal(int cpu)
1715 return affine_portals[cpu];
1717 EXPORT_SYMBOL(qman_get_affine_portal);
1719 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
1721 return __poll_portal_fast(p, limit);
1723 EXPORT_SYMBOL(qman_p_poll_dqrr);
1725 void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1727 unsigned long irqflags;
1729 local_irq_save(irqflags);
pools &= p->config->pools;
p->sdqcr |= pools;
1732 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1733 local_irq_restore(irqflags);
1735 EXPORT_SYMBOL(qman_p_static_dequeue_add);
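/*
 * Usage sketch (assumes the QM_SDQCR_CHANNELS_POOL() helper from the
 * public qman.h): let this portal also dequeue from pool channels 2 and 3:
 *
 *	qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL(2) |
 *				     QM_SDQCR_CHANNELS_POOL(3));
 */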
1737 /* Frame queue API */
1739 static const char *mcr_result_str(u8 result)
1742 case QM_MCR_RESULT_NULL:
1743 return "QM_MCR_RESULT_NULL";
1744 case QM_MCR_RESULT_OK:
1745 return "QM_MCR_RESULT_OK";
1746 case QM_MCR_RESULT_ERR_FQID:
1747 return "QM_MCR_RESULT_ERR_FQID";
1748 case QM_MCR_RESULT_ERR_FQSTATE:
1749 return "QM_MCR_RESULT_ERR_FQSTATE";
1750 case QM_MCR_RESULT_ERR_NOTEMPTY:
1751 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1752 case QM_MCR_RESULT_PENDING:
1753 return "QM_MCR_RESULT_PENDING";
1754 case QM_MCR_RESULT_ERR_BADCOMMAND:
1755 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1757 return "<unknown MCR result>";
1760 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1762 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1763 int ret = qman_alloc_fqid(&fqid);
1770 fq->state = qman_fq_state_oos;
1771 fq->cgr_groupid = 0;
1773 /* A context_b of 0 is allegedly special, so don't use that fqid */
1774 if (fqid == 0 || fqid >= num_fqids) {
1775 WARN(1, "bad fqid %d\n", fqid);
fq->idx = fqid * 2;
if (flags & QMAN_FQ_FLAG_NO_MODIFY)
	fq->idx++;
1783 WARN_ON(fq_table[fq->idx]);
1784 fq_table[fq->idx] = fq;
1788 EXPORT_SYMBOL(qman_create_fq);
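/*
 * Usage sketch (my_dqrr_cb is a hypothetical callback): populate the
 * callbacks before creating an FQ with a dynamically allocated FQID:
 *
 *	struct qman_fq fq = {
 *		.cb.dqrr = my_dqrr_cb,
 *	};
 *
 *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
 */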
1790 void qman_destroy_fq(struct qman_fq *fq)
1793 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1794 * quiesced. Instead, run some checks.
1796 switch (fq->state) {
1797 case qman_fq_state_parked:
1798 case qman_fq_state_oos:
1799 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1800 qman_release_fqid(fq->fqid);
1802 DPAA_ASSERT(fq_table[fq->idx]);
1803 fq_table[fq->idx] = NULL;
1808 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1810 EXPORT_SYMBOL(qman_destroy_fq);
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
1816 EXPORT_SYMBOL(qman_fq_fqid);
1818 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1820 union qm_mc_command *mcc;
1821 union qm_mc_result *mcr;
1822 struct qman_portal *p;
1826 myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
1827 ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1829 if (fq->state != qman_fq_state_oos &&
1830 fq->state != qman_fq_state_parked)
1832 #ifdef CONFIG_FSL_DPAA_CHECKING
1833 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1836 if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
1837 /* And can't be set at the same time as TDTHRESH */
1838 if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
1841 /* Issue an INITFQ_[PARKED|SCHED] management command */
1842 p = get_affine_portal();
1843 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1844 (fq->state != qman_fq_state_oos &&
1845 fq->state != qman_fq_state_parked)) {
1849 mcc = qm_mc_start(&p->p);
1851 mcc->initfq = *opts;
1852 qm_fqid_set(&mcc->fq, fq->fqid);
1853 mcc->initfq.count = 0;
1855 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1856 * demux pointer. Otherwise, the caller-provided value is allowed to
1857 * stand, don't overwrite it.
1859 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1862 mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
1863 mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
1865 * and the physical address - NB, if the user wasn't trying to
1866 * set CONTEXTA, clear the stashing settings.
1868 if (!(be16_to_cpu(mcc->initfq.we_mask) &
1869 QM_INITFQ_WE_CONTEXTA)) {
1870 mcc->initfq.we_mask |=
1871 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1872 memset(&mcc->initfq.fqd.context_a, 0,
1873 sizeof(mcc->initfq.fqd.context_a));
1875 struct qman_portal *p = qman_dma_portal;
1877 phys_fq = dma_map_single(p->config->dev, fq,
1878 sizeof(*fq), DMA_TO_DEVICE);
1879 if (dma_mapping_error(p->config->dev, phys_fq)) {
1880 dev_err(p->config->dev, "dma_mapping failed\n");
1885 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1888 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1891 if (!(be16_to_cpu(mcc->initfq.we_mask) &
1892 QM_INITFQ_WE_DESTWQ)) {
1893 mcc->initfq.we_mask |=
1894 cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1897 qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
1899 qm_mc_commit(&p->p, myverb);
1900 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1901 dev_err(p->config->dev, "MCR timeout\n");
1906 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1908 if (res != QM_MCR_RESULT_OK) {
1913 if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
1914 if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
1915 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
		else
			fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1919 if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
1920 fq->cgr_groupid = opts->fqd.cgid;
1922 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1923 qman_fq_state_sched : qman_fq_state_parked;
1926 put_affine_portal();
1929 EXPORT_SYMBOL(qman_init_fq);
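/*
 * Usage sketch (illustrative values; my_cgrid is a hypothetical CGR id):
 * enable congestion-group tracking and schedule the FQ in one INITFQ; the
 * we_mask selects which FQD fields the command writes:
 *
 *	struct qm_mcc_initfq opts = {};
 *
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CGID);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CGE);
 *	opts.fqd.cgid = my_cgrid;
 *	err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */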
1931 int qman_schedule_fq(struct qman_fq *fq)
1933 union qm_mc_command *mcc;
1934 union qm_mc_result *mcr;
1935 struct qman_portal *p;
1938 if (fq->state != qman_fq_state_parked)
1940 #ifdef CONFIG_FSL_DPAA_CHECKING
1941 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1944 /* Issue a ALTERFQ_SCHED management command */
1945 p = get_affine_portal();
1946 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1947 fq->state != qman_fq_state_parked) {
1951 mcc = qm_mc_start(&p->p);
1952 qm_fqid_set(&mcc->fq, fq->fqid);
1953 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1954 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1955 dev_err(p->config->dev, "ALTER_SCHED timeout\n");
1960 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1961 if (mcr->result != QM_MCR_RESULT_OK) {
1965 fq->state = qman_fq_state_sched;
1967 put_affine_portal();
1970 EXPORT_SYMBOL(qman_schedule_fq);
1972 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1974 union qm_mc_command *mcc;
1975 union qm_mc_result *mcr;
1976 struct qman_portal *p;
1980 if (fq->state != qman_fq_state_parked &&
1981 fq->state != qman_fq_state_sched)
1983 #ifdef CONFIG_FSL_DPAA_CHECKING
1984 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1987 p = get_affine_portal();
1988 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1989 fq->state == qman_fq_state_retired ||
1990 fq->state == qman_fq_state_oos) {
1994 mcc = qm_mc_start(&p->p);
1995 qm_fqid_set(&mcc->fq, fq->fqid);
1996 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1997 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1998 dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
2003 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
2006 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
2007 * and defer the flags until FQRNI or FQRN (respectively) show up. But
2008 * "Friendly" is to process OK immediately, and not set CHANGING. We do
2009 * friendly, otherwise the caller doesn't necessarily have a fully
2010 * "retired" FQ on return even if the retirement was immediate. However
2011 * this does mean some code duplication between here and
2012 * fq_state_change().
2014 if (res == QM_MCR_RESULT_OK) {
2016 /* Process 'fq' right away, we'll ignore FQRNI */
2017 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
2018 fq_set(fq, QMAN_FQ_STATE_NE);
2019 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
2020 fq_set(fq, QMAN_FQ_STATE_ORL);
2023 fq->state = qman_fq_state_retired;
2026 * Another issue with supporting "immediate" retirement
2027 * is that we're forced to drop FQRNIs, because by the
2028 * time they're seen it may already be "too late" (the
2029 * fq may have been OOS'd and free()'d already). But if
2030 * the upper layer wants a callback whether it's
2031 * immediate or not, we have to fake a "MR" entry to
2032 * look like an FQRNI...
2034 union qm_mr_entry msg;
2036 msg.verb = QM_MR_VERB_FQRNI;
2037 msg.fq.fqs = mcr->alterfq.fqs;
2038 qm_fqid_set(&msg.fq, fq->fqid);
2039 msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
2040 fq->cb.fqs(p, fq, &msg);
2042 } else if (res == QM_MCR_RESULT_PENDING) {
2044 fq_set(fq, QMAN_FQ_STATE_CHANGING);
2049 put_affine_portal();
2052 EXPORT_SYMBOL(qman_retire_fq);
2054 int qman_oos_fq(struct qman_fq *fq)
2056 union qm_mc_command *mcc;
2057 union qm_mc_result *mcr;
2058 struct qman_portal *p;
2061 if (fq->state != qman_fq_state_retired)
2063 #ifdef CONFIG_FSL_DPAA_CHECKING
2064 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
2067 p = get_affine_portal();
2068 if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
2069 fq->state != qman_fq_state_retired) {
2073 mcc = qm_mc_start(&p->p);
2074 qm_fqid_set(&mcc->fq, fq->fqid);
2075 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2076 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2080 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
2081 if (mcr->result != QM_MCR_RESULT_OK) {
2085 fq->state = qman_fq_state_oos;
2087 put_affine_portal();
2090 EXPORT_SYMBOL(qman_oos_fq);
2092 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
2094 union qm_mc_command *mcc;
2095 union qm_mc_result *mcr;
2096 struct qman_portal *p = get_affine_portal();
2099 mcc = qm_mc_start(&p->p);
2100 qm_fqid_set(&mcc->fq, fq->fqid);
2101 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2102 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2107 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2108 if (mcr->result == QM_MCR_RESULT_OK)
2109 *fqd = mcr->queryfq.fqd;
2113 put_affine_portal();
2117 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
2119 union qm_mc_command *mcc;
2120 union qm_mc_result *mcr;
2121 struct qman_portal *p = get_affine_portal();
2124 mcc = qm_mc_start(&p->p);
2125 qm_fqid_set(&mcc->fq, fq->fqid);
2126 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2127 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2132 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2133 if (mcr->result == QM_MCR_RESULT_OK)
2134 *np = mcr->queryfq_np;
2135 else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2140 put_affine_portal();
2143 EXPORT_SYMBOL(qman_query_fq_np);
2145 static int qman_query_cgr(struct qman_cgr *cgr,
2146 struct qm_mcr_querycgr *cgrd)
2148 union qm_mc_command *mcc;
2149 union qm_mc_result *mcr;
2150 struct qman_portal *p = get_affine_portal();
2153 mcc = qm_mc_start(&p->p);
2154 mcc->cgr.cgid = cgr->cgrid;
2155 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2156 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2160 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2161 if (mcr->result == QM_MCR_RESULT_OK)
2162 *cgrd = mcr->querycgr;
	else
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
2165 mcr_result_str(mcr->result));
2169 put_affine_portal();
2173 int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2175 struct qm_mcr_querycgr query_cgr;
2178 err = qman_query_cgr(cgr, &query_cgr);
2182 *result = !!query_cgr.cgr.cs;
2185 EXPORT_SYMBOL(qman_query_cgr_congested);
2187 /* internal function used as a wait_event() expression */
2188 static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
2190 unsigned long irqflags;
2193 local_irq_save(irqflags);
2196 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2199 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2200 p->vdqcr_owned = fq;
2201 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2204 local_irq_restore(irqflags);
2208 static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2212 *p = get_affine_portal();
2213 ret = set_p_vdqcr(*p, fq, vdqcr);
2214 put_affine_portal();
static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}

int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;

	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				   !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);

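/*
 * Usage sketch (illustrative only, compiled out): draining up to three
 * frames from a parked or retired FQ, blocking until the VDQCR is both
 * issued (WAIT) and completed (FINISH). The dequeued frames are delivered
 * through the portal's DQRR processing, not returned here. The helper name
 * is hypothetical.
 */
#if 0
static int example_drain_three_frames(struct qman_fq *fq)
{
	/* the FQID bits must be left clear; they are filled in above */
	u32 vdqcr = QM_VDQCR_NUMFRAMES_SET(3);

	return qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
}
#endif
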
static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;
	int ret = 0;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time...
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq)) {
		/* no free EQCR entry; report it rather than drop silently */
		ret = -EBUSY;
		goto out;
	}

	qm_fqid_set(eq, fq->fqid);
	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_enqueue);

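/*
 * Usage sketch (illustrative only, compiled out): building a minimal
 * contiguous frame descriptor around a DMA-mapped buffer and enqueuing it,
 * assuming the qm_fd helpers from qman.h. Mapping the buffer and retrying
 * on -EBUSY (ring full) are left to the caller; the helper name is
 * hypothetical.
 */
#if 0
static int example_enqueue_buf(struct qman_fq *fq, dma_addr_t addr, u32 len)
{
	struct qm_fd fd;

	qm_fd_clear_fd(&fd);
	qm_fd_addr_set64(&fd, addr);
	qm_fd_set_contig(&fd, 0, len);
	return qman_enqueue(fq, &fd);
}
#endif
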
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)

/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}

static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}
	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);

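/*
 * Usage sketch (illustrative only, compiled out): allocating a CGR id,
 * enabling congestion state change notifications with a byte-count
 * threshold, and registering a callback on this portal, assuming the
 * qm_mcc_initcgr fields and qm_cgr_cs_thres_set64() from qman.h. The
 * threshold value and helper names are placeholders.
 */
#if 0
static void example_cscn(struct qman_portal *qm, struct qman_cgr *c,
			 int congested)
{
	pr_info("CGR %u %s congested\n", c->cgrid,
		congested ? "now" : "no longer");
}

static int example_setup_cgr(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr initcgr;
	int err;

	err = qman_alloc_cgrid_range(&cgr->cgrid, 1);
	if (err)
		return err;
	cgr->cb = example_cscn;
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, 0x100000 /* 1 MiB */, 1);
	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
}
#endif
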
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};

static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		/* run the deletion on the CPU whose portal created the CGR */
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)

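/*
 * For example, qm_mr_drain(&p->p, FQRN) expands to
 * _qm_mr_consume_and_match_verb(&p->p, QM_MR_VERB_FQRN), and
 * qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY) expands to
 * _qm_dqrr_consume_and_match(&p->p, fqid, QM_DQRR_STAT_FQ_EMPTY, true);
 * the second argument is token-pasted onto the register-constant prefix.
 */
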
static int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&p->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&p->p, FQRN);
				cpu_relax();
			} while (!found_fqrn);
		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}
		qm_dqrr_sdqcr_set(&p->p, 0);

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	if (!p)
		return -ENODEV;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;
	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

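/*
 * Usage sketch (illustrative only, compiled out): the range allocators above
 * return the base of a contiguous range; a count of 1 yields a single id.
 * Each successful allocation is expected to be paired with the matching
 * qman_release_*() call below once the resource is quiesced. The helper name
 * is hypothetical.
 */
#if 0
static int example_get_one_fqid(u32 *fqid)
{
	int err = qman_alloc_fqid_range(fqid, 1);

	if (err) /* -ENODEV if no pool was seeded, -ENOMEM if exhausted */
		return err;
	/* ... use *fqid; later: qman_release_fqid(*fqid); ... */
	return 0;
}
#endif
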
int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an
	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
	 * channel is the pool-channel being released. When a non-OOS FQD is
	 * found we attempt to clean it up.
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);