// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQCR_CI      0x840
#define QBMAN_CINH_SWP_EQAR         0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI         0xa00
#define QBMAN_CINH_SWP_DCAP         0xac0
#define QBMAN_CINH_SWP_SDQCR        0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR          0xcc0
#define QBMAN_CINH_SWP_ISR          0xe00
#define QBMAN_CINH_SWP_IER          0xe40
#define QBMAN_CINH_SWP_ISDR         0xe80
#define QBMAN_CINH_SWP_IIR          0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK          0x0f
#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal function declarations */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
                                    const struct qbman_eq_desc *d,
                                    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             uint32_t *flags,
                                             int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct dpaa2_fd *fd,
                                               uint32_t *flags,
                                               int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
                                    const struct qbman_release_desc *d,
                                    const u64 *buffers,
                                    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
                                      const struct qbman_release_desc *d,
                                      const u64 *buffers,
                                      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
        = qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
        = qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
                        = qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
                        = qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const u64 *buffers,
                             unsigned int num_buffers)
                        = qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
        return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
                                        u32 value)
{
        writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
        return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
                                    u8 epm, int sd, int sp, int se,
                                    int dp, int de, int ep)
{
        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
                est << SWP_CFG_EST_SHIFT |
                wn << SWP_CFG_WN_SHIFT |
                rpm << SWP_CFG_RPM_SHIFT |
                dcm << SWP_CFG_DCM_SHIFT |
                epm << SWP_CFG_EPM_SHIFT |
                sd << SWP_CFG_SD_SHIFT |
                sp << SWP_CFG_SP_SHIFT |
                se << SWP_CFG_SE_SHIFT |
                dp << SWP_CFG_DP_SHIFT |
                de << SWP_CFG_DE_SHIFT |
                ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE       0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
        /* 'first' is included, 'last' is excluded */
        if (first <= last)
                return last - first;
        else
                return (2 * ringsize) - (first - last);
}
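
/*
 * Worked example (editorial note, not from the original source): the PI/CI
 * counters carry one wrap bit on top of the ring index, so their values range
 * over [0, 2 * ringsize). With ringsize = 32, first = 62 and last = 1, the
 * distance is (2 * 32) - (62 - 1) = 3 outstanding entries.
 */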

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return the qbman_swp portal for success, or NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
        u32 reg;
        u32 mask_size;
        u32 eqcr_pi;

        if (!p)
                return NULL;

        spin_lock_init(&p->access_spinlock);

        p->desc = d;
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.available, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        p->addr_cena = d->cena_bar;
        p->addr_cinh = d->cinh_bar;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {

                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                        1, /* Writes Non-cacheable */
                        0, /* EQCR_CI stashing threshold */
                        3, /* RPM: RCR in array mode */
                        2, /* DCM: Discrete consumption ack */
                        2, /* EPM: EQCR in ring mode */
                        1, /* mem stashing drop enable */
                        1, /* mem stashing priority enable */
                        1, /* mem stashing enable */
                        1, /* dequeue stashing priority enable */
                        0, /* dequeue stashing enable */
                        0); /* EQCR_CI stashing priority enable */
        } else {
                memset(p->addr_cena, 0, 64 * 1024);
                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                        1, /* Writes Non-cacheable */
                        1, /* EQCR_CI stashing threshold */
                        3, /* RPM: RCR in array mode */
                        2, /* DCM: Discrete consumption ack */
                        0, /* EPM: EQCR in ring mode */
                        1, /* mem stashing drop enable */
                        1, /* mem stashing priority enable */
                        1, /* mem stashing enable */
                        1, /* dequeue stashing priority enable */
                        0, /* dequeue stashing enable */
                        0); /* EQCR_CI stashing priority enable */
                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
        }

        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("qbman: the portal is not enabled!\n");
                kfree(p);
                return NULL;
        }

        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
        }
        /*
         * SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_ptr =
                        qbman_swp_enqueue_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                        qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                        qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
        eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
                        & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size;

        return p;
}
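
/*
 * Usage sketch (illustrative only, not part of the driver). Assumes the
 * caller has already filled in a struct qbman_swp_desc 'pd' with valid
 * cena_bar/cinh_bar mappings and the portal's qman_version:
 *
 *	struct qbman_swp *swp = qbman_swp_init(&pd);
 *
 *	if (!swp)
 *		return -ENODEV;
 *	// ... enqueue/dequeue through the portal ...
 *	qbman_swp_finish(swp);
 */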

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
        kfree(p);
}

/**
 * qbman_swp_interrupt_read_status()
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status()
 * @p: the given software portal
 * @mask: the mask to clear in the SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: the mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit the IRQs
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
        else
                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
        u8 *v = cmd;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
        } else {
                *v = cmd_verb | p->mc.valid_bit;
                dma_wmb();
                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        }
}

/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
        u32 *ret, verb;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit - command completed if the rest
                 * is non-zero.
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        } else {
                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed if the valid bit is toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;
                /* Command completed if the rest is non-zero */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        }

        return ret;
}
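
/*
 * Typical management-command flow through this base layer (illustrative
 * sketch for a simple blocking caller; QBMAN_MC_ACQUIRE is the command code
 * defined above, and the command payload layout comes from the portal
 * header):
 *
 *	u8 *cmd = qbman_swp_mc_start(p);
 *	u8 *rsp;
 *
 *	if (!cmd)
 *		return -EBUSY;
 *	// ... fill in command fields, leaving byte 0 (the verb) alone ...
 *	qbman_swp_mc_submit(p, cmd, QBMAN_MC_ACQUIRE);
 *	do {
 *		rsp = qbman_swp_mc_result(p);
 *	} while (!rsp);
 *	// ... parse response; byte 0 holds the verb and valid-bit ...
 */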

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->verb |= enqueue_response_always;
        else
                d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 *  - enqueue to a frame queue
 *  - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued to
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued to
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
                          u32 qd_bin, u32 qd_prio)
{
        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->tgtid = cpu_to_le32(qdid);
        d->qdbin = cpu_to_le16(qd_bin);
        d->qpri = qd_prio;
}
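
/*
 * Putting the descriptor setters together (illustrative sketch; 'myfqid' and
 * 'fd' are placeholders supplied by the caller, and qbman_swp_enqueue() is
 * assumed to be the inline wrapper around qbman_swp_enqueue_ptr from the
 * portal header):
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, myfqid);
 *	if (qbman_swp_enqueue(s, &ed, fd) == -EBUSY)
 *		; // EQCR full: back off and retry
 */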

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
                                                   u8 idx)
{
        if (idx < 16)
                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
                                     QMAN_RT_MODE);
        else
                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
                                     (idx - 16) * 4,
                                     QMAN_RT_MODE);
}

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
{
        u32 flags = 0;
        int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

        if (ret >= 0)
                ret = 0;
        else
                ret = -EBUSY;
        return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct dpaa2_fd *fd)
{
        u32 flags = 0;
        int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

        if (ret >= 0)
                ret = 0;
        else
                ret = -EBUSY;
        return ret;
}

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = (uint32_t *)d;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        spin_lock(&s->access_spinlock);
        half_mask = (s->eqcr.pi_ci_mask >> 1);
        full_mask = s->eqcr.pi_ci_mask;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);

                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
                        spin_unlock(&s->access_spinlock);
                        return 0;
                }
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        dma_wmb();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                 (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Advance the software producer index */
        s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;
        spin_unlock(&s->access_spinlock);

        return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct dpaa2_fd *fd,
                                        uint32_t *flags,
                                        int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = (uint32_t *)(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        unsigned long irq_flags;

        spin_lock(&s->access_spinlock);
        local_irq_save(irq_flags);

        half_mask = (s->eqcr.pi_ci_mask >> 1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
                s->eqcr.ci = __raw_readl(p) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
                        local_irq_restore(irq_flags);
                        spin_unlock(&s->access_spinlock);
                        return 0;
                }
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                 (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                             (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);
        local_irq_restore(irq_flags);
        spin_unlock(&s->access_spinlock);

        return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                           const struct qbman_eq_desc *d,
                                           const struct dpaa2_fd *fd,
                                           int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask >> 1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        dma_wmb();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Advance the software producer index */
        s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;

        return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask >> 1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
                s->eqcr.ci = __raw_readl(p) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                             (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);

        return num_enqueued;
}

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        WARN_ON(channel_idx > 15);
        *enabled = !!(src & (1 << channel_idx));
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
        u16 dqsrc;

        WARN_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Read back the complete src map. If no channels are enabled
         * the SDQCR must be 0 or else QMan will assert errors
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
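
/*
 * Example (illustrative sketch, not from the original source): enabling push
 * dequeue on channel 0 and checking it afterwards; 'swp' is an initialized
 * portal:
 *
 *	int enabled;
 *
 *	qbman_swp_push_set(swp, 0, 1);
 *	qbman_swp_push_get(swp, 0, &enabled);
 *	// 'enabled' is now non-zero; entries for the channel will start
 *	// appearing in the portal's DQRR
 */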

#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct dpaa2_dq *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        /* save the virtual address */
        d->rsp_addr_virt = (u64)(uintptr_t)storage;

        if (!storage) {
                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
        d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                            enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                                 enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(chid);
}
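
/*
 * A complete volatile dequeue setup might look like this (illustrative
 * sketch; 'storage' and 'storage_phys' are a caller-provided DMA-coherent
 * result buffer, 'myfqid' is a placeholder, and qbman_swp_pull() is assumed
 * to be the inline wrapper around qbman_swp_pull_ptr from the portal header):
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_fq(&pd, myfqid);
 *	if (qbman_swp_pull(swp, &pd) == -EBUSY)
 *		; // a previous pull is still in flight; retry later
 */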

/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        else
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;
        dma_wmb();
        /* Set the verb byte, have to substitute in the valid-bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;

        return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        else
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;

        /* Set the verb byte, have to substitute in the valid-bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

        return 0;
}

#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /*
                 * We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /*
                 * if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        verb = p->dq.verb;

        /*
         * If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip this
         * check, because we've already determined that a new entry is available
         * and we've invalidated the cacheline before reading it, so the
         * valid-bit behaviour is repaired and should tell us what we already
         * knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
                return NULL;
        }
        /*
         * There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        /*
         * If this is the final response to a volatile dequeue command
         * indicate that the vdq is available
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

        return p;
}

/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /*
                 * We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /*
                 * if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
        verb = p->dq.verb;

        /*
         * If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip this
         * check, because we've already determined that a new entry is available
         * and we've invalidated the cacheline before reading it, so the
         * valid-bit behaviour is repaired and should tell us what we already
         * knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
                return NULL;
        }
        /*
         * There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        /*
         * If this is the final response to a volatile dequeue command
         * indicate that the vdq is available
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));

        return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
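
/*
 * A minimal DQRR polling loop (illustrative sketch; qbman_swp_dqrr_next() is
 * assumed to be the inline wrapper around qbman_swp_dqrr_next_ptr, and
 * dpaa2_dq_fd() the accessor from the dpaa2-global header):
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		// ... process dpaa2_dq_fd(dq) ...
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */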
1370
/**
 * qbman_result_has_new_result() - Check for and retrieve a dequeue response
 *                                 from the dequeue storage memory set in the
 *                                 pull dequeue command
 * @s: the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 if a valid dequeue result was retrieved, or 0 if no valid result
 * is available.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver performs any required endianness conversion
 * to ensure that the user's dequeue result storage is in host-endian format.
 * As such, once the user has called qbman_result_has_new_result() and been
 * returned a valid dequeue result, they should not call it again on the same
 * memory location (unless, of course, another dequeue command has been
 * executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
                return 0;

        /*
         * Set the token to 0 so that we detect the change back to 1 the next
         * time this location is polled. Const is cast away here because we
         * want users to treat the dequeue responses as read-only.
         */
        ((struct dpaa2_dq *)dq)->dq.tok = 0;

        /*
         * Determine whether VDQCR is available based on whether the
         * current result is sitting in the first storage location of
         * the busy command.
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.available);
        }

        return 1;
}

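/*
 * Illustrative sketch (not part of the driver): a volatile (pull) dequeue
 * into caller-provided storage, polled with qbman_result_has_new_result().
 * 'storage'/'storage_phys' are assumed to be a coherent DMA buffer of 'num'
 * struct dpaa2_dq entries set up by the caller.
 */
static int __maybe_unused example_pull_poll(struct qbman_swp *s, u32 fqid,
                                            struct dpaa2_dq *storage,
                                            dma_addr_t storage_phys, u8 num)
{
        struct qbman_pull_desc pd;
        int ret;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
        qbman_pull_desc_set_numframes(&pd, num);
        qbman_pull_desc_set_fq(&pd, fqid);

        ret = qbman_swp_pull(s, &pd);
        if (ret)
                return ret;     /* portal busy with a previous VDQCR */

        /* The token flips to QMAN_DQ_TOKEN_VALID when a response lands */
        while (!qbman_result_has_new_result(s, storage))
                cpu_relax();

        return 0;
}
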
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        memset(d, 0, sizeof(*d));
        d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor
 * @bpid: the buffer pool ID
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
        d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determine whether the portal's RCDI
 *                                 interrupt source should be asserted after
 *                                 the release command completes
 * @d:      the release descriptor
 * @enable: non-zero to assert the RCDI interrupt on completion, zero to
 *          suppress it
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        if (enable)
                d->verb |= 1 << 6;
        else
                d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const u64 *buffers, unsigned int num_buffers)
{
        int i;
        struct qbman_release_desc *p;
        u32 rar;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        /* Start the release command */
        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        for (i = 0; i < num_buffers; i++)
                p->buf[i] = cpu_to_le64(buffers[i]);
        p->bpid = d->bpid;

        /*
         * Set the verb byte, have to substitute in the valid-bit
         * and the number of buffers.
         */
        dma_wmb();
        p->verb = d->verb | RAR_VB(rar) | num_buffers;

        return 0;
}

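/*
 * Illustrative sketch (not part of the driver): seeding a buffer pool by
 * releasing up to seven buffer addresses in one command, retrying while the
 * release command ring is busy.
 */
static int __maybe_unused example_seed_pool(struct qbman_swp *s, u16 bpid,
                                            const u64 *bufs, unsigned int num)
{
        struct qbman_release_desc rd;
        int ret;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        do {
                ret = qbman_swp_release_direct(s, &rd, bufs, num);
        } while (ret == -EBUSY);

        return ret;
}
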
/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
                               const struct qbman_release_desc *d,
                               const u64 *buffers, unsigned int num_buffers)
{
        int i;
        struct qbman_release_desc *p;
        u32 rar;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        /* Start the release command */
        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        for (i = 0; i < num_buffers; i++)
                p->buf[i] = cpu_to_le64(buffers[i]);
        p->bpid = d->bpid;

        p->verb = d->verb | RAR_VB(rar) | num_buffers;
        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
                             RAR_IDX(rar) * 4, QMAN_RT_MODE);

        return 0;
}

struct qbman_acquire_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        u8 num;
        u8 reserved2[59];
};

struct qbman_acquire_rslt {
        u8 verb;
        u8 rslt;
        __le16 reserved;
        u8 num;
        u8 reserved2[3];
        __le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer to storage for the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, which may be less than the number
 * requested, or a negative error code if the acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
                      unsigned int num_buffers)
{
        struct qbman_acquire_desc *p;
        struct qbman_acquire_rslt *r;
        int i;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);

        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->bpid = cpu_to_le16(bpid);
        p->num = num_buffers;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
        if (unlikely(!r)) {
                pr_err("qbman: acquire from BPID %d failed, no response\n",
                       bpid);
                return -EIO;
        }

        /* Decode the outcome */
        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        WARN_ON(r->num > num_buffers);

        /* Copy the acquired buffers to the caller's array */
        for (i = 0; i < r->num; i++)
                buffers[i] = le64_to_cpu(r->buf[i]);

        return (int)r->num;
}

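/*
 * Illustrative sketch (not part of the driver): draining buffers from a
 * pool. qbman_swp_acquire() may return fewer buffers than requested, so the
 * caller must honour the return value, not the requested count.
 */
static int __maybe_unused example_drain_pool(struct qbman_swp *s, u16 bpid)
{
        u64 bufs[7];
        int n;

        n = qbman_swp_acquire(s, bpid, bufs, 7);
        if (n < 0)
                return n;       /* -EBUSY, -EIO or -EINVAL */

        /* ... n valid addresses are now in bufs[0..n-1] ... */
        return n;
}
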
struct qbman_alt_fq_state_desc {
        u8 verb;
        u8 reserved[3];
        __le32 fqid;
        u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
        u8 verb;
        u8 rslt;
        u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

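/**
 * qbman_swp_alt_fq_state() - Issue a frame queue state alteration command
 * @s:           the software portal object
 * @fqid:        the ID of the frame queue (only the low 24 bits are used)
 * @alt_fq_verb: the management command verb selecting the alteration
 *               (e.g. schedule, force, XON, XOFF)
 *
 * Return 0 for success, or negative error code on failure.
 */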
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
                           u8 alt_fq_verb)
{
        struct qbman_alt_fq_state_desc *p;
        struct qbman_alt_fq_state_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
        if (unlikely(!r)) {
                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
                       alt_fq_verb);
                return -EIO;
        }

        /* Decode the outcome */
        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
                       fqid, r->verb, r->rslt);
                return -EIO;
        }

        return 0;
}

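/*
 * Illustrative sketch (not part of the driver): XOFF/XON flow control of a
 * frame queue via qbman_swp_alt_fq_state(). QBMAN_FQ_XOFF/QBMAN_FQ_XON are
 * assumed to be the alteration verb codes declared in qbman-portal.h.
 */
static int __maybe_unused example_fq_pause(struct qbman_swp *s, u32 fqid,
                                           bool pause)
{
        return qbman_swp_alt_fq_state(s, fqid,
                                      pause ? QBMAN_FQ_XOFF : QBMAN_FQ_XON);
}
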
struct qbman_cdan_ctrl_desc {
        u8 verb;
        u8 reserved;
        __le16 ch;
        u8 we;
        u8 ctrl;
        __le16 reserved2;
        __le64 cdan_ctx;
        u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
        u8 verb;
        u8 rslt;
        __le16 ch;
        u8 reserved[60];
};

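/**
 * qbman_swp_CDAN_set() - Configure a channel's CDAN (Channel Data
 *                        Availability Notification) settings
 * @s:         the software portal object
 * @channelid: the channel index
 * @we_mask:   write-enable mask selecting which attributes to update
 * @cdan_en:   non-zero to enable CDAN generation, zero to disable it
 * @ctx:       the context value returned with the notification
 *
 * Return 0 for success, or negative error code on failure.
 */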
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
                       u8 we_mask, u8 cdan_en,
                       u64 ctx)
{
        struct qbman_cdan_ctrl_desc *p = NULL;
        struct qbman_cdan_ctrl_rslt *r = NULL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->ch = cpu_to_le16(channelid);
        p->we = we_mask;
        if (cdan_en)
                p->ctrl = 1;
        else
                p->ctrl = 0;
        p->cdan_ctx = cpu_to_le64(ctx);

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
        if (unlikely(!r)) {
                pr_err("qbman: wqchan config failed, no response\n");
                return -EIO;
        }

        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
                       channelid, r->rslt);
                return -EIO;
        }

        return 0;
}

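/*
 * Illustrative sketch (not part of the driver): enable data availability
 * notifications on a channel with a caller-supplied context. The
 * CODE_CDAN_WE_* write-enable flags are assumed to be the ones declared in
 * qbman-portal.h.
 */
static int __maybe_unused example_cdan_enable(struct qbman_swp *s,
                                              u16 channelid, u64 ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}
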
#define QBMAN_RESPONSE_VERB_MASK        0x7f
#define QBMAN_FQ_QUERY_NP               0x45
#define QBMAN_BP_QUERY                  0x32

struct qbman_fq_query_desc {
        u8 verb;
        u8 reserved[3];
        __le32 fqid;
        u8 reserved2[56];
};

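/**
 * qbman_fq_query_state() - Query the non-programmable fields (state) of a
 *                          frame queue
 * @s:    the software portal object
 * @fqid: the ID of the frame queue to be queried
 * @r:    storage for the query result
 *
 * Return 0 for success, or negative error code on failure.
 */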
int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
                         struct qbman_fq_query_np_rslt *r)
{
        struct qbman_fq_query_desc *p;
        void *resp;

        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* FQID is a 24-bit value */
        p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
        if (!resp) {
                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
                       fqid);
                return -EIO;
        }
        *r = *(struct qbman_fq_query_np_rslt *)resp;
        /* Decode the outcome */
        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

        /* Determine success or failure */
        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
                       fqid, r->rslt);
                return -EIO;
        }

        return 0;
}

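/**
 * qbman_fq_state_frame_count() - Get the frame count from an FQ query result
 * @r: the FQ query result
 *
 * Return the 24-bit frame count.
 */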
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

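/**
 * qbman_fq_state_byte_count() - Get the byte count from an FQ query result
 * @r: the FQ query result
 */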
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
        return le32_to_cpu(r->byte_cnt);
}

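/*
 * Illustrative sketch (not part of the driver): read the current backlog of
 * a frame queue using the query helpers above.
 */
static int __maybe_unused example_fq_backlog(struct qbman_swp *s, u32 fqid,
                                             u32 *frames, u32 *bytes)
{
        struct qbman_fq_query_np_rslt rslt;
        int ret;

        ret = qbman_fq_query_state(s, fqid, &rslt);
        if (ret)
                return ret;

        *frames = qbman_fq_state_frame_count(&rslt);
        *bytes = qbman_fq_state_byte_count(&rslt);
        return 0;
}
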
struct qbman_bp_query_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        u8 reserved2[60];
};

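/**
 * qbman_bp_query() - Query a buffer pool's state
 * @s:    the software portal object
 * @bpid: the ID of the buffer pool to be queried
 * @r:    storage for the query result
 *
 * Return 0 for success, or negative error code on failure.
 */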
int qbman_bp_query(struct qbman_swp *s, u16 bpid,
                   struct qbman_bp_query_rslt *r)
{
        struct qbman_bp_query_desc *p;
        void *resp;

        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->bpid = cpu_to_le16(bpid);
        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
        if (!resp) {
                pr_err("qbman: Query BPID %d fields failed, no response\n",
                       bpid);
                return -EIO;
        }
        *r = *(struct qbman_bp_query_rslt *)resp;
        /* Decode the outcome */
        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

        /* Determine success or failure */
        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        return 0;
}

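/**
 * qbman_bp_info_num_free_bufs() - Get the number of free buffers from a
 *                                 buffer pool query result
 * @a: the buffer pool query result
 */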
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
        return le32_to_cpu(a->fill);
}
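
/*
 * Illustrative sketch (not part of the driver): check how many free buffers
 * remain in a pool, e.g. to decide when to reseed it.
 */
static u32 __maybe_unused example_bp_free_count(struct qbman_swp *s, u16 bpid)
{
        struct qbman_bp_query_rslt rslt;

        if (qbman_bp_query(s, bpid, &rslt))
                return 0;       /* query failed; treat the pool as empty */

        return qbman_bp_info_num_free_bufs(&rslt);
}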