// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQCR_CI      0x840
#define QBMAN_CINH_SWP_EQAR         0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI         0xa00
#define QBMAN_CINH_SWP_DCAP         0xac0
#define QBMAN_CINH_SWP_SDQCR        0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR          0xcc0
#define QBMAN_CINH_SWP_ISR          0xe00
#define QBMAN_CINH_SWP_IER          0xe40
#define QBMAN_CINH_SWP_ISDR         0xe80
#define QBMAN_CINH_SWP_IIR          0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT    29
#define QB_SDQCR_FC_MASK     0x1
#define QB_SDQCR_DCT_SHIFT   24
#define QB_SDQCR_DCT_MASK    0x3
#define QB_SDQCR_TOK_SHIFT   16
#define QB_SDQCR_TOK_MASK    0xff
#define QB_SDQCR_SRC_SHIFT   0
#define QB_SDQCR_SRC_MASK    0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN     0xbb

#define QBMAN_EQCR_DCA_IDXMASK  0x0f
#define QBMAN_ENQUEUE_FLAG_DCA  (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START   32
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
/* Internal Function declaration */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
	= qbman_swp_release_direct;
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}
#define QBMAN_CINH_SWP_CFG    0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0
static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE	0x00000100
static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}
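
/*
 * Worked example of the cyclic distance above: the PI/CI counters carry one
 * bit more than the ring index (they wrap at 2 * ringsize), so with
 * ringsize = 8, first = 14 and last = 2 the result is
 * (2 * 8) - (14 - 2) = 4, i.e. the consumer is four slots ahead modulo 16.
 */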
/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}
	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}
	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
			& p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}
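
/*
 * Example (illustrative sketch only, not part of the driver): a DPIO-style
 * user creates the portal from the two BAR mappings and the QMan revision it
 * discovered. Field names follow struct qbman_swp_desc in qbman-portal.h;
 * 'cena_va', 'cinh_va' and 'qman_rev' are hypothetical caller values:
 *
 *	struct qbman_swp_desc desc = {
 *		.cena_bar = cena_va,	// cache-enabled region mapping
 *		.cinh_bar = cinh_va,	// cache-inhibited region mapping
 *		.qman_version = qman_rev,
 *	};
 *	struct qbman_swp *swp = qbman_swp_init(&desc);
 *
 *	if (!swp)
 *		return -ENODEV;
 *	...use the portal...
 *	qbman_swp_finish(swp);
 */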
/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}
/**
 * qbman_swp_interrupt_read_status() - read the interrupt status register
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - clear bits in the interrupt status
 *                                      register
 * @p:    the given software portal
 * @mask: the mask to clear in the SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p:    the given software portal
 * @mask: the mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p:       the given software portal object
 * @inhibit: whether to inhibit the IRQs
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
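
/*
 * Example (illustrative sketch only): a portal interrupt handler typically
 * reads the status, services the portal, then acknowledges what it handled.
 * 'swp' is assumed to be the qbman_swp object for the interrupting portal:
 *
 *	u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *	if (!status)
 *		return IRQ_NONE;
 *	...process DQRR entries and other portal work...
 *	qbman_swp_interrupt_clear_status(swp, status);
 */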
/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}
/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}
/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
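
/*
 * Example (illustrative sketch only): management commands follow a
 * start/fill/submit/poll pattern; qbman_swp_mc_complete() in qbman-portal.h
 * wraps the submit-then-poll step. 'struct my_cmd_desc' and MY_CMD_VERB are
 * hypothetical names standing in for a real command layout and verb:
 *
 *	struct my_cmd_desc *cmd;
 *	void *rslt;
 *
 *	cmd = qbman_swp_mc_start(swp);
 *	if (!cmd)
 *		return -EBUSY;
 *	...fill in the command fields, but not the verb byte...
 *	rslt = qbman_swp_mc_complete(swp, cmd, MY_CMD_VERB);
 *	if (!rslt)
 *		return -EIO;	// no response from hardware
 */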
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}
/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}
/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
						   u8 idx)
{
	if (idx < 16)
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
				     QMAN_RT_MODE);
	else
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
				     (idx - 16) * 4,
				     QMAN_RT_MODE);
}

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}
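
/*
 * Example (illustrative sketch only): enqueuing one frame to a frame queue
 * through the qbman_swp_enqueue() wrapper from qbman-portal.h; 'swp', 'fd'
 * and 'fqid' are assumed to come from the caller:
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);	// rejections go back to a FQ
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	if (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
 *		...EQCR full, back off and retry...
 */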
/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}
/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->addr_cena;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
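
/*
 * Example (illustrative sketch only): batch enqueue of 'n' frames sharing one
 * descriptor, via the qbman_swp_enqueue_multiple() wrapper in qbman-portal.h.
 * The return value counts the frames accepted, so callers retry the rest:
 *
 *	int done = 0;
 *
 *	while (done < n) {
 *		int ret = qbman_swp_enqueue_multiple(swp, &ed, &fds[done],
 *						     NULL, n - done);
 *		if (ret < 0)
 *			break;		// error
 *		done += ret;		// ret == 0 means the EQCR was full
 *	}
 */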
/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock(&s->access_spinlock);
	local_irq_save(irq_flags);

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = __raw_readl(p) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			local_irq_restore(irq_flags);
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	local_irq_restore(irq_flags);
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (uint64_t)s->addr_cena;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = __raw_readl(p) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}
/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = !!(src & (1 << channel_idx));
}
/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
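
/*
 * Example (illustrative sketch only): enabling push dequeue on channel 0 and
 * later checking it; 'swp' is assumed to be an initialized portal:
 *
 *	int on;
 *
 *	qbman_swp_push_set(swp, 0, 1);	// SDQCR now dequeues channel 0
 *	qbman_swp_push_get(swp, 0, &on);
 *	// on != 0 here
 *	qbman_swp_push_set(swp, 0, 0);	// last channel off; SDQCR written as 0
 */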
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};
/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}
/**
 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}
/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}
/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}
/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}
/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}
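
/*
 * Example (illustrative sketch only): issuing a volatile dequeue of up to 16
 * frames from 'fqid' into caller-provided storage, via the qbman_swp_pull()
 * wrapper from qbman-portal.h. 'store_va'/'store_dma' are assumed to be a
 * coherent allocation of at least 16 * sizeof(struct dpaa2_dq):
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, store_va, store_dma, 1);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd) == -EBUSY)
 *		...a previous volatile dequeue is still in flight...
 */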
/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}
#define QMAN_DQRR_PI_MASK   0xf
/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * checks, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * checks, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s:  the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s:  the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}
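
/*
 * Example (illustrative sketch only): polling the storage written by the pull
 * command issued earlier. Hardware sets the token as each response lands, so
 * the caller waits until qbman_result_has_new_result() accepts the entry,
 * then walks the results using the dpaa2-global.h helpers:
 *
 *	const struct dpaa2_dq *dq = store_va;
 *	bool last;
 *
 *	do {
 *		while (!qbman_result_has_new_result(swp, dq))
 *			cpu_relax();		// token not written yet
 *		last = dpaa2_dq_is_pull_complete(dq);
 *		if (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME)
 *			...process dpaa2_dq_fd(dq)...
 *		dq++;
 *	} while (!last);
 */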
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}
/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor
 * @bpid: the id of the buffer pool
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d:      the release descriptor
 * @enable: enable (1) or disable (0) the RCDI interrupt
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}
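
/*
 * Example (illustrative sketch only): seeding seven buffers into buffer pool
 * 'bpid' through the qbman_swp_release() wrapper from qbman-portal.h;
 * 'bufs[]' is assumed to hold the buffers' DMA addresses:
 *
 *	struct qbman_release_desc rd;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	while (qbman_swp_release(swp, &rd, bufs, 7) == -EBUSY)
 *		cpu_relax();		// no RCR slot available yet
 */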
/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}
struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};
/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, or a negative error code if the
 * acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
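
/*
 * Example (illustrative sketch only): draining up to seven buffers back out
 * of the pool; unlike release there is no descriptor, just the pool id:
 *
 *	u64 bufs[7];
 *	int n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *
 *	if (n < 0)
 *		...command failed...
 *	// on success bufs[0..n-1] hold the acquired buffer addresses;
 *	// n may be less than requested if the pool is nearly empty
 */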
struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}
struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}
#define QBMAN_RESPONSE_VERB_MASK	0x7f
#define QBMAN_FQ_QUERY_NP		0x45
#define QBMAN_BP_QUERY			0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};
int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}
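
/*
 * Example (illustrative sketch only): checking the backlog of a frame queue
 * with the query helpers above; qbman_fq_query_state() returns 0 on success:
 *
 *	struct qbman_fq_query_np_rslt state;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &state))
 *		pr_debug("FQ 0x%x: %u frames / %u bytes pending\n", fqid,
 *			 qbman_fq_state_frame_count(&state),
 *			 qbman_fq_state_byte_count(&state));
 */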
struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}
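
/*
 * Example (illustrative sketch only): the buffer pool counterpart, e.g. for a
 * low-watermark check before refilling the pool; 'refill_threshold' is a
 * hypothetical caller-chosen limit:
 *
 *	struct qbman_bp_query_rslt bp;
 *
 *	if (!qbman_bp_query(swp, bpid, &bp) &&
 *	    qbman_bp_info_num_free_bufs(&bp) < refill_threshold)
 *		...release more buffers into the pool...
 */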