// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"
/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
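
/* Taken together, the constants above give the two polling flavors their
 * budgets: roughly 100 us of busy-waiting (10 iterations x 10 us) and about
 * 5 s of sleeping poll (1000 iterations x 5 ms) before a ramrod is
 * considered stuck.
 */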
/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update; the load-acquire
		 * pairs with the store-release in qed_spq_blocking_cb().
		 */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}
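
/* qed_spq_block() below escalates through the polling flavors: an optional
 * quick busy-wait, a sleeping poll, an MCP drain request if the ramrod is
 * still outstanding, one final sleeping poll, and only then a HW-error
 * notification.
 */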
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}
/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}
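
/* A note on the completion modes handled above: BLOCK and EBLOCK both wait
 * synchronously via qed_spq_block(), but an EBLOCK entry is returned to the
 * free pool by the posting path itself rather than by the completion
 * handler (see qed_spq_post() and qed_spq_completion() below), while CB
 * simply invokes the caller-supplied callback from EQ context.
 */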
/***************************************************************************
 * HSI access
 ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* make sure doorbell is rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}
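
/* The barrier pairing above matters: the first wmb() makes sure the SPQ
 * element is in host memory before the doorbell tells HW to fetch it, and
 * the second flushes the doorbell write itself before success is reported.
 */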
/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}
int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}
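
/* Illustrative usage of the registration pair above (the handler name is
 * hypothetical; each protocol module supplies its own):
 *
 *	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *				  my_iscsi_async_handler);
 *	...
 *	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 */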
/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}
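
/* Why the fw_cons adjustment in qed_eq_completion(): chains in next-pointer
 * mode reserve the tail element(s) of each page for the next-page pointer,
 * so a FW consumer index landing on those "unusable" slots must be bumped
 * past them before being compared with the driver's consumer index (the
 * adjustment is a no-op for chains without unusable elements).
 */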
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;

	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}
/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0, /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * qed_spq_add_entry() - Add a new entry to the pending list.
 *                       Should be used while lock is being held.
 *
 * @p_hwfn: HW device data.
 * @p_ent: An entry to add.
 * @priority: Desired priority.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * Return: zero on success, -EINVAL on invalid @priority.
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
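
/* Note the asymmetry in the switch above: normal-priority entries are
 * appended to the tail of the pending list (list_add_tail), while
 * high-priority entries are prepended to its head (list_add) so they are
 * posted first once ring slots become available.
 */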
/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}
/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
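
/* Passing SPQ_HIGH_PRI_RESERVE_DEFAULT here keeps one ring slot in reserve
 * while draining the pending list, so a later high-priority ramrod can
 * always be posted without waiting for a completion to free a slot.
 */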
static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}
/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}
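
/* A worked example of the bitmap logic: with comp_bitmap_idx at 1, if
 * completions for echoes 2 and 3 arrive before the one for echo 1, bits 2
 * and 3 are set but the consumer does not move; when echo 1 finally
 * completes, bits 1-3 form a contiguous run and the loop above advances the
 * chain consumer three times.
 */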
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
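
/* Ownership summary for the EBLOCK path above: when the entry was queued
 * via unlimited_pending, the caller-allocated wrapper is freed once the
 * wait completes, and p_ent->post_ent - the ring entry that was actually
 * posted - is the one returned to the free pool.
 */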
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   le16_to_cpu(found->elem.hdr.echo),
		   found->comp_cb.function, found->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_NOTICE(p_hwfn,
			  "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;

	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}