/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
#endif
/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}
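
/* Completion handshake sketch for the two functions above (descriptive,
 * derived from this file, not additional driver code): a BLOCK/EBLOCK
 * ramrod's comp_cb.cookie points at a struct qed_spq_comp_done shared
 * between the EQ completion path and the sleeping caller:
 *
 *	writer (qed_spq_blocking_cb)	reader (qed_spq_block)
 *	----------------------------	----------------------
 *	comp_done->done = 0x1;		smp_rmb();
 *	comp_done->fw_return_code = x;	if (comp_done->done == 1)
 *	smp_wmb();				*p_fw_ret = ...fw_return_code;
 *
 * The smp_wmb()/smp_rmb() pairing keeps both stores visible to the
 * polling thread before it acts on the 'done' flag.
 */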
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}
/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}
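
/* Ordering sketch for the post above (descriptive, not additional driver
 * code): the SPQ element must be visible in memory before the doorbell
 * write, or firmware could fetch a stale element:
 *
 *	*elem = p_ent->elem;	// (1) publish the SPQ element
 *	wmb();			// (2) order (1) before (3)
 *	DOORBELL(...);		// (3) tell firmware the new producer value
 *
 * db is built so the whole core_db_data (params, agg_flags, spq_prod)
 * fits into the single 32-bit write performed by the DOORBELL() macro,
 * which is why the *(u32 *)&db cast is safe here.
 */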
/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
#endif
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}
/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
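
	/* Worked example for the adjustment above, with illustrative
	 * numbers rather than this chain's real geometry: assume 8
	 * elements per page with the last reserved as a next-page link,
	 * i.e. usable_per_page == 7. Then fw_cons_idx values 7, 15, 23...
	 * satisfy (fw_cons_idx & 7) == 7 and would point at a link
	 * element, so the unusable count (1) is added to land on the
	 * first real element of the following page.
	 */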
	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0, /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}
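
/* Usage sketch (hypothetical caller, not part of this file): a ramrod
 * is sent by pairing this function with qed_spq_post(). Everything
 * below other than the qed_spq_* API is illustrative:
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	u8 fw_ret = 0;
 *	int rc;
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc)
 *		return rc;
 *
 *	p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
 *	// fill p_ent->elem.hdr and the ramrod data here
 *
 *	// with EBLOCK, this sleeps until the EQE arrives and fw_ret
 *	// then holds the firmware return code.
 *	rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 */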
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire
			 * ring entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated
			 * p_ent; otherwise free it here.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
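
/* Illustrative scenario for the recycle path above (descriptive, not
 * additional driver code): suppose the free_pool was empty when
 * qed_spq_get_entry() kzalloc'd p_ent, so p_ent->queue points at
 * unlimited_pending. By the time the entry is added here, a pooled
 * entry p_en2 may have been returned. The pooled entry owns a slot in
 * the coherent ring buffer (its elem.data_ptr), so p_ent's contents are
 * copied into p_en2 with only the preserved data_ptr differing, and the
 * heap-allocated p_ent is freed - unless EBLOCK still needs it to poll
 * for completion, in which case qed_spq_post() frees it later.
 */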
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
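
/* Ownership summary for the EBLOCK paths above (descriptive, derived
 * from this file):
 * - pooled entry, success: qed_spq_post() returns it to the free pool
 *   via qed_spq_return_entry() after qed_spq_block() finishes.
 * - pooled entry, failure: spq_post_fail2/spq_post_fail reclaim the
 *   ring element and return the entry under the SPQ lock.
 * - kzalloc'd entry (queue == unlimited_pending): freed here with
 *   kfree() once blocking completes; qed_spq_completion() returns the
 *   pooled copy (p_en2) that actually carried the ramrod.
 */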
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
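
/* Worked example of the completion bitmap above (illustrative echo
 * values only): suppose entries with echo 5, 6 and 7 are in flight and
 * firmware completes 6 first. Bit 6 is set, but comp_bitmap_idx is
 * still 5, so no ring element is reclaimed yet. When echo 5 completes,
 * bits 5 and then 6 test positive in order, comp_bitmap_idx advances to
 * 7 and two chain elements are returned; echo 7 then follows on its
 * own. Consumer advancement therefore stays in ring order even though
 * EQEs may arrive out of order.
 */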
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}