// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#include "hinic_hw_dev.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"

#define HINIC_EQS_WQ_NAME			"hinic_eqs"

#define GET_EQ_NUM_PAGES(eq, pg_size)		\
		(ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))

#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size)	((pg_size) / (eq)->elem_size)

#define EQ_CONS_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))

#define EQ_HI_PHYS_ADDR_REG(eq, pg_num)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))

#define EQ_LO_PHYS_ADDR_REG(eq, pg_num)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))

#define GET_EQ_ELEMENT(eq, idx)		\
		((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
		 (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))

#define GET_AEQ_ELEM(eq, idx)		((struct hinic_aeq_elem *) \
					 GET_EQ_ELEMENT(eq, idx))

#define GET_CEQ_ELEM(eq, idx)		((u32 *) \
					 GET_EQ_ELEMENT(eq, idx))

#define GET_CURR_AEQ_ELEM(eq)		GET_AEQ_ELEM(eq, (eq)->cons_idx)

#define GET_CURR_CEQ_ELEM(eq)		GET_CEQ_ELEM(eq, (eq)->cons_idx)

#define PAGE_IN_4K(page_size)		((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq)	(ilog2(PAGE_IN_4K((eq)->page_size)))

#define ELEMENT_SIZE_IN_32B(eq)		(((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)	(ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define EQ_MAX_PAGES			8

#define CEQE_TYPE_SHIFT			23
#define CEQE_TYPE_MASK			0x7

#define CEQE_TYPE(ceqe)			(((ceqe) >> CEQE_TYPE_SHIFT) & \
					 CEQE_TYPE_MASK)

#define CEQE_DATA_MASK			0x3FFFFFF
#define CEQE_DATA(ceqe)			((ceqe) & CEQE_DATA_MASK)

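/*
 * The per-queue hinic_eq structs live in arrays inside hinic_aeqs/hinic_ceqs,
 * so stepping back q_id elements from an eq pointer yields &aeq[0]/&ceq[0],
 * from which container_of() recovers the parent structure.
 */
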
#define aeq_to_aeqs(eq)			\
		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

#define ceq_to_ceqs(eq)			\
		container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])

#define work_to_aeq_work(work)		\
		container_of(work, struct hinic_eq_work, work)

#define DMA_ATTR_AEQ_DEFAULT		0
#define DMA_ATTR_CEQ_DEFAULT		0

/* No coalescence */
#define THRESH_CEQ_DEFAULT		0

enum eq_int_mode {
	EQ_INT_MODE_ARMED,
	EQ_INT_MODE_ALWAYS
};

enum eq_arm_state {
	EQ_NOT_ARMED,
	EQ_ARMED
};

/**
 * hinic_aeq_register_hw_cb - register an AEQ callback for a specific event
 * @aeqs: pointer to Async eqs of the chip
 * @event: AEQ event to register the callback for
 * @handle: private data to be used by the callback
 * @hwe_handler: callback function
 **/
void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
			      enum hinic_aeq_type event, void *handle,
			      void (*hwe_handler)(void *handle, void *data,
						  u8 size))
{
	struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];

	hwe_cb->hwe_handler = hwe_handler;
	hwe_cb->handle = handle;
	hwe_cb->hwe_state = HINIC_EQE_ENABLED;
}

/**
 * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for a specific event
 * @aeqs: pointer to Async eqs of the chip
 * @event: AEQ event to unregister the callback for
 **/
void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
				enum hinic_aeq_type event)
{
	struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];

	hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;

	/* wait until a running handler, if any, has drained out */
	while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
		schedule();

	hwe_cb->hwe_handler = NULL;
}

/**
 * hinic_ceq_register_cb - register a CEQ callback for a specific event
 * @ceqs: pointer to Completion eqs part of the chip
 * @event: CEQ event to register the callback for
 * @handle: private data to be used by the callback
 * @handler: callback function
 **/
void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
			   enum hinic_ceq_type event, void *handle,
			   void (*handler)(void *handle, u32 ceqe_data))
{
	struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];

	ceq_cb->handler = handler;
	ceq_cb->handle = handle;
	ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
}

/**
 * hinic_ceq_unregister_cb - unregister the CEQ callback for a specific event
 * @ceqs: pointer to Completion eqs part of the chip
 * @event: CEQ event to unregister the callback for
 **/
void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
			     enum hinic_ceq_type event)
{
	struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];

	ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;

	/* wait until a running handler, if any, has drained out */
	while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
		schedule();

	ceq_cb->handler = NULL;
}

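/*
 * The cons idx register value carries a 4-bit XOR checksum, computed over all
 * eight nibbles of the value (with the checksum field itself cleared first);
 * eq_update_ci() fills it in before writing the register.
 */
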
static u8 eq_cons_idx_checksum_set(u32 val)
{
	u8 checksum = 0;
	int idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= ((val >> idx) & 0xF);

	return (checksum & 0xF);
}

/**
 * eq_update_ci - update the HW cons idx of event queue
 * @eq: the event queue to update the cons idx for
 * @arm_state: the arm bit value of eq's interrupt
 **/
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
	u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);

	/* Read Modify Write */
	val = hinic_hwif_read_reg(eq->hwif, addr);

	val = HINIC_EQ_CI_CLEAR(val, IDX)	&
	      HINIC_EQ_CI_CLEAR(val, WRAPPED)	&
	      HINIC_EQ_CI_CLEAR(val, INT_ARMED)	&
	      HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);

	val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX)    |
	       HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
	       HINIC_EQ_CI_SET(arm_state, INT_ARMED);

	val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);

	hinic_hwif_write_reg(eq->hwif, addr, val);
}

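/*
 * Each EQ element carries a "wrapped" bit that HW toggles on every pass over
 * the queue. SW keeps its own wrapped flag and flips it whenever cons_idx
 * wraps around q_len; an element whose wrapped bit still equals the SW flag
 * has not been written by HW yet, so the handlers below stop there.
 */
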
/**
 * aeq_irq_handler - handler for the AEQ event
 * @eq: the Async Event Queue that received the event
 **/
static void aeq_irq_handler(struct hinic_eq *eq)
{
	struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
	struct hinic_hwif *hwif = aeqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_aeq_elem *aeqe_curr;
	struct hinic_hw_event_cb *hwe_cb;
	enum hinic_aeq_type event;
	unsigned long eqe_state;
	u32 aeqe_desc;
	int i, size;

	for (i = 0; i < eq->q_len; i++) {
		aeqe_curr = GET_CURR_AEQ_ELEM(eq);

		/* Data in HW is in Big endian Format */
		aeqe_desc = be32_to_cpu(aeqe_curr->desc);

		/* HW toggles the wrapped bit when it adds an eq element */
		if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
			break;

		/* Make sure the descriptor is read before the element data */
		dma_rmb();

		event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
		if (event >= HINIC_MAX_AEQ_EVENTS) {
			dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
			return;
		}

		if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
			hwe_cb = &aeqs->hwe_cb[event];

			size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);

			eqe_state = cmpxchg(&hwe_cb->hwe_state,
					    HINIC_EQE_ENABLED,
					    HINIC_EQE_ENABLED |
					    HINIC_EQE_RUNNING);
			if ((eqe_state == HINIC_EQE_ENABLED) &&
			    (hwe_cb->hwe_handler))
				hwe_cb->hwe_handler(hwe_cb->handle,
						    aeqe_curr->data, size);
			else
				dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
					event);

			hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
		}

		eq->cons_idx++;

		if (eq->cons_idx == eq->q_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}
}

/**
 * ceq_event_handler - handler for the ceq events
 * @ceqs: ceqs part of the chip
 * @ceqe: ceq element that describes the event
 **/
static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
{
	struct hinic_hwif *hwif = ceqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_ceq_cb *ceq_cb;
	enum hinic_ceq_type event;
	unsigned long eqe_state;

	event = CEQE_TYPE(ceqe);
	if (event >= HINIC_MAX_CEQ_EVENTS) {
		dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
		return;
	}

	ceq_cb = &ceqs->ceq_cb[event];

	eqe_state = cmpxchg(&ceq_cb->ceqe_state,
			    HINIC_EQE_ENABLED,
			    HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);

	if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
		ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
	else
		dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);

	ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
}

/**
 * ceq_irq_handler - handler for the CEQ event
 * @eq: the Completion Event Queue that received the event
 **/
static void ceq_irq_handler(struct hinic_eq *eq)
{
	struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
	u32 ceqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		ceqe = *(GET_CURR_CEQ_ELEM(eq));

		/* Data in HW is in Big endian Format */
		ceqe = be32_to_cpu(ceqe);

		/* HW toggles the wrapped bit when it adds an eq element */
		if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
			break;

		ceq_event_handler(ceqs, ceqe);

		eq->cons_idx++;

		if (eq->cons_idx == eq->q_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}
}

/**
 * eq_irq_handler - handler for the EQ event
 * @data: the Event Queue that received the event
 **/
static void eq_irq_handler(void *data)
{
	struct hinic_eq *eq = data;

	if (eq->type == HINIC_AEQ)
		aeq_irq_handler(eq);
	else if (eq->type == HINIC_CEQ)
		ceq_irq_handler(eq);

	eq_update_ci(eq, EQ_ARMED);
}

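/*
 * AEQ interrupts are deferred to a workqueue (eq_irq_work) and CEQ interrupts
 * to a tasklet (ceq_tasklet); both paths end in eq_irq_handler() above, which
 * drains the queue and re-arms the interrupt.
 */
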
/**
 * eq_irq_work - the work of the EQ that received the event
 * @work: the work struct that is associated with the EQ
 **/
static void eq_irq_work(struct work_struct *work)
{
	struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
	struct hinic_eq *aeq;

	aeq = aeq_work->data;
	eq_irq_handler(aeq);
}

/**
 * ceq_tasklet - the tasklet of the EQ that received the event
 * @ceq_data: the Completion Event Queue
 **/
static void ceq_tasklet(unsigned long ceq_data)
{
	struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;

	eq_irq_handler(ceq);
}

/**
 * aeq_interrupt - aeq interrupt handler
 * @irq: irq number
 * @data: the Async Event Queue that collected the event
 **/
static irqreturn_t aeq_interrupt(int irq, void *data)
{
	struct hinic_eq_work *aeq_work;
	struct hinic_eq *aeq = data;
	struct hinic_aeqs *aeqs;

	/* clear resend timer cnt register */
	hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);

	aeq_work = &aeq->aeq_work;
	aeq_work->data = aeq;

	aeqs = aeq_to_aeqs(aeq);
	queue_work(aeqs->workq, &aeq_work->work);

	return IRQ_HANDLED;
}

/**
 * ceq_interrupt - ceq interrupt handler
 * @irq: irq number
 * @data: the Completion Event Queue that collected the event
 **/
static irqreturn_t ceq_interrupt(int irq, void *data)
{
	struct hinic_eq *ceq = data;

	/* clear resend timer cnt register */
	hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);

	tasklet_schedule(&ceq->ceq_tasklet);

	return IRQ_HANDLED;
}

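/*
 * CTRL0 holds the interrupt attributes of the EQ: MSI-X entry index, DMA
 * attributes, PCI interface index and interrupt mode (plus the kick threshold
 * for CEQs). The register is updated read-modify-write: the current value is
 * read, the relevant fields are cleared and the new values are OR-ed in.
 */
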
static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
{
	struct msix_entry *msix_entry = &eq->msix_entry;
	enum hinic_eq_type type = eq->type;
	u32 val, ctrl0;

	if (type == HINIC_AEQ) {
		/* RMW Ctrl0 */
		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX)	&
		      HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR)	&
		      HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX)	&
		      HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);

		ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX)     |
			HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
			HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
					     PCI_INTF_IDX)		      |
			HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);

		val |= ctrl0;
	} else {
		/* RMW Ctrl0 */
		addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX)	&
		      HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR)	&
		      HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH)	&
		      HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX)	&
		      HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);

		ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX)      |
			HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR)  |
			HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
			HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
					     PCI_INTF_IDX)		       |
			HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);

		val |= ctrl0;
	}
	return val;
}

static void set_ctrl0(struct hinic_eq *eq)
{
	u32 val, addr;

	if (eq->type == HINIC_AEQ)
		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
	else
		addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);

	val = get_ctrl0_val(eq, addr);

	hinic_hwif_write_reg(eq->hwif, addr, val);
}

static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
{
	u32 page_size_val, elem_size, val, ctrl1;
	enum hinic_eq_type type = eq->type;

	if (type == HINIC_AEQ) {
		/* RMW Ctrl1 */
		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN)		&
		      HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE)	&
		      HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

		ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN)	   |
			HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
			HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		val |= ctrl1;
	} else {
		/* RMW Ctrl1 */
		addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
		      HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

		ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
			HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		val |= ctrl1;
	}
	return val;
}

static void set_ctrl1(struct hinic_eq *eq)
{
	u32 val, addr;

	if (eq->type == HINIC_AEQ)
		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
	else
		addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);

	val = get_ctrl1_val(eq, addr);

	hinic_hwif_write_reg(eq->hwif, addr, val);
}

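/*
 * A VF cannot write the CEQ ctrl registers directly, so the ctrl values are
 * sent over the management channel (HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP) to
 * be written on its behalf; see set_eq_ctrls() below.
 */
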
static int set_ceq_ctrl_reg(struct hinic_eq *eq)
{
	struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
	struct hinic_hwdev *hwdev = eq->hwdev;
	u16 out_size = sizeof(ceq_ctrl);
	u16 in_size = sizeof(ceq_ctrl);
	struct hinic_pfhwdev *pfhwdev;
	u32 addr;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
	ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
	addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
	ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);

	ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
	ceq_ctrl.q_id = eq->q_id;

	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
				&ceq_ctrl, in_size,
				&ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
	if (err || !out_size || ceq_ctrl.status) {
		dev_err(&hwdev->hwif->pdev->dev,
			"Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
			eq->q_id, err, ceq_ctrl.status, out_size);
		return -EFAULT;
	}

	return 0;
}

/**
 * set_eq_ctrls - set the ctrl registers of the EQ
 * @eq: the Event Queue for setting
 **/
static int set_eq_ctrls(struct hinic_eq *eq)
{
	if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
		return set_ceq_ctrl_reg(eq);

	set_ctrl0(eq);
	set_ctrl1(eq);
	return 0;
}

/**
 * aeq_elements_init - initialize all the elements in the aeq
 * @eq: the Async Event Queue
 * @init_val: value to initialize the elements with
 **/
static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	struct hinic_aeq_elem *aeqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		aeqe = GET_AEQ_ELEM(eq, i);
		aeqe->desc = cpu_to_be32(init_val);
	}

	wmb(); /* Write the initialization values */
}

/**
 * ceq_elements_init - initialize all the elements in the ceq
 * @eq: the event queue
 * @init_val: value to initialize the elements with
 **/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	u32 *ceqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		ceqe = GET_CEQ_ELEM(eq, i);
		*(ceqe) = cpu_to_be32(init_val);
	}

	wmb(); /* Write the initialization values */
}

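/*
 * The queue memory is a set of DMA-coherent pages; each page's bus address is
 * programmed into a pair of HI/LO CSRs so that HW can DMA event elements
 * directly to it.
 */
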
/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int alloc_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	u32 init_val, addr, val;
	size_t addr_size;
	int err, pg;

	addr_size = eq->num_pages * sizeof(*eq->dma_addr);
	eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
	if (!eq->dma_addr)
		return -ENOMEM;

	addr_size = eq->num_pages * sizeof(*eq->virt_addr);
	eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
	if (!eq->virt_addr) {
		err = -ENOMEM;
		goto err_virt_addr_alloc;
	}

	for (pg = 0; pg < eq->num_pages; pg++) {
		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
						       eq->page_size,
						       &eq->dma_addr[pg],
						       GFP_KERNEL);
		if (!eq->virt_addr[pg]) {
			err = -ENOMEM;
			goto err_dma_alloc;
		}

		addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
		val = upper_32_bits(eq->dma_addr[pg]);
		hinic_hwif_write_reg(hwif, addr, val);

		addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
		val = lower_32_bits(eq->dma_addr[pg]);
		hinic_hwif_write_reg(hwif, addr, val);
	}

	init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);

	if (eq->type == HINIC_AEQ)
		aeq_elements_init(eq, init_val);
	else if (eq->type == HINIC_CEQ)
		ceq_elements_init(eq, init_val);

	return 0;

err_dma_alloc:
	while (--pg >= 0)
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg], eq->dma_addr[pg]);

	devm_kfree(&pdev->dev, eq->virt_addr);

err_virt_addr_alloc:
	devm_kfree(&pdev->dev, eq->dma_addr);
	return err;
}

/**
 * free_eq_pages - free the pages of the queue
 * @eq: the Event Queue
 **/
static void free_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int pg;

	for (pg = 0; pg < eq->num_pages; pg++)
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);

	devm_kfree(&pdev->dev, eq->virt_addr);
	devm_kfree(&pdev->dev, eq->dma_addr);
}

/**
 * init_eq - initialize Event Queue
 * @eq: the event queue
 * @hwif: the HW interface of a PCI function device
 * @type: the type of the event queue, aeq or ceq
 * @q_id: Queue id number
 * @q_len: the number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @entry: msix entry associated with the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
		   enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
		   struct msix_entry entry)
{
	struct pci_dev *pdev = hwif->pdev;
	int err;

	eq->hwif = hwif;
	eq->type = type;
	eq->q_id = q_id;
	eq->q_len = q_len;
	eq->page_size = page_size;

	/* Clear PI and CI, also clear the ARM bit */
	hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
	hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	if (type == HINIC_AEQ) {
		eq->elem_size = HINIC_AEQE_SIZE;
	} else if (type == HINIC_CEQ) {
		eq->elem_size = HINIC_CEQE_SIZE;
	} else {
		dev_err(&pdev->dev, "Invalid EQ type\n");
		return -EINVAL;
	}

	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
	eq->msix_entry = entry;

	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
		dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
		return -EINVAL;
	}

	if (eq->num_pages > EQ_MAX_PAGES) {
		dev_err(&pdev->dev, "too many pages for eq\n");
		return -EINVAL;
	}

	err = set_eq_ctrls(eq);
	if (err) {
		dev_err(&pdev->dev, "Failed to set eq ctrls\n");
		return err;
	}

	eq_update_ci(eq, EQ_ARMED);

	err = alloc_eq_pages(eq);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
		return err;
	}

	if (type == HINIC_AEQ) {
		struct hinic_eq_work *aeq_work = &eq->aeq_work;

		INIT_WORK(&aeq_work->work, eq_irq_work);
	} else if (type == HINIC_CEQ) {
		tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
			     (unsigned long)eq);
	}

	/* set the attributes of the msix entry */
	hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
			    HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
			    HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
			    HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
			    HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
			    HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);

	if (type == HINIC_AEQ)
		err = request_irq(entry.vector, aeq_interrupt, 0,
				  "hinic_aeq", eq);
	else if (type == HINIC_CEQ)
		err = request_irq(entry.vector, ceq_interrupt, 0,
				  "hinic_ceq", eq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
		goto err_req_irq;
	}

	return 0;

err_req_irq:
	free_eq_pages(eq);
	return err;
}

/**
 * remove_eq - remove Event Queue
 * @eq: the event queue
 **/
static void remove_eq(struct hinic_eq *eq)
{
	hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
			     HINIC_MSIX_DISABLE);
	free_irq(eq->msix_entry.vector, eq);

	if (eq->type == HINIC_AEQ) {
		struct hinic_eq_work *aeq_work = &eq->aeq_work;

		cancel_work_sync(&aeq_work->work);
		/* clear aeq_len to avoid hw access host memory */
		hinic_hwif_write_reg(eq->hwif,
				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
	} else if (eq->type == HINIC_CEQ) {
		tasklet_kill(&eq->ceq_tasklet);
		/* clear ceq_len to avoid hw access host memory */
		hinic_hwif_write_reg(eq->hwif,
				     HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
	}

	/* update cons_idx to avoid invalid interrupt */
	eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
	eq_update_ci(eq, EQ_NOT_ARMED);
}

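/*
 * After remove_eq() the queue is left un-armed (EQ_NOT_ARMED) with cons_idx
 * synced to the HW producer index, so an interrupt that races with teardown
 * finds an empty queue and does no further work.
 */
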
/**
 * hinic_aeqs_init - initialize all the aeqs
 * @aeqs: pointer to Async eqs of the chip
 * @hwif: the HW interface of a PCI function device
 * @num_aeqs: number of AEQs
 * @q_len: number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @msix_entries: msix entries associated with the event queues
 *
 * Return 0 - Success, Negative - Failure
 **/
int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
		    int num_aeqs, u32 q_len, u32 page_size,
		    struct msix_entry *msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	int err, i, q_id;

	aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
	if (!aeqs->workq)
		return -ENOMEM;

	aeqs->hwif = hwif;
	aeqs->num_aeqs = num_aeqs;

	for (q_id = 0; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
		if (err) {
			dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
			goto err_init_aeq;
		}
	}

	return 0;

err_init_aeq:
	for (i = 0; i < q_id; i++)
		remove_eq(&aeqs->aeq[i]);

	destroy_workqueue(aeqs->workq);
	return err;
}

/**
 * hinic_aeqs_free - free all the aeqs
 * @aeqs: pointer to Async eqs of the chip
 **/
void hinic_aeqs_free(struct hinic_aeqs *aeqs)
{
	int q_id;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
		remove_eq(&aeqs->aeq[q_id]);

	destroy_workqueue(aeqs->workq);
}

/**
 * hinic_ceqs_init - init all the ceqs
 * @ceqs: ceqs part of the chip
 * @hwif: the hardware interface of a pci function device
 * @num_ceqs: number of CEQs
 * @q_len: number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @msix_entries: msix entries associated with the event queues
 *
 * Return 0 - Success, Negative - Failure
 **/
int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
		    int num_ceqs, u32 q_len, u32 page_size,
		    struct msix_entry *msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	int i, q_id, err;

	ceqs->hwif = hwif;
	ceqs->num_ceqs = num_ceqs;

	for (q_id = 0; q_id < num_ceqs; q_id++) {
		ceqs->ceq[q_id].hwdev = ceqs->hwdev;
		err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
		if (err) {
			dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
			goto err_init_ceq;
		}
	}

	return 0;

err_init_ceq:
	for (i = 0; i < q_id; i++)
		remove_eq(&ceqs->ceq[i]);

	return err;
}

/**
 * hinic_ceqs_free - free all the ceqs
 * @ceqs: ceqs part of the chip
 **/
void hinic_ceqs_free(struct hinic_ceqs *ceqs)
{
	int q_id;

	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
		remove_eq(&ceqs->ceq[q_id]);
}