// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

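/* Both tables are indexed by enum mlxsw_pci_queue_type; consecutive queues
 * of one type share the per-type offset inside the doorbell page queried
 * from firmware. Only CQs and EQs have "arm" doorbells, hence the two
 * leading placeholder entries in the arm table.
 */
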
struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

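/* The device flips the expected value of the owner bit on every full wrap
 * of the ring, so an element is still hardware-owned while its owner bit
 * differs from bit log2(count) of the consumer counter
 * (q->consumer_counter & q->count). This lets the driver detect how far
 * the hardware has progressed without an extra index register.
 */
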
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

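/* Doorbell usage in this driver: the producer doorbell publishes newly
 * filled elements to the device, the consumer doorbell reports how far the
 * driver has consumed, and the "arm" doorbell re-enables event generation
 * for a CQ/EQ once the driver has drained it.
 */
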
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set CQ of the same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this RDQ, with a base above the SDQ
	 * count, as the lower CQs are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

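/* CQs are split by number: the first num_sdq_cqs completion queues serve
 * SDQs (bound in mlxsw_pci_sdq_init() as CQ == SDQ number) and the
 * remaining ones serve RDQs (bound in mlxsw_pci_rdq_init() with an
 * sdq_count base offset).
 */
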
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

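/* The rmb() in mlxsw_pci_cq_sw_cqe_get() is the read-side half of the
 * ownership handshake: the owner bit must be observed as driver-owned
 * before any other CQE field is parsed, otherwise a partially written
 * element could be consumed.
 */
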
static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

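/* CQEv2 elements are larger than CQEv0/1 elements, and since every queue
 * uses the same MLXSW_PCI_AQ_SIZE memory area, a v2 CQ presumably holds
 * correspondingly fewer elements; both geometry helpers therefore key off
 * the CQE version chosen in mlxsw_pci_cq_pre_init().
 */
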
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

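/* Queue types with a fixed geometry (SDQ, RDQ, EQ) fill in the static
 * elem_count/elem_size members; CQs supply the elem_count_f/elem_size_f
 * callbacks instead, because their geometry depends on the CQE version
 * selected at runtime (see mlxsw_pci_queue_init()).
 */
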
static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE,
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

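/* Teardown runs in the reverse order of mlxsw_pci_aqs_init(): descriptor
 * queues go first because their completions target CQs, and the EQs that
 * deliver CQ events are torn down last.
 */
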
static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							  profile->ubridge);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

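/* Every profile knob follows the same mailbox convention: a "set_..."
 * valid bit tells firmware the accompanying value field was deliberately
 * written, so unset knobs keep their firmware defaults.
 */
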
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

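/* FW_READY is busy-polled here because this runs before any event queue
 * exists; the magic value in the masked low bits appears to be the
 * firmware's "boot completed" handshake.
 */
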
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	/* Some resources depend on unified bridge model, which is configured
	 * as part of config_profile. Query the resources again to get correct
	 * values.
	 */
	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_requery_resources;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

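/* SDQ 0 is dedicated to EMAD (device management) traffic, keeping
 * register/command transactions separate from bulk packet transmission;
 * all other traffic is spread across the remaining control SDQs by local
 * port number.
 */
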
static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, 0);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

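/* Until the event queues are up (cmd.nopoll == false) command completion
 * is busy-polled from the CIR_CTRL GO bit; afterwards (evreq == true) the
 * completion arrives as an event on the async EQ and
 * mlxsw_pci_eq_cmd_event() wakes this waiter.
 */
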
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
}

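/* The free-running clock is a 64-bit counter exposed through two 32-bit
 * registers; the core reads the high and low halves via these callbacks
 * and is expected to stitch them into a single timestamp.
 */
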
static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = dev_driver_string(&pdev->dev);
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_frc_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	pci_driver->shutdown = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");