// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

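/* Both tables above are indexed by enum mlxsw_pci_queue_type; only CQs and
 * EQs have an "arm" doorbell, so the SDQ/RDQ slots stay zero. A doorbell
 * write resolves, roughly, to
 *	iowrite32be(val, hw_addr + doorbell_offset + type_offset + num * 4);
 * the exact layout comes from the MLXSW_PCI_DOORBELL() macro in pci_hw.h.
 */
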
struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

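/* Illustrative: with count == 8, producer_counter == 11 and
 * consumer_counter == 3, (u16)(11 - 3) == 8 == count, so the ring is full
 * and NULL is returned; otherwise the element at index 11 & 7 == 3 is
 * handed out. The u16 counters wrap naturally.
 */
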
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

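/* Ownership sketch: (consumer_counter & count) flips between 0 and count on
 * every full pass over the ring (count is a power of two), so the expected
 * owner bit alternates per pass. If the element's bit differs from the
 * expected value, HW still owns it and SW must stop consuming.
 */
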
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

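/* Arming a CQ/EQ tells HW it may raise the MSI-X interrupt again once new
 * completions/events arrive; until re-armed, the queue is drained purely by
 * its tasklet. This is the usual interrupt-mitigation pattern.
 */
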
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

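/* TX completion in short: unmap every fragment, hand PTP-timestamped skbs to
 * the core for TX timestamping, free the rest, and advance the consumer
 * counter, which should land exactly on the WQE counter reported in the CQE
 * (hence the debug check above).
 */
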
static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char *wqe;
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

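/* RX path in short: detach the filled skb, pass it up via
 * mlxsw_core_skb_receive(), refill the WQE with a fresh skb and only then
 * ring the producer doorbell. If the refill fails, the element goes back to
 * HW without a buffer and its next completion is skipped by the !skb check
 * above, until allocations succeed again.
 */
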
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

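/* Illustrative: the tasklet drains at most count / 2 completions per run
 * (the "credits") and only then re-arms the CQ. Capping the work done per
 * tasklet invocation keeps one busy queue from monopolizing softirq time.
 */
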
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

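/* EQ0 carries command-interface completions, EQ1 aggregates CQ events; this
 * tasklet only collects active CQ numbers into a bitmap and defers the real
 * completion work to the per-CQ tasklets scheduled above.
 */
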
struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

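/* Bring-up order matters here: EQs first (so CQs have an EQ to map to),
 * then CQs, then the descriptor queues that bind to CQs by number -- SDQs
 * take CQs 0..num_sdqs-1 and RDQs the ones above, as noted in
 * mlxsw_pci_sdq_init() and mlxsw_pci_rdq_init().
 */
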
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
								mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}
	if (profile->used_kvh_xlt_cache_mode) {
		mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set(
			mbox, profile->kvh_xlt_cache_mode);
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci,
					  struct mlxsw_bus_info *bus_info,
					  char *mbox)
{
	int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox);
	int i;

	if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox))
		return 0;

	bus_info->xm_exists = true;

	if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) {
		dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n");
		return -EINVAL;
	}
	bus_info->xm_local_ports_count = count;
	for (i = 0; i < count; i++)
		bus_info->xm_local_ports[i] =
			mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox,
									 i);
	return 0;
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);

	return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox);
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

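/* Illustrative: with 3 SDQs, EMADs always use SDQ 0 while data packets from
 * local_port 9 hash to 1 + (9 % 2) == SDQ 2. Keeping EMADs on a dedicated
 * queue prevents bulk traffic from delaying register access.
 */
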
static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

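/* A WQE carries MLXSW_PCI_WQE_SG_ENTRIES scatter/gather slots: slot 0 holds
 * the linear part of the skb and the remaining slots take page fragments,
 * which is why frag-heavy skbs are linearized above instead of dropped.
 * Zeroing unused byte counts lets the completion path unmap all slots
 * unconditionally (mlxsw_pci_wqe_frag_unmap() skips zero-length entries).
 */
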
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

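/* Command interface usage sketch: before the async queues exist
 * (cmd.nopoll == false) the GO bit in CIR_CTRL is polled; once EQ0 is up,
 * completions arrive as events and the caller sleeps on cmd.wait. E.g. the
 * QUERY_FW issued from mlxsw_pci_init() is still served by polling.
 */
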
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_frc_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");