// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
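/* The two accessor macros above paste the register name onto the MLXSW_PCI_
 * prefix, so register offsets are resolved at compile time and every access
 * is a big-endian MMIO read or write. An illustrative expansion (assuming
 * MLXSW_PCI_CIR_CTRL is one of the register offsets defined in pci_hw.h):
 *
 *	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, val);
 * becomes
 *	iowrite32be(val, (mlxsw_pci)->hw_addr + MLXSW_PCI_CIR_CTRL);
 */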
enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused, only CQs and EQs have arm doorbells */
	0, /* unused, only CQs and EQs have arm doorbells */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	u64 utc_sec_offset;
	u64 utc_nsec_offset;
	bool lag_mode_support;
	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
	bool skip_reset;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL; /* queue is full */
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}
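/* Ownership of CQ/EQ elements is tracked with a single owner bit that the
 * device writes together with each completed element. Since q->count is a
 * power of two and the consumer counter is free running, the expression
 * (q->consumer_counter & q->count) flips every time the consumer wraps the
 * ring, yielding the owner value the device writes on the current pass. At
 * init all elements are marked with owner bit 1, so on the first pass every
 * element whose bit still differs from the expected value is reported as
 * hardware-owned and must not be parsed. This reading of the scheme follows
 * from the sw_cqe_get()/sw_eqe_get() helpers below; the precise bit layout is
 * defined by the device interface in pci_hw.h.
 */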
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}
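/* Doorbell protocol, as it can be read from the helpers above: every doorbell
 * is a 32-bit big-endian write into the doorbell page reported by firmware
 * (mlxsw_pci->doorbell_offset), at a per-queue-type offset taken from the
 * tables at the top of this file. Producer doorbells publish the new producer
 * counter, consumer doorbells publish the consumer counter biased by q->count
 * (consistent with the owner-bit wrap scheme described earlier), and arm
 * doorbells re-enable event generation for a CQ/EQ after a tasklet run. The
 * wmb() before each write orders descriptor updates in coherent memory ahead
 * of the MMIO write that makes them visible to the device; the exact
 * DOORBELL() offset computation lives in pci_hw.h.
 */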
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return; /* unused scatter/gather entry */
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info,
				   gfp_t gfp)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs &&
	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}
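/* CQs are created with every CQE marked hardware-owned (owner bit 1) and are
 * bound to the completion EQ (MLXSW_PCI_EQ_COMP_NUM). The final consumer and
 * arm doorbells publish the initial consumer position and enable the first
 * completion event; from then on the CQ tasklet re-arms the CQ after each
 * batch it processes.
 */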
static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
					 u32 off)
{
	return ioread32be(mlxsw_pci->hw_addr + off);
}

static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
				    struct sk_buff *skb,
				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	u8 ts_type;

	if (cqe_v != MLXSW_PCI_CQE_V2)
		return;

	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);

	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
		return;

	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	mlxsw_skb_cb(skb)->cqe_ts.nsec =
		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
}
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
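/* The rmb() above pairs with the device's DMA write of the CQE: the owner bit
 * must be observed as software-owned before any other CQE field is read,
 * otherwise a stale element could be parsed. The same pattern is repeated for
 * EQEs in mlxsw_pci_eq_sw_eqe_get() below.
 */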
static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}
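/* A sketch of the completion flow driven by the tasklet above: each CQE names
 * the data queue (dqn) and WQE index it completes, and the SR bit
 * distinguishes send from receive queues. The CQE is copied out (ncqe) before
 * the consumer doorbell is rung, because ringing the doorbell returns the
 * ring slot to the device. The "credits" cap (half the ring) bounds the work
 * done in one tasklet run, and the CQ is re-armed for further events only if
 * at least one item was processed.
 */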
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}
struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE,
};
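/* SDQs, RDQs and EQs have fixed element counts and sizes, so they use the
 * static elem_count/elem_size members. CQs provide elem_count_f/elem_size_f
 * callbacks instead, because their geometry depends on the CQE version chosen
 * per queue in mlxsw_pci_cq_pre_init(): CQE v2 uses a different descriptor
 * size and ring size than v0/v1. mlxsw_pci_queue_init() below prefers the
 * callbacks when they are set.
 */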
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}
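/* The bring-up order above is deliberate: EQs first, then the CQs that feed
 * them, then the data queues (SDQs/RDQs) that complete into the CQs. The CQ
 * numbering convention ties it together: CQ n serves SDQ n for n < num_sdqs,
 * and CQ (num_sdqs + n) serves RDQ n (see the cq_set() calls in
 * mlxsw_pci_sdq_init() and mlxsw_pci_rdq_init()). cmd.nopoll is flipped only
 * at the very end, since command completions can be delivered as EQ0 events
 * only once the event machinery exists; mlxsw_pci_aqs_fini() tears everything
 * down in exact reverse order.
 */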
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
							  profile->max_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							  profile->ubridge);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	if (profile->used_cqe_time_stamp_type) {
		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
									  1);
		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
					profile->cqe_time_stamp_type);
	}

	if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
		enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;

		mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
		mlxsw_pci->lag_mode = lag_mode;
	} else {
		mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
	}
	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}
static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl,
			    MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	device_lock_assert(&pdev->dev);

	pci_cfg_access_lock(pdev);
	pci_save_state(pdev);

	err = __pci_reset_function_locked(pdev);
	if (err)
		pci_err(pdev, "PCI function reset failed with %d\n", err);

	pci_restore_state(pdev);
	pci_cfg_access_unlock(pdev);

	return err;
}

static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
{
	char mrsr_pl[MLXSW_REG_MRSR_LEN];

	mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
	return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
}
static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mcam_pl[MLXSW_REG_MCAM_LEN];
	bool pci_reset_supported;
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	/* PCI core already issued a PCI reset, do not issue another reset. */
	if (mlxsw_pci->skip_reset)
		return 0;

	mlxsw_reg_mcam_pack(mcam_pl,
			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
	if (err)
		return err;

	mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
			      &pci_reset_supported);

	if (pci_reset_supported) {
		pci_dbg(pdev, "Starting PCI reset flow\n");
		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
	} else {
		pci_dbg(pdev, "Starting software reset flow\n");
		err = mlxsw_pci_reset_sw(mlxsw_pci);
	}
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_sec_bar;
	}

	mlxsw_pci->utc_sec_offset =
		mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_nsec_bar;
	}

	mlxsw_pci->utc_nsec_offset =
		mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);

	mlxsw_pci->lag_mode_support =
		mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	/* Some resources depend on details of config_profile, such as unified
	 * bridge model. Query the resources again to get correct values.
	 */
	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_requery_resources;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_utc_nsec_bar:
err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}
static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}
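/* SDQ 0 is reserved for EMAD (device management) traffic so that management
 * commands are never stuck behind bulk control traffic; all other packets are
 * spread across the remaining SDQs by a simple modulo on the local port. The
 * BUILD_BUG_ON documents the assumption that the EMAD queue is queue 0, which
 * the "1 +" in the hash relies on.
 */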
static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, 0);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	if (*p_wait_done) {
		err = 0;
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}
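/* Command interface usage sketch: early in probe, before any event queues
 * exist, cmd.nopoll is false and command completion is detected by polling
 * the GO bit in CIR_CTRL. Once mlxsw_pci_aqs_init() has set cmd.nopoll,
 * commands are issued with the EVREQ bit and the caller sleeps on cmd.wait
 * until the EQ0 tasklet delivers the completion via mlxsw_pci_eq_cmd_event().
 * In both modes the device status byte is reported through *p_status, while
 * the return value only distinguishes success, -EIO and -ETIMEDOUT.
 */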
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_h;

	frc_offset_h = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_l;

	frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
}

static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
}

static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}

static enum mlxsw_cmd_mbox_config_profile_lag_mode
mlxsw_pci_lag_mode(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci->lag_mode;
}
static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.read_utc_sec		= mlxsw_pci_read_utc_sec,
	.read_utc_nsec		= mlxsw_pci_read_utc_nsec,
	.lag_mode		= mlxsw_pci_lag_mode,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = dev_driver_string(&pdev->dev);
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_clock_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}
static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
}

static void mlxsw_pci_reset_done(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_pci->skip_reset = true;
	mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
				       mlxsw_pci, false, NULL, NULL);
	mlxsw_pci->skip_reset = false;
}

static const struct pci_error_handlers mlxsw_pci_err_handler = {
	.reset_prepare = mlxsw_pci_reset_prepare,
	.reset_done = mlxsw_pci_reset_done,
};
int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	pci_driver->shutdown = mlxsw_pci_remove;
	pci_driver->err_handler = &mlxsw_pci_err_handler;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");