/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused, SDQs have no arm doorbell */
	0, /* unused, RDQs have no arm doorbell */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL; /* ring is full */
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

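/* SW and HW pass ring elements back and forth using an owner bit. Because
 * q->count is a power of two, the (q->consumer_counter & q->count) term
 * below flips on every complete pass over the ring, so an element is still
 * owned by HW while its owner bit differs from the current pass parity.
 */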
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

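/* Doorbells report the free-running 16-bit counters to HW. The producer
 * doorbell carries the producer counter; the consumer doorbell carries
 * consumer_counter + q->count, in effect granting HW the whole free span
 * of the ring, one full ring ahead of the last element SW consumed. The
 * arm doorbell lets the queue generate events again once SW caught up.
 */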
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return; /* unused scatter/gather entry */
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this RDQ, offset by the SDQ count,
	 * since the lower-numbered CQs are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

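/* A completion on an SDQ means HW is done transmitting the WQE, so its
 * fragments can be unmapped and the skb freed. consumer_counter_limit is
 * the WQE counter HW reported in the CQE and is expected to match the SW
 * consumer counter.
 */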
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct sk_buff *skb;
	char *wqe;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

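/* Process at most half a ring's worth of completions per tasklet run so a
 * busy CQ cannot monopolize the CPU; the doorbells rung at the end hand
 * the consumed elements back to HW and re-arm the queue, and the EQ will
 * schedule this tasklet again when more completions arrive.
 */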
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, cqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, cqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

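/* The EQ tasklet demultiplexes events: command-interface completions wake
 * the waiter in mlxsw_pci_cmd_exec(), while completion events are batched
 * into a CQN bitmap so each active CQ tasklet is scheduled only once per
 * run.
 */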
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

		switch (event_type) {
		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE,
};

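/* CQ element count and size depend on the CQE version chosen in pre_init,
 * so the CQ ops provide elem_count_f/elem_size_f callbacks; the other
 * queue types use the static elem_count/elem_size values.
 */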
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize elem_info entries so each DMA-mapped element can be
	 * looked up easily later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

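/* Async queues must come up in dependency order: EQs first (so command
 * completions can be signalled), then CQs (DQs reference them by number:
 * CQ n serves SDQ n, CQ sdq_count + n serves RDQ n), then the SDQs and
 * RDQs themselves.
 */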
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* Up to this point the command interface had to be polled; now that
	 * the EQs are up, commands can use event-driven completion.
	 */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
				     struct mlxsw_res *res)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	if (!res)
		return 0;

	mlxsw_cmd_mbox_zero(mbox);

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* If after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES we still didn't get
	 * MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID, something went bad in the FW.
	 */
	return -EIO;
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
								mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

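/* The firmware area is a set of single pages mapped to the device with
 * MAP_FA commands, batched up to MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries
 * per command; the number of pages to map is whatever QUERY_FW asked for.
 */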
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
					 &mbox->mapaddr);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			    mbox->mapaddr);
}

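/* Reset is triggered through the MRSR register. SwitchX-2 is given a fixed
 * sleep instead of readiness polling; newer devices expose a FW_READY
 * register that is polled for the ready magic until the timeout expires.
 */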
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	unsigned long end;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;
	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* We must wait for the HW to become responsive once again. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	return -EBUSY;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}

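/* Pick an SDQ for a packet by taking the local port modulo the number of
 * SDQs, spreading the transmitting ports across the available queues.
 */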
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

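/* Execute a command through the command interface registers (CIR). Until
 * the EQs exist (cmd.nopoll is false) completion is detected by polling
 * the GO bit; afterwards the command is posted with EVREQ set and the
 * caller sleeps until mlxsw_pci_eq_cmd_event() signals completion.
 */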
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");