1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/if_vlan.h>
11 #include <linux/kernel.h>
12 #include <linux/pci.h>
13 #include <linux/slab.h>
14 #include <linux/stddef.h>
15 #include <linux/workqueue.h>
17 #include <linux/bitops.h>
18 #include <linux/delay.h>
19 #include <linux/errno.h>
20 #include <linux/etherdevice.h>
22 #include <linux/list.h>
23 #include <linux/mutex.h>
24 #include <linux/spinlock.h>
25 #include <linux/string.h>
26 #include <linux/qed/qed_ll2_if.h>
29 #include "qed_dev_api.h"
36 #include "qed_reg_addr.h"
40 #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered)
41 #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered)
43 #define QED_LL2_TX_SIZE (256)
44 #define QED_LL2_RX_SIZE (4096)
46 struct qed_cb_ll2_info {
51 /* Lock protecting LL2 buffer lists in atomic (non-sleeping) context */
53 struct list_head list;
55 const struct qed_ll2_cb_ops *cbs;
59 struct qed_ll2_buffer {
60 struct list_head list;
65 static void qed_ll2b_complete_tx_packet(void *cxt,
68 dma_addr_t first_frag_addr,
72 struct qed_hwfn *p_hwfn = cxt;
73 struct qed_dev *cdev = p_hwfn->cdev;
74 struct sk_buff *skb = cookie;
76 /* All we need to do is release the mapping */
77 dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
78 skb_headlen(skb), DMA_TO_DEVICE);
80 if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
81 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
84 dev_kfree_skb_any(skb);
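/* Allocate a cdev->ll2->rx_size buffer with GFP_ATOMIC (this can be called
 * from the Rx completion path) and DMA-map it for device writes; the mapping
 * starts NET_SKB_PAD bytes in, leaving headroom so the completion handler
 * can wrap the buffer with build_skb().
 */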
87 static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
88 u8 **data, dma_addr_t *phys_addr)
90 *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
92 DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
96 *phys_addr = dma_map_single(&cdev->pdev->dev,
97 ((*data) + NET_SKB_PAD),
98 cdev->ll2->rx_size, DMA_FROM_DEVICE);
99 if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
100 DP_INFO(cdev, "Failed to map LL2 buffer data\n");
108 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
109 struct qed_ll2_buffer *buffer)
111 spin_lock_bh(&cdev->ll2->lock);
113 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
114 cdev->ll2->rx_size, DMA_FROM_DEVICE);
116 list_del(&buffer->list);
119 if (!cdev->ll2->rx_cnt)
120 DP_INFO(cdev, "All LL2 entries were removed\n");
122 spin_unlock_bh(&cdev->ll2->lock);
127 static void qed_ll2_kill_buffers(struct qed_dev *cdev)
129 struct qed_ll2_buffer *buffer, *tmp_buffer;
131 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
132 qed_ll2_dealloc_buffer(cdev, buffer);
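/* Default Rx completion used by the qed client interface: allocate a
 * replacement buffer, turn the completed one into an skb via build_skb()
 * and pass it to the registered rx_cb; if allocation fails or the frame is
 * shorter than an Ethernet header, the original buffer is reposted instead.
 */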
135 static void qed_ll2b_complete_rx_packet(void *cxt,
136 struct qed_ll2_comp_rx_data *data)
138 struct qed_hwfn *p_hwfn = cxt;
139 struct qed_ll2_buffer *buffer = data->cookie;
140 struct qed_dev *cdev = p_hwfn->cdev;
141 dma_addr_t new_phys_addr;
148 (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
149 "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
150 (u64)data->rx_buf_addr,
151 data->u.placement_offset,
152 data->length.packet_length,
154 data->vlan, data->opaque_data_0, data->opaque_data_1);
156 if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
157 print_hex_dump(KERN_INFO, "",
158 DUMP_PREFIX_OFFSET, 16, 1,
159 buffer->data, data->length.packet_length, false);
162 /* Determine if data is valid */
163 if (data->length.packet_length < ETH_HLEN)
166 /* Allocate a replacement for the buffer; reuse it upon failure */
168 rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
171 /* If we need to reuse, or there is no replacement buffer, repost this one */
174 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
175 cdev->ll2->rx_size, DMA_FROM_DEVICE);
177 skb = build_skb(buffer->data, 0);
179 DP_INFO(cdev, "Failed to build SKB\n");
184 data->u.placement_offset += NET_SKB_PAD;
185 skb_reserve(skb, data->u.placement_offset);
186 skb_put(skb, data->length.packet_length);
187 skb_checksum_none_assert(skb);
189 /* Get partial Ethernet information instead of eth_type_trans(),
190 * since we don't have an associated net_device.
192 skb_reset_mac_header(skb);
193 skb->protocol = eth_hdr(skb)->h_proto;
195 /* Pass SKB onward */
196 if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
198 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
200 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
202 data->opaque_data_1);
204 DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
205 QED_MSG_LL2 | QED_MSG_STORAGE),
206 "Dropping the packet\n");
211 /* Update the buffer information and update the FW producer */
212 buffer->data = new_data;
213 buffer->phys_addr = new_phys_addr;
216 rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
217 buffer->phys_addr, 0, buffer, 1);
219 qed_ll2_dealloc_buffer(cdev, buffer);
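/* Map a connection handle to its qed_ll2_info, validating the handle range.
 * The two boolean arguments (see the three wrappers below) select whether
 * the per-connection mutex is kept held on success and whether an active or
 * an inactive connection is expected.
 */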
222 static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
223 u8 connection_handle,
227 struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
229 if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
232 if (!p_hwfn->p_ll2_info)
235 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
239 mutex_lock(&p_ll2_conn->mutex);
240 if (p_ll2_conn->b_active)
243 mutex_unlock(&p_ll2_conn->mutex);
251 static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
252 u8 connection_handle)
254 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
257 static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
258 u8 connection_handle)
260 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
263 static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
265 u8 connection_handle)
267 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
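/* Tear-down helper: walk the Tx active list and return every pending packet,
 * either to the OOO free-buffer pool or to the owner through tx_release_cb;
 * the queue lock is dropped around the callback.
 */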
270 static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
272 bool b_last_packet = false, b_last_frag = false;
273 struct qed_ll2_tx_packet *p_pkt = NULL;
274 struct qed_ll2_info *p_ll2_conn;
275 struct qed_ll2_tx_queue *p_tx;
276 unsigned long flags = 0;
279 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
283 p_tx = &p_ll2_conn->tx_queue;
285 spin_lock_irqsave(&p_tx->lock, flags);
286 while (!list_empty(&p_tx->active_descq)) {
287 p_pkt = list_first_entry(&p_tx->active_descq,
288 struct qed_ll2_tx_packet, list_entry);
292 list_del(&p_pkt->list_entry);
293 b_last_packet = list_empty(&p_tx->active_descq);
294 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
295 spin_unlock_irqrestore(&p_tx->lock, flags);
296 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
297 struct qed_ooo_buffer *p_buffer;
299 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
300 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
303 p_tx->cur_completing_packet = *p_pkt;
304 p_tx->cur_completing_bd_idx = 1;
306 p_tx->cur_completing_bd_idx == p_pkt->bd_used;
307 tx_frag = p_pkt->bds_set[0].tx_frag;
308 p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
315 spin_lock_irqsave(&p_tx->lock, flags);
317 spin_unlock_irqrestore(&p_tx->lock, flags);
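/* Tx status-block callback. Note the signed 16-bit subtraction below: it
 * yields the number of newly completed BDs even across a producer wrap,
 * e.g. new_idx == 0x0001 and bds_idx == 0xffff give num_bds == 2.
 */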
320 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
322 struct qed_ll2_info *p_ll2_conn = p_cookie;
323 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
324 u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
325 struct qed_ll2_tx_packet *p_pkt;
326 bool b_last_frag = false;
333 spin_lock_irqsave(&p_tx->lock, flags);
334 if (p_tx->b_completing_packet) {
339 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
340 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
342 if (list_empty(&p_tx->active_descq))
345 p_pkt = list_first_entry(&p_tx->active_descq,
346 struct qed_ll2_tx_packet, list_entry);
350 p_tx->b_completing_packet = true;
351 p_tx->cur_completing_packet = *p_pkt;
352 num_bds_in_packet = p_pkt->bd_used;
353 list_del(&p_pkt->list_entry);
355 if (num_bds < num_bds_in_packet) {
357 "Rest of BDs does not cover whole packet\n");
361 num_bds -= num_bds_in_packet;
362 p_tx->bds_idx += num_bds_in_packet;
363 while (num_bds_in_packet--)
364 qed_chain_consume(&p_tx->txq_chain);
366 p_tx->cur_completing_bd_idx = 1;
367 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
368 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
370 spin_unlock_irqrestore(&p_tx->lock, flags);
372 p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
375 p_pkt->bds_set[0].tx_frag,
376 b_last_frag, !num_bds);
378 spin_lock_irqsave(&p_tx->lock, flags);
381 p_tx->b_completing_packet = false;
384 spin_unlock_irqrestore(&p_tx->lock, flags);
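/* CQE parsers: GSI completions return the source MAC address through the
 * opaque data fields and a data_length, while regular fast-path completions
 * return packet_length, error flags and the buffer placement offset.
 */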
388 static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
389 union core_rx_cqe_union *p_cqe,
390 struct qed_ll2_comp_rx_data *data)
392 data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
393 data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
394 data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
395 data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
396 data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
397 data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
398 data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
400 data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
403 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
404 union core_rx_cqe_union *p_cqe,
405 struct qed_ll2_comp_rx_data *data)
407 data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
408 data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
409 data->length.packet_length =
410 le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
411 data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
412 data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
413 data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
414 data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
418 qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
419 struct qed_ll2_info *p_ll2_conn,
420 union core_rx_cqe_union *p_cqe,
421 unsigned long *p_lock_flags)
423 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
424 struct core_rx_slow_path_cqe *sp_cqe;
426 sp_cqe = &p_cqe->rx_cqe_sp;
427 if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
429 "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
430 sp_cqe->ramrod_cmd_id);
434 if (!p_ll2_conn->cbs.slowpath_cb) {
436 "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
440 spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
442 p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
444 le32_to_cpu(sp_cqe->opaque_data.data[0]),
445 le32_to_cpu(sp_cqe->opaque_data.data[1]));
447 spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
453 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
454 struct qed_ll2_info *p_ll2_conn,
455 union core_rx_cqe_union *p_cqe,
456 unsigned long *p_lock_flags, bool b_last_cqe)
458 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
459 struct qed_ll2_rx_packet *p_pkt = NULL;
460 struct qed_ll2_comp_rx_data data;
462 if (!list_empty(&p_rx->active_descq))
463 p_pkt = list_first_entry(&p_rx->active_descq,
464 struct qed_ll2_rx_packet, list_entry);
467 "[%d] LL2 Rx completion but active_descq is empty\n",
468 p_ll2_conn->input.conn_type);
472 list_del(&p_pkt->list_entry);
474 if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
475 qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
477 qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
478 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
480 "Mismatch between active_descq and the LL2 Rx chain\n");
482 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
484 data.connection_handle = p_ll2_conn->my_id;
485 data.cookie = p_pkt->cookie;
486 data.rx_buf_addr = p_pkt->rx_buf_addr;
487 data.b_last_packet = b_last_cqe;
489 spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
490 p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
492 spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
497 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
499 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
500 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
501 union core_rx_cqe_union *cqe = NULL;
502 u16 cq_new_idx = 0, cq_old_idx = 0;
503 unsigned long flags = 0;
509 spin_lock_irqsave(&p_rx->lock, flags);
511 if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
512 spin_unlock_irqrestore(&p_rx->lock, flags);
516 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
517 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
519 while (cq_new_idx != cq_old_idx) {
520 bool b_last_cqe = (cq_new_idx == cq_old_idx);
523 (union core_rx_cqe_union *)
524 qed_chain_consume(&p_rx->rcq_chain);
525 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
529 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
530 cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
532 switch (cqe->rx_cqe_sp.type) {
533 case CORE_RX_CQE_TYPE_SLOW_PATH:
534 rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
537 case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
538 case CORE_RX_CQE_TYPE_REGULAR:
539 rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
548 spin_unlock_irqrestore(&p_rx->lock, flags);
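/* Flush counterpart of the Rx completion handler, used on teardown: every
 * buffer still posted is returned to the OOO free pool or handed back to
 * its owner via rx_release_cb.
 */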
552 static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
554 struct qed_ll2_info *p_ll2_conn = NULL;
555 struct qed_ll2_rx_packet *p_pkt = NULL;
556 struct qed_ll2_rx_queue *p_rx;
557 unsigned long flags = 0;
559 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
563 p_rx = &p_ll2_conn->rx_queue;
565 spin_lock_irqsave(&p_rx->lock, flags);
566 while (!list_empty(&p_rx->active_descq)) {
567 p_pkt = list_first_entry(&p_rx->active_descq,
568 struct qed_ll2_rx_packet, list_entry);
571 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
572 spin_unlock_irqrestore(&p_rx->lock, flags);
574 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
575 struct qed_ooo_buffer *p_buffer;
577 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
578 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
581 dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
582 void *cookie = p_pkt->cookie;
585 b_last = list_empty(&p_rx->active_descq);
586 p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
589 rx_buf_addr, b_last);
591 spin_lock_irqsave(&p_rx->lock, flags);
593 spin_unlock_irqrestore(&p_rx->lock, flags);
597 qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
598 struct core_rx_slow_path_cqe *p_cqe)
600 struct ooo_opaque *iscsi_ooo;
603 if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
606 iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
607 if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
610 /* Need to perform a flush - release all of this connection's isles */
611 cid = le32_to_cpu(iscsi_ooo->cid);
612 qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
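/* Loopback (OOO) Rx handler: each fast-path CQE carries a TCP OOO opcode in
 * its opaque data. Isle deletions are processed first, then isle
 * creation/extension/join, and buffers that became in-order are marked ready
 * for transmission.
 */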
617 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
618 struct qed_ll2_info *p_ll2_conn)
620 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
621 u16 packet_length = 0, parse_flags = 0, vlan = 0;
622 struct qed_ll2_rx_packet *p_pkt = NULL;
623 u32 num_ooo_add_to_peninsula = 0, cid;
624 union core_rx_cqe_union *cqe = NULL;
625 u16 cq_new_idx = 0, cq_old_idx = 0;
626 struct qed_ooo_buffer *p_buffer;
627 struct ooo_opaque *iscsi_ooo;
628 u8 placement_offset = 0;
631 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
632 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
633 if (cq_new_idx == cq_old_idx)
636 while (cq_new_idx != cq_old_idx) {
637 struct core_rx_fast_path_cqe *p_cqe_fp;
639 cqe = qed_chain_consume(&p_rx->rcq_chain);
640 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
641 cqe_type = cqe->rx_cqe_sp.type;
643 if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
644 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
648 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
650 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
654 p_cqe_fp = &cqe->rx_cqe_fp;
656 placement_offset = p_cqe_fp->placement_offset;
657 parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
658 packet_length = le16_to_cpu(p_cqe_fp->packet_length);
659 vlan = le16_to_cpu(p_cqe_fp->vlan);
660 iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
661 qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
663 cid = le32_to_cpu(iscsi_ooo->cid);
665 /* Process delete isle first */
666 if (iscsi_ooo->drop_size)
667 qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
668 iscsi_ooo->drop_isle,
669 iscsi_ooo->drop_size);
671 if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
674 /* Now process create/add/join isles */
675 if (list_empty(&p_rx->active_descq)) {
677 "LL2 OOO RX chain has no submitted buffers\n"
682 p_pkt = list_first_entry(&p_rx->active_descq,
683 struct qed_ll2_rx_packet, list_entry);
685 if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
686 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
687 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
688 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
689 (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
692 "LL2 OOO RX packet is not valid\n");
695 list_del(&p_pkt->list_entry);
696 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
697 p_buffer->packet_length = packet_length;
698 p_buffer->parse_flags = parse_flags;
699 p_buffer->vlan = vlan;
700 p_buffer->placement_offset = placement_offset;
701 qed_chain_consume(&p_rx->rxq_chain);
702 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
704 switch (iscsi_ooo->ooo_opcode) {
705 case TCP_EVENT_ADD_NEW_ISLE:
706 qed_ooo_add_new_isle(p_hwfn,
712 case TCP_EVENT_ADD_ISLE_RIGHT:
713 qed_ooo_add_new_buffer(p_hwfn,
720 case TCP_EVENT_ADD_ISLE_LEFT:
721 qed_ooo_add_new_buffer(p_hwfn,
729 qed_ooo_add_new_buffer(p_hwfn,
732 iscsi_ooo->ooo_isle +
736 qed_ooo_join_isles(p_hwfn,
738 cid, iscsi_ooo->ooo_isle);
740 case TCP_EVENT_ADD_PEN:
741 num_ooo_add_to_peninsula++;
742 qed_ooo_put_ready_buffer(p_hwfn,
749 "Unexpected event (%d) TX OOO completion\n",
750 iscsi_ooo->ooo_opcode);
758 qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
759 struct qed_ll2_info *p_ll2_conn)
761 struct qed_ll2_tx_pkt_info tx_pkt;
762 struct qed_ooo_buffer *p_buffer;
764 dma_addr_t first_frag;
768 /* Submit Tx buffers here */
769 while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
770 p_hwfn->p_ooo_info))) {
774 first_frag = p_buffer->rx_buffer_phys_addr +
775 p_buffer->placement_offset;
776 SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
777 SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
779 memset(&tx_pkt, 0, sizeof(tx_pkt));
780 tx_pkt.num_of_bds = 1;
781 tx_pkt.vlan = p_buffer->vlan;
782 tx_pkt.bd_flags = bd_flags;
783 tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
784 switch (p_ll2_conn->tx_dest) {
785 case CORE_TX_DEST_NW:
786 tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
788 case CORE_TX_DEST_LB:
789 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
791 case CORE_TX_DEST_DROP:
793 tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
796 tx_pkt.first_frag = first_frag;
797 tx_pkt.first_frag_len = p_buffer->packet_length;
798 tx_pkt.cookie = p_buffer;
800 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
803 qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
811 qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
812 struct qed_ll2_info *p_ll2_conn)
814 struct qed_ooo_buffer *p_buffer;
817 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
818 p_hwfn->p_ooo_info))) {
819 rc = qed_ll2_post_rx_buffer(p_hwfn,
821 p_buffer->rx_buffer_phys_addr,
824 qed_ooo_put_free_buffer(p_hwfn,
825 p_hwfn->p_ooo_info, p_buffer);
831 static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
833 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
839 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
842 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
846 qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
847 qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
852 static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
854 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
855 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
856 struct qed_ll2_tx_packet *p_pkt = NULL;
857 struct qed_ooo_buffer *p_buffer;
858 bool b_dont_submit_rx = false;
859 u16 new_idx = 0, num_bds = 0;
865 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
868 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
869 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
875 if (list_empty(&p_tx->active_descq))
878 p_pkt = list_first_entry(&p_tx->active_descq,
879 struct qed_ll2_tx_packet, list_entry);
883 if (p_pkt->bd_used != 1) {
885 "Unexpectedly many BDs(%d) in TX OOO completion\n",
890 list_del(&p_pkt->list_entry);
894 qed_chain_consume(&p_tx->txq_chain);
896 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
897 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
899 if (b_dont_submit_rx) {
900 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
905 rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
906 p_buffer->rx_buffer_phys_addr, 0,
909 qed_ooo_put_free_buffer(p_hwfn,
910 p_hwfn->p_ooo_info, p_buffer);
911 b_dont_submit_rx = true;
915 qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
920 static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
922 u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
924 DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
925 "Stopping LL2 OOO queue [%02x]\n", *handle);
927 qed_ll2_terminate_connection(p_hwfn, *handle);
928 qed_ll2_release_connection(p_hwfn, *handle);
929 *handle = QED_LL2_UNUSED_HANDLE;
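/* Slowpath configuration: the Rx/Tx queues are started and stopped with
 * CORE ramrods posted through the SPQ in QED_SPQ_MODE_EBLOCK, so the calling
 * context blocks until the firmware acknowledges the request.
 */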
932 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
933 struct qed_ll2_info *p_ll2_conn,
936 enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
937 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
938 struct core_rx_start_ramrod_data *p_ramrod = NULL;
939 struct qed_spq_entry *p_ent = NULL;
940 struct qed_sp_init_data init_data;
945 memset(&init_data, 0, sizeof(init_data));
946 init_data.cid = p_ll2_conn->cid;
947 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
948 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
950 rc = qed_sp_init_request(p_hwfn, &p_ent,
951 CORE_RAMROD_RX_QUEUE_START,
952 PROTOCOLID_CORE, &init_data);
956 p_ramrod = &p_ent->ramrod.core_rx_queue_start;
957 memset(p_ramrod, 0, sizeof(*p_ramrod));
958 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
959 p_ramrod->sb_index = p_rx->rx_sb_index;
960 p_ramrod->complete_event_flg = 1;
962 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
963 DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
964 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
965 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
966 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
967 qed_chain_get_pbl_phys(&p_rx->rcq_chain));
969 p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
970 p_ramrod->inner_vlan_stripping_en =
971 p_ll2_conn->input.rx_vlan_removal_en;
973 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
974 p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
975 p_ramrod->report_outer_vlan = 1;
976 p_ramrod->queue_id = p_ll2_conn->queue_id;
977 p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
979 if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
980 p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
981 conn_type != QED_LL2_TYPE_IWARP &&
982 (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
983 p_ramrod->mf_si_bcast_accept_all = 1;
984 p_ramrod->mf_si_mcast_accept_all = 1;
986 p_ramrod->mf_si_bcast_accept_all = 0;
987 p_ramrod->mf_si_mcast_accept_all = 0;
990 p_ramrod->action_on_error.error_type = action_on_error;
991 p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
992 p_ramrod->zero_prod_flg = 1;
994 return qed_spq_post(p_hwfn, p_ent, NULL);
997 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
998 struct qed_ll2_info *p_ll2_conn)
1000 enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
1001 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1002 struct core_tx_start_ramrod_data *p_ramrod = NULL;
1003 struct qed_spq_entry *p_ent = NULL;
1004 struct qed_sp_init_data init_data;
1005 u16 pq_id = 0, pbl_size;
1008 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
1011 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
1012 p_ll2_conn->tx_stats_en = 0;
1014 p_ll2_conn->tx_stats_en = 1;
1017 memset(&init_data, 0, sizeof(init_data));
1018 init_data.cid = p_ll2_conn->cid;
1019 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1020 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1022 rc = qed_sp_init_request(p_hwfn, &p_ent,
1023 CORE_RAMROD_TX_QUEUE_START,
1024 PROTOCOLID_CORE, &init_data);
1028 p_ramrod = &p_ent->ramrod.core_tx_queue_start;
1030 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1031 p_ramrod->sb_index = p_tx->tx_sb_index;
1032 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
1033 p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
1034 p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
1036 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
1037 qed_chain_get_pbl_phys(&p_tx->txq_chain));
1038 pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
1039 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1041 switch (p_ll2_conn->input.tx_tc) {
1043 pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
1046 pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
1049 pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
1053 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1055 switch (conn_type) {
1056 case QED_LL2_TYPE_FCOE:
1057 p_ramrod->conn_type = PROTOCOLID_FCOE;
1059 case QED_LL2_TYPE_TCP_ULP:
1060 p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
1062 case QED_LL2_TYPE_ROCE:
1063 p_ramrod->conn_type = PROTOCOLID_ROCE;
1065 case QED_LL2_TYPE_IWARP:
1066 p_ramrod->conn_type = PROTOCOLID_IWARP;
1068 case QED_LL2_TYPE_OOO:
1069 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
1070 p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
1071 p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
1073 p_ramrod->conn_type = PROTOCOLID_IWARP;
1076 p_ramrod->conn_type = PROTOCOLID_ETH;
1077 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
1080 p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
1082 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1086 rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
1087 &p_tx->db_msg, DB_REC_WIDTH_32B,
1092 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
1093 struct qed_ll2_info *p_ll2_conn)
1095 struct core_rx_stop_ramrod_data *p_ramrod = NULL;
1096 struct qed_spq_entry *p_ent = NULL;
1097 struct qed_sp_init_data init_data;
1101 memset(&init_data, 0, sizeof(init_data));
1102 init_data.cid = p_ll2_conn->cid;
1103 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1104 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1106 rc = qed_sp_init_request(p_hwfn, &p_ent,
1107 CORE_RAMROD_RX_QUEUE_STOP,
1108 PROTOCOLID_CORE, &init_data);
1112 p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
1114 p_ramrod->complete_event_flg = 1;
1115 p_ramrod->queue_id = p_ll2_conn->queue_id;
1117 return qed_spq_post(p_hwfn, p_ent, NULL);
1120 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
1121 struct qed_ll2_info *p_ll2_conn)
1123 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1124 struct qed_spq_entry *p_ent = NULL;
1125 struct qed_sp_init_data init_data;
1127 qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
1130 memset(&init_data, 0, sizeof(init_data));
1131 init_data.cid = p_ll2_conn->cid;
1132 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1133 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1135 rc = qed_sp_init_request(p_hwfn, &p_ent,
1136 CORE_RAMROD_TX_QUEUE_STOP,
1137 PROTOCOLID_CORE, &init_data);
1141 return qed_spq_post(p_hwfn, p_ent, NULL);
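/* Rx uses two chains: rxq_chain (next-ptr mode) holds the core_rx_bd buffer
 * descriptors given to the firmware, while rcq_chain is a PBL-backed ring
 * that the firmware fills with completion CQEs.
 */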
1145 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
1146 struct qed_ll2_info *p_ll2_info)
1148 struct qed_chain_init_params params = {
1149 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1150 .cnt_type = QED_CHAIN_CNT_TYPE_U16,
1151 .num_elems = p_ll2_info->input.rx_num_desc,
1153 struct qed_dev *cdev = p_hwfn->cdev;
1154 struct qed_ll2_rx_packet *p_descq;
1158 if (!p_ll2_info->input.rx_num_desc)
1161 params.mode = QED_CHAIN_MODE_NEXT_PTR;
1162 params.elem_size = sizeof(struct core_rx_bd);
1164 rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, ¶ms);
1166 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
1170 capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
1171 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
1175 DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
1178 p_ll2_info->rx_queue.descq_array = p_descq;
1180 params.mode = QED_CHAIN_MODE_PBL;
1181 params.elem_size = sizeof(struct core_rx_fast_path_cqe);
1183 rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, ¶ms);
1185 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
1189 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1190 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1191 p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
1197 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
1198 struct qed_ll2_info *p_ll2_info)
1200 struct qed_chain_init_params params = {
1201 .mode = QED_CHAIN_MODE_PBL,
1202 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1203 .cnt_type = QED_CHAIN_CNT_TYPE_U16,
1204 .num_elems = p_ll2_info->input.tx_num_desc,
1205 .elem_size = sizeof(struct core_tx_bd),
1207 struct qed_ll2_tx_packet *p_descq;
1212 if (!p_ll2_info->input.tx_num_desc)
1215 rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
1220 capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
1221 /* All bds_set elements are flexibly added. */
1222 desc_size = struct_size(p_descq, bds_set,
1223 p_ll2_info->input.tx_max_bds_per_packet);
1225 p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
1230 p_ll2_info->tx_queue.descq_mem = p_descq;
1232 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1233 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1234 p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
1239 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
1240 p_ll2_info->input.tx_num_desc);
1245 qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
1246 struct qed_ll2_info *p_ll2_info, u16 mtu)
1248 struct qed_ooo_buffer *p_buf = NULL;
1253 if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
1256 /* Correct number of requested OOO buffers if needed */
1257 if (!p_ll2_info->input.rx_num_ooo_buffers) {
1258 u16 num_desc = p_ll2_info->input.rx_num_desc;
1262 p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
1265 for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
1267 p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
1273 p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
1274 p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
1275 ETH_CACHE_LINE_SIZE - 1) &
1276 ~(ETH_CACHE_LINE_SIZE - 1);
1277 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1278 p_buf->rx_buffer_size,
1279 &p_buf->rx_buffer_phys_addr,
1287 p_buf->rx_buffer_virt_addr = p_virt;
1288 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
1291 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1292 "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
1293 p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
1300 qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
1302 if (!cbs || (!cbs->rx_comp_cb ||
1303 !cbs->rx_release_cb ||
1304 !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
1307 p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
1308 p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
1309 p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
1310 p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
1311 p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
1312 p_ll2_info->cbs.cookie = cbs->cookie;
1317 static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
1318 struct qed_ll2_acquire_data *data,
1319 u8 *start_idx, u8 *last_idx)
1321 /* LL2 queue handles will be split as follows:
1322 * First come the legacy queues, then the ctx-based ones.
1324 if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
1325 *start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
1326 *last_idx = *start_idx +
1327 QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
1329 /* QED_LL2_RX_TYPE_CTX */
1330 *start_idx = QED_LL2_CTX_CONN_BASE_PF;
1331 *last_idx = *start_idx +
1332 QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
1336 static enum core_error_handle
1337 qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
1340 case QED_LL2_DROP_PACKET:
1341 return LL2_DROP_PACKET;
1342 case QED_LL2_DO_NOTHING:
1343 return LL2_DO_NOTHING;
1344 case QED_LL2_ASSERT:
1347 return LL2_DO_NOTHING;
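/* Acquire a free connection slot, copy the caller's input, allocate the
 * Rx/Tx (and, for OOO connections, buffer) resources and register the
 * status-block callbacks. A minimal usage sketch - field values are
 * illustrative only, and my_ll2_cbs is a hypothetical callback table:
 *
 *	struct qed_ll2_acquire_data data = { };
 *	u8 handle;
 *
 *	data.input.conn_type = QED_LL2_TYPE_ROCE;
 *	data.input.mtu = 1500;
 *	data.input.rx_num_desc = 32;
 *	data.input.tx_num_desc = 32;
 *	data.cbs = &my_ll2_cbs;
 *	data.p_connection_handle = &handle;
 *
 *	if (!qed_ll2_acquire_connection(p_hwfn, &data))
 *		qed_ll2_establish_connection(p_hwfn, handle);
 */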
1351 int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
1353 struct qed_hwfn *p_hwfn = cxt;
1354 qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
1355 struct qed_ll2_info *p_ll2_info = NULL;
1356 u8 i, first_idx, last_idx, *p_tx_max;
1359 if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
1362 _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
1364 /* Find a free connection to be used */
1365 for (i = first_idx; i < last_idx; i++) {
1366 mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
1367 if (p_hwfn->p_ll2_info[i].b_active) {
1368 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1372 p_hwfn->p_ll2_info[i].b_active = true;
1373 p_ll2_info = &p_hwfn->p_ll2_info[i];
1374 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1380 memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
1382 switch (data->input.tx_dest) {
1383 case QED_LL2_TX_DEST_NW:
1384 p_ll2_info->tx_dest = CORE_TX_DEST_NW;
1386 case QED_LL2_TX_DEST_LB:
1387 p_ll2_info->tx_dest = CORE_TX_DEST_LB;
1389 case QED_LL2_TX_DEST_DROP:
1390 p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
1396 if (data->input.conn_type == QED_LL2_TYPE_OOO ||
1397 data->input.secondary_queue)
1398 p_ll2_info->main_func_queue = false;
1400 p_ll2_info->main_func_queue = true;
1402 /* Correct maximum number of Tx BDs */
1403 p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
1405 *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
1407 *p_tx_max = min_t(u8, *p_tx_max,
1408 CORE_LL2_TX_MAX_BDS_PER_PACKET);
1410 rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
1412 DP_NOTICE(p_hwfn, "Invalid callback functions\n");
1413 goto q_allocate_fail;
1416 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
1418 goto q_allocate_fail;
1420 rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
1422 goto q_allocate_fail;
1424 rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
1427 goto q_allocate_fail;
1429 /* Register callbacks for the Rx/Tx queues */
1430 if (data->input.conn_type == QED_LL2_TYPE_OOO) {
1431 comp_rx_cb = qed_ll2_lb_rxq_completion;
1432 comp_tx_cb = qed_ll2_lb_txq_completion;
1434 comp_rx_cb = qed_ll2_rxq_completion;
1435 comp_tx_cb = qed_ll2_txq_completion;
1438 if (data->input.rx_num_desc) {
1439 qed_int_register_cb(p_hwfn, comp_rx_cb,
1440 &p_hwfn->p_ll2_info[i],
1441 &p_ll2_info->rx_queue.rx_sb_index,
1442 &p_ll2_info->rx_queue.p_fw_cons);
1443 p_ll2_info->rx_queue.b_cb_registered = true;
1446 if (data->input.tx_num_desc) {
1447 qed_int_register_cb(p_hwfn,
1449 &p_hwfn->p_ll2_info[i],
1450 &p_ll2_info->tx_queue.tx_sb_index,
1451 &p_ll2_info->tx_queue.p_fw_cons);
1452 p_ll2_info->tx_queue.b_cb_registered = true;
1455 *data->p_connection_handle = i;
1459 qed_ll2_release_connection(p_hwfn, i);
1463 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1464 struct qed_ll2_info *p_ll2_conn)
1466 enum qed_ll2_error_handle error_input;
1467 enum core_error_handle error_mode;
1468 u8 action_on_error = 0;
1471 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
1474 DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
1475 error_input = p_ll2_conn->input.ai_err_packet_too_big;
1476 error_mode = qed_ll2_get_error_choice(error_input);
1477 SET_FIELD(action_on_error,
1478 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
1479 error_input = p_ll2_conn->input.ai_err_no_buf;
1480 error_mode = qed_ll2_get_error_choice(error_input);
1481 SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
1483 rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1487 if (p_ll2_conn->rx_queue.ctx_based) {
1488 rc = qed_db_recovery_add(p_hwfn->cdev,
1489 p_ll2_conn->rx_queue.set_prod_addr,
1490 &p_ll2_conn->rx_queue.db_data,
1491 DB_REC_WIDTH_64B, DB_REC_KERNEL);
1498 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
1499 struct qed_ll2_info *p_ll2_conn)
1501 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
1504 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1505 qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
1508 static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
1514 if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
1515 return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
1517 /* QED_LL2_RX_TYPE_CTX
1518 * FW distinguishes between the legacy queues (ram based) and the
1519 * ctx based queues by the queue_id.
1520 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
1521 * and the queue ids above that are ctx based.
1523 qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
1524 MAX_NUM_LL2_RX_RAM_QUEUES;
1526 /* See the comment in the acquire connection path for how the ll2
1527 * queue handles are divided.
1529 qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
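/* Establishing a connection acquires a CORE CID, resets both queue chains
 * and their descriptor lists, and then fires the Rx/Tx start ramrods.
 * Legacy queues get a producer address in TSDM RAM; ctx-based queues use a
 * doorbell instead.
 */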
1534 int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
1536 struct e4_core_conn_context *p_cxt;
1537 struct qed_ll2_tx_packet *p_pkt;
1538 struct qed_ll2_info *p_ll2_conn;
1539 struct qed_hwfn *p_hwfn = cxt;
1540 struct qed_ll2_rx_queue *p_rx;
1541 struct qed_ll2_tx_queue *p_tx;
1542 struct qed_cxt_info cxt_info;
1543 struct qed_ptt *p_ptt;
1549 p_ptt = qed_ptt_acquire(p_hwfn);
1553 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1559 p_rx = &p_ll2_conn->rx_queue;
1560 p_tx = &p_ll2_conn->tx_queue;
1562 qed_chain_reset(&p_rx->rxq_chain);
1563 qed_chain_reset(&p_rx->rcq_chain);
1564 INIT_LIST_HEAD(&p_rx->active_descq);
1565 INIT_LIST_HEAD(&p_rx->free_descq);
1566 INIT_LIST_HEAD(&p_rx->posting_descq);
1567 spin_lock_init(&p_rx->lock);
1568 capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
1569 for (i = 0; i < capacity; i++)
1570 list_add_tail(&p_rx->descq_array[i].list_entry,
1572 *p_rx->p_fw_cons = 0;
1574 qed_chain_reset(&p_tx->txq_chain);
1575 INIT_LIST_HEAD(&p_tx->active_descq);
1576 INIT_LIST_HEAD(&p_tx->free_descq);
1577 INIT_LIST_HEAD(&p_tx->sending_descq);
1578 spin_lock_init(&p_tx->lock);
1579 capacity = qed_chain_get_capacity(&p_tx->txq_chain);
1580 /* All bds_set elements are flexibly added. */
1581 desc_size = struct_size(p_pkt, bds_set,
1582 p_ll2_conn->input.tx_max_bds_per_packet);
1584 for (i = 0; i < capacity; i++) {
1585 p_pkt = p_tx->descq_mem + desc_size * i;
1586 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
1588 p_tx->cur_completing_bd_idx = 0;
1590 p_tx->b_completing_packet = false;
1591 p_tx->cur_send_packet = NULL;
1592 p_tx->cur_send_frag_num = 0;
1593 p_tx->cur_completing_frag_num = 0;
1594 *p_tx->p_fw_cons = 0;
1596 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
1599 cxt_info.iid = p_ll2_conn->cid;
1600 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
1602 DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
1607 p_cxt = cxt_info.p_cxt;
1609 memset(p_cxt, 0, sizeof(*p_cxt));
1611 qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
1612 p_ll2_conn->input.rx_conn_type);
1613 p_ll2_conn->queue_id = qid;
1614 p_ll2_conn->tx_stats_id = qid;
1616 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1617 "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
1618 p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
1620 if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
1621 p_rx->set_prod_addr = p_hwfn->regview +
1622 GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
1624 /* QED_LL2_RX_TYPE_CTX - using doorbell */
1625 p_rx->ctx_based = 1;
1627 p_rx->set_prod_addr = p_hwfn->doorbells +
1628 p_hwfn->dpi_start_offset +
1629 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
1631 /* prepare db data */
1632 p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
1633 SET_FIELD(p_rx->db_data.params,
1634 CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
1635 SET_FIELD(p_rx->db_data.params,
1636 CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
1639 p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
1640 qed_db_addr(p_ll2_conn->cid,
1642 /* prepare db data */
1643 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1644 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1645 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1646 DQ_XCM_CORE_TX_BD_PROD_CMD);
1647 p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1649 rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
1653 rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
1657 if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
1658 !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
1659 qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
1661 qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
1663 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
1664 if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1665 qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
1666 QED_LLH_FILTER_ETHERTYPE,
1668 qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
1669 QED_LLH_FILTER_ETHERTYPE,
1674 qed_ptt_release(p_hwfn, p_ptt);
1678 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1679 struct qed_ll2_rx_queue *p_rx,
1680 struct qed_ll2_rx_packet *p_curp)
1682 struct qed_ll2_rx_packet *p_posting_packet = NULL;
1683 struct core_ll2_rx_prod rx_prod = { 0, 0 };
1684 bool b_notify_fw = false;
1685 u16 bd_prod, cq_prod;
1687 /* This handles the flushing of already posted buffers */
1688 while (!list_empty(&p_rx->posting_descq)) {
1689 p_posting_packet = list_first_entry(&p_rx->posting_descq,
1690 struct qed_ll2_rx_packet,
1692 list_move_tail(&p_posting_packet->list_entry,
1693 &p_rx->active_descq);
1697 /* This handles the supplied packet [if there is one] */
1699 list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1706 bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1707 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1708 if (p_rx->ctx_based) {
1709 /* update producer by giving a doorbell */
1710 p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
1711 p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
1712 /* Make sure the chain element is updated before ringing the doorbell */
1716 DIRECT_REG_WR64(p_rx->set_prod_addr,
1717 *((u64 *)&p_rx->db_data));
1719 rx_prod.bd_prod = cpu_to_le16(bd_prod);
1720 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1722 /* Make sure the chain element is updated before ringing the doorbell */
1727 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
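/* Post a single Rx buffer: take a free descriptor plus a BD/CQE pair, fill
 * the BD with the DMA address and length, then either batch the descriptor
 * on posting_descq or notify the firmware immediately, depending on
 * notify_fw.
 */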
1731 int qed_ll2_post_rx_buffer(void *cxt,
1732 u8 connection_handle,
1734 u16 buf_len, void *cookie, u8 notify_fw)
1736 struct qed_hwfn *p_hwfn = cxt;
1737 struct core_rx_bd_with_buff_len *p_curb = NULL;
1738 struct qed_ll2_rx_packet *p_curp = NULL;
1739 struct qed_ll2_info *p_ll2_conn;
1740 struct qed_ll2_rx_queue *p_rx;
1741 unsigned long flags;
1745 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1748 p_rx = &p_ll2_conn->rx_queue;
1749 if (!p_rx->set_prod_addr)
1752 spin_lock_irqsave(&p_rx->lock, flags);
1753 if (!list_empty(&p_rx->free_descq))
1754 p_curp = list_first_entry(&p_rx->free_descq,
1755 struct qed_ll2_rx_packet, list_entry);
1757 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1758 qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1759 p_data = qed_chain_produce(&p_rx->rxq_chain);
1760 p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1761 qed_chain_produce(&p_rx->rcq_chain);
1765 /* If we're lacking entries, let's try to flush buffers to FW */
1766 if (!p_curp || !p_curb) {
1772 /* We have an Rx packet we can fill */
1773 DMA_REGPAIR_LE(p_curb->addr, addr);
1774 p_curb->buff_length = cpu_to_le16(buf_len);
1775 p_curp->rx_buf_addr = addr;
1776 p_curp->cookie = cookie;
1777 p_curp->rxq_bd = p_curb;
1778 p_curp->buf_length = buf_len;
1779 list_del(&p_curp->list_entry);
1781 /* Check if we only want to enqueue this packet without informing FW */
1783 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1788 qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1790 spin_unlock_irqrestore(&p_rx->lock, flags);
1794 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1795 struct qed_ll2_tx_queue *p_tx,
1796 struct qed_ll2_tx_packet *p_curp,
1797 struct qed_ll2_tx_pkt_info *pkt,
1800 list_del(&p_curp->list_entry);
1801 p_curp->cookie = pkt->cookie;
1802 p_curp->bd_used = pkt->num_of_bds;
1803 p_curp->notify_fw = notify_fw;
1804 p_tx->cur_send_packet = p_curp;
1805 p_tx->cur_send_frag_num = 0;
1807 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
1808 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
1809 p_tx->cur_send_frag_num++;
1813 qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1814 struct qed_ll2_info *p_ll2,
1815 struct qed_ll2_tx_packet *p_curp,
1816 struct qed_ll2_tx_pkt_info *pkt)
1818 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1819 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1820 struct core_tx_bd *start_bd = NULL;
1821 enum core_roce_flavor_type roce_flavor;
1822 enum core_tx_dest tx_dest;
1823 u16 bd_data = 0, frag_idx;
1826 roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
1829 switch (pkt->tx_dest) {
1830 case QED_LL2_TX_DEST_NW:
1831 tx_dest = CORE_TX_DEST_NW;
1833 case QED_LL2_TX_DEST_LB:
1834 tx_dest = CORE_TX_DEST_LB;
1836 case QED_LL2_TX_DEST_DROP:
1837 tx_dest = CORE_TX_DEST_DROP;
1840 tx_dest = CORE_TX_DEST_LB;
1844 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1845 if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
1846 p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
1847 start_bd->nw_vlan_or_lb_echo =
1848 cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
1850 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
1851 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
1852 p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
1853 pkt->remove_stag = true;
1856 bitfield1 = le16_to_cpu(start_bd->bitfield1);
1857 SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
1858 SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1859 start_bd->bitfield1 = cpu_to_le16(bitfield1);
1861 bd_data |= pkt->bd_flags;
1862 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
1863 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
1864 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1865 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
1866 SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
1867 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
1868 SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
1869 !!(pkt->remove_stag));
1871 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
1872 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
1873 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
1876 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1877 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1880 p_ll2->input.conn_type,
1882 pkt->first_frag_len,
1884 le32_to_cpu(start_bd->addr.hi),
1885 le32_to_cpu(start_bd->addr.lo));
1887 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
1890 /* Need to provide the packet with additional BDs for frags */
1891 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1892 frag_idx < pkt->num_of_bds; frag_idx++) {
1893 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1895 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1896 (*p_bd)->bd_data.as_bitfield = 0;
1897 (*p_bd)->bitfield1 = 0;
1898 p_curp->bds_set[frag_idx].tx_frag = 0;
1899 p_curp->bds_set[frag_idx].frag_len = 0;
1903 /* This should be called while the Txq spinlock is being held */
1904 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1905 struct qed_ll2_info *p_ll2_conn)
1907 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1908 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1909 struct qed_ll2_tx_packet *p_pkt = NULL;
1912 /* If there are missing BDs, don't do anything now */
1913 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1914 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1917 /* Push the current packet to the list and clean after it */
1918 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1919 &p_ll2_conn->tx_queue.sending_descq);
1920 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1921 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1923 /* Notify FW of packet only if requested to */
1927 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1929 while (!list_empty(&p_tx->sending_descq)) {
1930 p_pkt = list_first_entry(&p_tx->sending_descq,
1931 struct qed_ll2_tx_packet, list_entry);
1935 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1938 p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
1940 /* Make sure the BDs data is updated before ringing the doorbell */
1943 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
1946 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1947 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1948 p_ll2_conn->queue_id,
1950 p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
1953 int qed_ll2_prepare_tx_packet(void *cxt,
1954 u8 connection_handle,
1955 struct qed_ll2_tx_pkt_info *pkt,
1958 struct qed_hwfn *p_hwfn = cxt;
1959 struct qed_ll2_tx_packet *p_curp = NULL;
1960 struct qed_ll2_info *p_ll2_conn = NULL;
1961 struct qed_ll2_tx_queue *p_tx;
1962 struct qed_chain *p_tx_chain;
1963 unsigned long flags;
1966 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1969 p_tx = &p_ll2_conn->tx_queue;
1970 p_tx_chain = &p_tx->txq_chain;
1972 if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
1975 spin_lock_irqsave(&p_tx->lock, flags);
1976 if (p_tx->cur_send_packet) {
1981 /* Get entry, but only if we have tx elements for it */
1982 if (!list_empty(&p_tx->free_descq))
1983 p_curp = list_first_entry(&p_tx->free_descq,
1984 struct qed_ll2_tx_packet, list_entry);
1985 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
1993 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1994 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
1996 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
1998 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
2001 spin_unlock_irqrestore(&p_tx->lock, flags);
2005 int qed_ll2_set_fragment_of_tx_packet(void *cxt,
2006 u8 connection_handle,
2007 dma_addr_t addr, u16 nbytes)
2009 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
2010 struct qed_hwfn *p_hwfn = cxt;
2011 struct qed_ll2_info *p_ll2_conn = NULL;
2012 u16 cur_send_frag_num = 0;
2013 struct core_tx_bd *p_bd;
2014 unsigned long flags;
2016 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
2020 if (!p_ll2_conn->tx_queue.cur_send_packet)
2023 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
2024 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
2026 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
2029 /* Fill the BD information, and possibly notify FW */
2030 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
2031 DMA_REGPAIR_LE(p_bd->addr, addr);
2032 p_bd->nbytes = cpu_to_le16(nbytes);
2033 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
2034 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
2036 p_ll2_conn->tx_queue.cur_send_frag_num++;
2038 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
2039 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
2040 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
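/* Teardown mirrors establish: the SB callbacks are unregistered, the
 * queue-stop ramrods are sent, in-flight packets are flushed back to their
 * owners, and any FCoE protocol filters are removed.
 */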
2045 int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
2047 struct qed_hwfn *p_hwfn = cxt;
2048 struct qed_ll2_info *p_ll2_conn = NULL;
2050 struct qed_ptt *p_ptt;
2052 p_ptt = qed_ptt_acquire(p_hwfn);
2056 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
2062 /* Stop Tx & Rx of connection, if needed */
2063 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
2064 p_ll2_conn->tx_queue.b_cb_registered = false;
2065 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
2066 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
2070 qed_ll2_txq_flush(p_hwfn, connection_handle);
2071 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
2074 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
2075 p_ll2_conn->rx_queue.b_cb_registered = false;
2076 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
2078 if (p_ll2_conn->rx_queue.ctx_based)
2079 qed_db_recovery_del(p_hwfn->cdev,
2080 p_ll2_conn->rx_queue.set_prod_addr,
2081 &p_ll2_conn->rx_queue.db_data);
2083 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
2087 qed_ll2_rxq_flush(p_hwfn, connection_handle);
2088 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
2091 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
2092 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
2094 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
2095 if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
2096 qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
2097 QED_LLH_FILTER_ETHERTYPE,
2099 qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
2100 QED_LLH_FILTER_ETHERTYPE,
2105 qed_ptt_release(p_hwfn, p_ptt);
2109 static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
2110 struct qed_ll2_info *p_ll2_conn)
2112 struct qed_ooo_buffer *p_buffer;
2114 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
2117 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
2118 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
2119 p_hwfn->p_ooo_info))) {
2120 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2121 p_buffer->rx_buffer_size,
2122 p_buffer->rx_buffer_virt_addr,
2123 p_buffer->rx_buffer_phys_addr);
2128 void qed_ll2_release_connection(void *cxt, u8 connection_handle)
2130 struct qed_hwfn *p_hwfn = cxt;
2131 struct qed_ll2_info *p_ll2_conn = NULL;
2133 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
2137 kfree(p_ll2_conn->tx_queue.descq_mem);
2138 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
2140 kfree(p_ll2_conn->rx_queue.descq_array);
2141 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
2142 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
2144 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
2146 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
2148 mutex_lock(&p_ll2_conn->mutex);
2149 p_ll2_conn->b_active = false;
2150 mutex_unlock(&p_ll2_conn->mutex);
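/* Lifecycle helpers: qed_ll2_alloc() creates the per-hwfn connection array,
 * qed_ll2_setup() initializes each connection's mutex, and qed_ll2_free()
 * releases the array.
 */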
2153 int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
2155 struct qed_ll2_info *p_ll2_connections;
2158 /* Allocate the set of LL2 connection structs */
2159 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
2160 sizeof(struct qed_ll2_info), GFP_KERNEL);
2161 if (!p_ll2_connections) {
2162 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
2166 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
2167 p_ll2_connections[i].my_id = i;
2169 p_hwfn->p_ll2_info = p_ll2_connections;
2173 void qed_ll2_setup(struct qed_hwfn *p_hwfn)
2177 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
2178 mutex_init(&p_hwfn->p_ll2_info[i].mutex);
2181 void qed_ll2_free(struct qed_hwfn *p_hwfn)
2183 if (!p_hwfn->p_ll2_info)
2186 kfree(p_hwfn->p_ll2_info);
2187 p_hwfn->p_ll2_info = NULL;
2190 static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
2191 struct qed_ptt *p_ptt,
2192 struct qed_ll2_stats *p_stats)
2194 struct core_ll2_port_stats port_stats;
2196 memset(&port_stats, 0, sizeof(port_stats));
2197 qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
2198 BAR0_MAP_REG_TSDM_RAM +
2199 TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
2200 sizeof(port_stats));
2202 p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
2203 p_stats->gsi_invalid_pkt_length +=
2204 HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
2205 p_stats->gsi_unsupported_pkt_typ +=
2206 HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
2207 p_stats->gsi_crcchksm_error +=
2208 HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
2211 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
2212 struct qed_ptt *p_ptt,
2213 struct qed_ll2_info *p_ll2_conn,
2214 struct qed_ll2_stats *p_stats)
2216 struct core_ll2_tstorm_per_queue_stat tstats;
2217 u8 qid = p_ll2_conn->queue_id;
2220 memset(&tstats, 0, sizeof(tstats));
2221 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
2222 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
2223 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
2225 p_stats->packet_too_big_discard +=
2226 HILO_64_REGPAIR(tstats.packet_too_big_discard);
2227 p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
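
/* Collect port (GSI), tstorm and ustorm counters for one connection, plus
 * pstorm counters when Tx stats are enabled. A PTT window is needed for the
 * RAM reads and is released before returning.
 */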
static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
			       struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	memset(p_stats, 0, sizeof(*p_stats));
	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
}
static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
static struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
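
/* Translate the generic qed_ll2_params into a qed_ll2_acquire_data request.
 * Loopback connections (lb), such as the OOO queue, are bound to the
 * loopback TC and Tx destination rather than the network port.
 */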
static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = p_hwfn;

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}
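
/* Bring up the out-of-order (OOO) LL2 queue used by the iSCSI/NVMeTCP
 * personalities: acquire and establish a loopback connection, storing the
 * handle in the iSCSI PF parameters.
 */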
static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
			     struct qed_ll2_params *params)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(p_hwfn, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(p_hwfn, *handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(p_hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
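
/* In CMT mode a storage PF may be affinitized to engine 1 while the leading
 * hwfn is on engine 0; such a PF needs LL2 running on both engines.
 */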
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}
static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);

	return rc;
}
static int qed_ll2_stop(struct qed_dev *cdev)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc = 0, rc2 = 0;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);

	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
	    QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
		if (rc2)
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to stop LL2 on engine 0\n");
	}

	rc = __qed_ll2_stop(p_hwfn);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");

	qed_ll2_kill_buffers(cdev);

	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc | rc2;
}
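
/* Start LL2 on a single hwfn: derive the connection type from the PF
 * personality, acquire and establish the connection, then post all
 * pre-allocated Rx buffers to the firmware. Buffers that fail to post are
 * unmapped and freed; failing to post even a single buffer is fatal.
 */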
static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	int rc, rx_cnt;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		conn_type = QED_LL2_TYPE_TCP_ULP;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, if a storage PF is affinitized to engine 1, LL2 is
	 * started on engine 0 as well, so twice the buffers are needed.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
	 */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to start LL2 on engine 0\n");
			goto err1;
		}
	}

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
	    QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(p_hwfn, params);
		if (rc) {
			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
			goto err2;
		}
	}

	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
		rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
		if (rc) {
			DP_NOTICE(cdev, "Failed to add an LLH filter\n");
			goto err3;
		}
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

err3:
	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
	    QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);
err2:
	if (b_is_storage_eng1)
		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
err1:
	__qed_ll2_stop(p_hwfn);
err0:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
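
/* Transmit an skb over the LL2 connection: the linear part is mapped and
 * posted as the first BD, each page fragment as an additional BD. The skb
 * may already be freed by the Tx completion once the last BD is posted.
 */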
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
				       &pkt, 1);
	if (unlikely(rc))
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -EINVAL;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is not much to do: a partial packet has
		 * been posted and we can't free the memory, so we must wait
		 * for completion.
		 */
		if (unlikely(rc))
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}
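
/* Aggregate LL2 statistics from the affinitized hwfn and, for a storage PF
 * in CMT mode, from engine 0 as well; counters are summed into 'stats'.
 */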
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	if (!cdev->ll2)
		return -EINVAL;

	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
		return rc;
	}

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle, stats);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to get LL2 stats on engine 0\n");
			return rc;
		}
	}

	return 0;
}
const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};
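
/* Allocate and free the cdev-level bookkeeping (struct qed_cb_ll2_info)
 * used by the qed_ll2_ops_pass callbacks above.
 */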
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}
void qed_ll2_dealloc_if(struct qed_dev *cdev)