1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
56 /* Add NAPI objects */
57 for_each_eth_queue(bp, i) {
58 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 bnx2x_poll, NAPI_POLL_WEIGHT);
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
65 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
67 /* Reduce memory usage in kdump environment by using only one queue */
68 if (is_kdump_kernel())
71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
76 * bnx2x_move_fp - move content of the fastpath structure.
79 * @from: source FP index
80 * @to: destination FP index
82 * Makes sure the content of bp->fp[to].napi is kept
83 * intact. This is done by first copying the napi struct from
84 * the target to the source, and then memcpy'ing the entire
85 * source onto the target. Update txdata pointers and related
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
90 struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 struct bnx2x_fastpath *to_fp = &bp->fp[to];
92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96 int old_max_eth_txqs, new_max_eth_txqs;
97 int old_txdata_index = 0, new_txdata_index = 0;
98 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
100 /* Copy the NAPI object as it has been already initialized */
101 from_fp->napi = to_fp->napi;
103 /* Move bnx2x_fastpath contents */
104 memcpy(to_fp, from_fp, sizeof(*to_fp));
107 /* Retain the tpa_info of the original `to' version as we don't want
108 * 2 FPs to contain the same tpa_info pointer.
110 to_fp->tpa_info = old_tpa_info;
112 /* move sp_objs contents as well, as their indices match fp ones */
113 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
115 /* move fp_stats contents as well, as their indices match fp ones */
116 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
118 /* Update txdata pointers in fp and move txdata content accordingly:
119 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 * decremented by max_cos x delta.
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
126 if (from == FCOE_IDX(bp)) {
127 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
131 memcpy(&bp->bnx2x_txq[new_txdata_index],
132 &bp->bnx2x_txq[old_txdata_index],
133 sizeof(struct bnx2x_fp_txdata));
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
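/* Illustrative arithmetic: if BNX2X_NUM_ETH_QUEUES() is 4, max_cos is 3,
 * from is 4 and to is 2, the FCoE txdata slot moves from
 * 4 * 3 + FCOE_TXQ_IDX_OFFSET to (4 - 4 + 2) * 3 + FCOE_TXQ_IDX_OFFSET,
 * i.e. from index 12 to index 6 (plus the offset).
 */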
138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
141 * @buf: character buffer to fill with the fw name
142 * @buf_len: length of the above buffer
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
148 u8 phy_fw_ver[PHY_FW_VER_LEN];
150 phy_fw_ver[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 phy_fw_ver, PHY_FW_VER_LEN);
153 strlcpy(buf, bp->fw_ver, buf_len);
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
156 (bp->common.bc_ver & 0xff0000) >> 16,
157 (bp->common.bc_ver & 0xff00) >> 8,
158 (bp->common.bc_ver & 0xff),
159 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
161 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
169 * @delta: number of eth queues which were not allocated
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
175 /* Queue pointer cannot be re-set on an fp-basis, as moving the pointer
176 * backward along the array could cause memory to be overwritten
178 for (cos = 1; cos < bp->max_cos; cos++) {
179 for (i = 0; i < old_eth_num - delta; i++) {
180 struct bnx2x_fastpath *fp = &bp->fp[i];
181 int new_idx = cos * (old_eth_num - delta) + i;
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 sizeof(struct bnx2x_fp_txdata));
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
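/* Each remaining fastpath now points at a compacted txdata slot whose
 * index is computed against the reduced ETH queue count, so bnx2x_txq
 * stays contiguous for cos > 0 after the shrink.
 */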
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
192 /* free skb in the packet ring at pos idx
193 * return idx of last bd freed
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196 u16 idx, unsigned int *pkts_compl,
197 unsigned int *bytes_compl)
199 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200 struct eth_tx_start_bd *tx_start_bd;
201 struct eth_tx_bd *tx_data_bd;
202 struct sk_buff *skb = tx_buf->skb;
203 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
205 u16 split_bd_len = 0;
207 /* prefetch skb end pointer to speed up dev_kfree_skb() */
210 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
211 txdata->txq_index, idx, tx_buf, skb);
213 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
215 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 BNX2X_ERR("BAD nbd!\n");
222 new_cons = nbd + tx_buf->first_bd;
224 /* Get the next bd */
225 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
227 /* Skip a parse bd... */
229 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
231 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 /* Skip second parse bd... */
234 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
237 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
242 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
253 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
257 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
268 tx_buf->first_bd = 0;
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
276 struct netdev_queue *txq;
277 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278 unsigned int pkts_compl = 0, bytes_compl = 0;
280 #ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp->panic))
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 sw_cons = txdata->tx_pkt_cons;
289 while (sw_cons != hw_cons) {
292 pkt_cons = TX_BD(sw_cons);
294 DP(NETIF_MSG_TX_DONE,
295 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
296 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
298 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
299 &pkts_compl, &bytes_compl);
304 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
306 txdata->tx_pkt_cons = sw_cons;
307 txdata->tx_bd_cons = bd_cons;
309 /* Need to make the tx_bd_cons update visible to start_xmit()
310 * before checking for netif_tx_queue_stopped(). Without the
311 * memory barrier, there is a small possibility that
312 * start_xmit() will miss it and cause the queue to be stopped
314 * On the other hand we need an rmb() here to ensure the proper
315 * ordering of bit testing in the following
316 * netif_tx_queue_stopped(txq) call.
320 if (unlikely(netif_tx_queue_stopped(txq))) {
321 /* Taking tx_lock() is needed to prevent re-enabling the queue
322 * while it's empty. This could happen if rx_action() gets
323 * suspended in bnx2x_tx_int() after the condition before
324 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
326 * stops the queue->sees fresh tx_bd_cons->releases the queue->
327 * sends some packets consuming the whole queue again->
331 __netif_tx_lock(txq, smp_processor_id());
333 if ((netif_tx_queue_stopped(txq)) &&
334 (bp->state == BNX2X_STATE_OPEN) &&
335 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336 netif_tx_wake_queue(txq);
338 __netif_tx_unlock(txq);
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
346 u16 last_max = fp->last_max_sge;
348 if (SUB_S16(idx, last_max) > 0)
349 fp->last_max_sge = idx;
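/* SUB_S16() is a signed 16-bit subtraction, so the update above remains
 * correct when the SGE index wraps past the 16-bit boundary.
 */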
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
354 struct eth_end_agg_rx_cqe *cqe)
356 struct bnx2x *bp = fp->bp;
357 u16 last_max, last_elem, first_elem;
364 /* First mark all used pages */
365 for (i = 0; i < sge_len; i++)
366 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
369 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
372 /* Here we assume that the last SGE index is the biggest */
373 prefetch((void *)(fp->sge_mask));
374 bnx2x_update_last_max_sge(fp,
375 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
377 last_max = RX_SGE(fp->last_max_sge);
378 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
381 /* If ring is not full */
382 if (last_elem + 1 != first_elem)
385 /* Now update the prod */
386 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387 if (likely(fp->sge_mask[i]))
390 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391 delta += BIT_VEC64_ELEM_SZ;
395 fp->rx_sge_prod += delta;
396 /* clear page-end entries */
397 bnx2x_clear_sge_mask_next_elems(fp);
400 DP(NETIF_MSG_RX_STATUS,
401 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
402 fp->last_max_sge, fp->rx_sge_prod);
405 /* Get Toeplitz hash value in the skb using the value from the
406 * CQE (calculated by HW).
408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
409 const struct eth_fast_path_rx_cqe *cqe,
410 enum pkt_hash_types *rxhash_type)
412 /* Get Toeplitz hash from CQE */
413 if ((bp->dev->features & NETIF_F_RXHASH) &&
414 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415 enum eth_rss_hash_type htype;
417 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419 (htype == TCP_IPV6_HASH_TYPE)) ?
420 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
422 return le32_to_cpu(cqe->rss_hash_result);
424 *rxhash_type = PKT_HASH_TYPE_NONE;
428 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
430 struct eth_fast_path_rx_cqe *cqe)
432 struct bnx2x *bp = fp->bp;
433 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
437 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
440 /* print error if current state != stop */
441 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
444 /* Try to map an empty data buffer from the aggregation info */
445 mapping = dma_map_single(&bp->pdev->dev,
446 first_buf->data + NET_SKB_PAD,
447 fp->rx_buf_size, DMA_FROM_DEVICE);
449 * ...if it fails - move the skb from the consumer to the producer
450 * and set the current aggregation state as ERROR to drop it
451 * when TPA_STOP arrives.
454 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455 /* Move the BD from the consumer to the producer */
456 bnx2x_reuse_rx_data(fp, cons, prod);
457 tpa_info->tpa_state = BNX2X_TPA_ERROR;
461 /* move empty data from pool to prod */
462 prod_rx_buf->data = first_buf->data;
463 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
464 /* point prod_bd to new data */
465 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
468 /* move partial skb from cons to pool (don't unmap yet) */
469 *first_buf = *cons_rx_buf;
471 /* mark bin state as START */
472 tpa_info->parsing_flags =
473 le16_to_cpu(cqe->pars_flags.flags);
474 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475 tpa_info->tpa_state = BNX2X_TPA_START;
476 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477 tpa_info->placement_offset = cqe->placement_offset;
478 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479 if (fp->mode == TPA_MODE_GRO) {
480 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
481 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482 tpa_info->gro_size = gro_size;
485 #ifdef BNX2X_STOP_ON_ERROR
486 fp->tpa_queue_used |= (1 << queue);
487 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
492 /* Timestamp option length allowed for TPA aggregation:
494 * nop nop kind length echo val
496 #define TPA_TSTAMP_OPT_LEN 12
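/* i.e. nop(1) + nop(1) + kind(1) + length(1) + TS value(4) + TS echo
 * reply(4) = 12 bytes - a standard TCP timestamp option padded with two
 * NOPs.
 */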
498 * bnx2x_set_gro_params - compute GRO values
501 * @parsing_flags: parsing flags from the START CQE
502 * @len_on_bd: total length of the first packet for the
504 * @pkt_len: length of all segments
506 * The approximate MSS value for this aggregation is calculated using
507 * its first packet.
508 * Compute number of aggregated segments, and gso_type.
510 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511 u16 len_on_bd, unsigned int pkt_len,
512 u16 num_of_coalesced_segs)
514 /* TPA aggregation won't have either IP options or TCP options
515 * other than timestamp or IPv6 extension headers.
517 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
519 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
520 PRS_FLAG_OVERETH_IPV6) {
521 hdrs_len += sizeof(struct ipv6hdr);
522 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
524 hdrs_len += sizeof(struct iphdr);
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
528 /* Check if there was a TCP timestamp; if there was, it will
529 * always be 12 bytes long: nop nop kind length echo val.
531 * Otherwise FW would close the aggregation.
533 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534 hdrs_len += TPA_TSTAMP_OPT_LEN;
536 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
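/* e.g. for IPv4 without the timestamp option hdrs_len is
 * ETH_HLEN + 20 + 20, so gso_size is the TCP payload carried by the
 * first packet of the aggregation.
 */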
538 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539 * to skb_shinfo(skb)->gso_segs
541 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545 u16 index, gfp_t gfp_mask)
547 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549 struct bnx2x_alloc_pool *pool = &fp->page_pool;
553 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
554 if (unlikely(!pool->page))
560 mapping = dma_map_page(&bp->pdev->dev, pool->page,
561 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
562 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
563 BNX2X_ERR("Can't map sge\n");
567 sw_buf->page = pool->page;
568 sw_buf->offset = pool->offset;
570 dma_unmap_addr_set(sw_buf, mapping, mapping);
572 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
573 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
575 pool->offset += SGE_PAGE_SIZE;
576 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
577 get_page(pool->page);
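/* An extra page reference is taken only while another SGE-sized chunk
 * still fits in the page, so the page stays alive for the pool's next
 * slice; the reference handed to this SGE is dropped when it is freed.
 */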
583 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
584 struct bnx2x_agg_info *tpa_info,
587 struct eth_end_agg_rx_cqe *cqe,
590 struct sw_rx_page *rx_pg, old_rx_pg;
591 u32 i, frag_len, frag_size;
592 int err, j, frag_id = 0;
593 u16 len_on_bd = tpa_info->len_on_bd;
594 u16 full_page = 0, gro_size = 0;
596 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
598 if (fp->mode == TPA_MODE_GRO) {
599 gro_size = tpa_info->gro_size;
600 full_page = tpa_info->full_page;
603 /* This is needed in order to enable forwarding support */
605 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
606 le16_to_cpu(cqe->pkt_len),
607 le16_to_cpu(cqe->num_of_coalesced_segs));
609 #ifdef BNX2X_STOP_ON_ERROR
610 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
611 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
613 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
619 /* Run through the SGL and compose the fragmented skb */
620 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
621 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
623 /* FW gives the indices of the SGE as if the ring is an array
624 (meaning that "next" element will consume 2 indices) */
625 if (fp->mode == TPA_MODE_GRO)
626 frag_len = min_t(u32, frag_size, (u32)full_page);
628 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
630 rx_pg = &fp->rx_page_ring[sge_idx];
633 /* If we fail to allocate a substitute page, we simply stop
634 where we are and drop the whole packet */
635 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
637 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
641 dma_unmap_page(&bp->pdev->dev,
642 dma_unmap_addr(&old_rx_pg, mapping),
643 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
644 /* Add one frag and update the appropriate fields in the skb */
645 if (fp->mode == TPA_MODE_LRO)
646 skb_fill_page_desc(skb, j, old_rx_pg.page,
647 old_rx_pg.offset, frag_len);
651 for (rem = frag_len; rem > 0; rem -= gro_size) {
652 int len = rem > gro_size ? gro_size : rem;
653 skb_fill_page_desc(skb, frag_id++,
655 old_rx_pg.offset + offset,
658 get_page(old_rx_pg.page);
663 skb->data_len += frag_len;
664 skb->truesize += SGE_PAGES;
665 skb->len += frag_len;
667 frag_size -= frag_len;
673 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
675 if (fp->rx_frag_size)
681 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
683 if (fp->rx_frag_size) {
684 /* GFP_KERNEL allocations are used only during initialization */
685 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
686 return (void *)__get_free_page(gfp_mask);
688 return netdev_alloc_frag(fp->rx_frag_size);
691 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
695 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
697 const struct iphdr *iph = ip_hdr(skb);
700 skb_set_transport_header(skb, sizeof(struct iphdr));
703 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
704 iph->saddr, iph->daddr, 0);
707 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
709 struct ipv6hdr *iph = ipv6_hdr(skb);
712 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
715 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
716 &iph->saddr, &iph->daddr, 0);
719 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
720 void (*gro_func)(struct bnx2x*, struct sk_buff*))
722 skb_reset_network_header(skb);
724 tcp_gro_complete(skb);
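/* The csum helper called just above seeds th->check with the TCP
 * pseudo-header checksum, which is what the stack expects before
 * tcp_gro_complete() finalizes the aggregated skb (e.g. so it can be
 * resegmented correctly if forwarded).
 */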
728 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
732 if (skb_shinfo(skb)->gso_size) {
733 switch (be16_to_cpu(skb->protocol)) {
735 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
738 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
741 netdev_WARN_ONCE(bp->dev,
742 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
743 be16_to_cpu(skb->protocol));
747 skb_record_rx_queue(skb, fp->rx_queue);
748 napi_gro_receive(&fp->napi, skb);
751 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
752 struct bnx2x_agg_info *tpa_info,
754 struct eth_end_agg_rx_cqe *cqe,
757 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
758 u8 pad = tpa_info->placement_offset;
759 u16 len = tpa_info->len_on_bd;
760 struct sk_buff *skb = NULL;
761 u8 *new_data, *data = rx_buf->data;
762 u8 old_tpa_state = tpa_info->tpa_state;
764 tpa_info->tpa_state = BNX2X_TPA_STOP;
766 /* If there was an error during the handling of the TPA_START -
767 * drop this aggregation.
769 if (old_tpa_state == BNX2X_TPA_ERROR)
772 /* Try to allocate the new data */
773 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
774 /* Unmap skb in the pool anyway, as we are going to change
775 pool entry status to BNX2X_TPA_STOP even if new skb allocation
777 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
778 fp->rx_buf_size, DMA_FROM_DEVICE);
779 if (likely(new_data))
780 skb = build_skb(data, fp->rx_frag_size);
783 #ifdef BNX2X_STOP_ON_ERROR
784 if (pad + len > fp->rx_buf_size) {
785 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
786 pad, len, fp->rx_buf_size);
792 skb_reserve(skb, pad + NET_SKB_PAD);
794 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
796 skb->protocol = eth_type_trans(skb, bp->dev);
797 skb->ip_summed = CHECKSUM_UNNECESSARY;
799 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
800 skb, cqe, cqe_idx)) {
801 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
802 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
803 bnx2x_gro_receive(bp, fp, skb);
805 DP(NETIF_MSG_RX_STATUS,
806 "Failed to allocate new pages - dropping packet!\n");
807 dev_kfree_skb_any(skb);
810 /* put new data in bin */
811 rx_buf->data = new_data;
816 bnx2x_frag_free(fp, new_data);
818 /* drop the packet and keep the buffer in the bin */
819 DP(NETIF_MSG_RX_STATUS,
820 "Failed to allocate or map a new skb - dropping packet!\n");
821 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
824 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
825 u16 index, gfp_t gfp_mask)
828 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
829 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
832 data = bnx2x_frag_alloc(fp, gfp_mask);
833 if (unlikely(data == NULL))
836 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
839 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
840 bnx2x_frag_free(fp, data);
841 BNX2X_ERR("Can't map rx data\n");
846 dma_unmap_addr_set(rx_buf, mapping, mapping);
848 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
849 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
855 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
856 struct bnx2x_fastpath *fp,
857 struct bnx2x_eth_q_stats *qstats)
859 /* Do nothing if no L4 csum validation was done.
860 * We do not check whether IP csum was validated. For IPv4 we assume
861 * that if the card got as far as validating the L4 csum, it also
862 * validated the IP csum. IPv6 has no IP csum.
864 if (cqe->fast_path_cqe.status_flags &
865 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
868 /* If L4 validation was done, check if an error was found. */
870 if (cqe->fast_path_cqe.type_error_flags &
871 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
872 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
873 qstats->hw_csum_err++;
875 skb->ip_summed = CHECKSUM_UNNECESSARY;
878 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
880 struct bnx2x *bp = fp->bp;
881 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
882 u16 sw_comp_cons, sw_comp_prod;
884 union eth_rx_cqe *cqe;
885 struct eth_fast_path_rx_cqe *cqe_fp;
887 #ifdef BNX2X_STOP_ON_ERROR
888 if (unlikely(bp->panic))
894 bd_cons = fp->rx_bd_cons;
895 bd_prod = fp->rx_bd_prod;
896 bd_prod_fw = bd_prod;
897 sw_comp_cons = fp->rx_comp_cons;
898 sw_comp_prod = fp->rx_comp_prod;
900 comp_ring_cons = RCQ_BD(sw_comp_cons);
901 cqe = &fp->rx_comp_ring[comp_ring_cons];
902 cqe_fp = &cqe->fast_path_cqe;
904 DP(NETIF_MSG_RX_STATUS,
905 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
907 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
908 struct sw_rx_bd *rx_buf = NULL;
911 enum eth_rx_cqe_type cqe_fp_type;
915 enum pkt_hash_types rxhash_type;
917 #ifdef BNX2X_STOP_ON_ERROR
918 if (unlikely(bp->panic))
922 bd_prod = RX_BD(bd_prod);
923 bd_cons = RX_BD(bd_cons);
925 /* A rmb() is required to ensure that the CQE is not read
926 * before it is written by the adapter DMA. PCI ordering
927 * rules will make sure the other fields are written before
928 * the marker at the end of struct eth_fast_path_rx_cqe
929 * but without rmb() a weakly ordered processor can process
930 * stale data. Without the barrier TPA state-machine might
931 * enter an inconsistent state and the kernel stack might be
932 * provided with an incorrect packet description - these lead
933 * to various kernel crashes.
937 cqe_fp_flags = cqe_fp->type_error_flags;
938 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
940 DP(NETIF_MSG_RX_STATUS,
941 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
942 CQE_TYPE(cqe_fp_flags),
943 cqe_fp_flags, cqe_fp->status_flags,
944 le32_to_cpu(cqe_fp->rss_hash_result),
945 le16_to_cpu(cqe_fp->vlan_tag),
946 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
948 /* is this a slowpath msg? */
949 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
950 bnx2x_sp_event(fp, cqe);
954 rx_buf = &fp->rx_buf_ring[bd_cons];
957 if (!CQE_TYPE_FAST(cqe_fp_type)) {
958 struct bnx2x_agg_info *tpa_info;
959 u16 frag_size, pages;
960 #ifdef BNX2X_STOP_ON_ERROR
962 if (fp->mode == TPA_MODE_DISABLED &&
963 (CQE_TYPE_START(cqe_fp_type) ||
964 CQE_TYPE_STOP(cqe_fp_type)))
965 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
966 CQE_TYPE(cqe_fp_type));
969 if (CQE_TYPE_START(cqe_fp_type)) {
970 u16 queue = cqe_fp->queue_index;
971 DP(NETIF_MSG_RX_STATUS,
972 "calling tpa_start on queue %d\n",
975 bnx2x_tpa_start(fp, queue,
981 queue = cqe->end_agg_cqe.queue_index;
982 tpa_info = &fp->tpa_info[queue];
983 DP(NETIF_MSG_RX_STATUS,
984 "calling tpa_stop on queue %d\n",
987 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
990 if (fp->mode == TPA_MODE_GRO)
991 pages = (frag_size + tpa_info->full_page - 1) /
994 pages = SGE_PAGE_ALIGN(frag_size) >>
997 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
998 &cqe->end_agg_cqe, comp_ring_cons);
999 #ifdef BNX2X_STOP_ON_ERROR
1004 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1008 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1009 pad = cqe_fp->placement_offset;
1010 dma_sync_single_for_cpu(&bp->pdev->dev,
1011 dma_unmap_addr(rx_buf, mapping),
1012 pad + RX_COPY_THRESH,
1015 prefetch(data + pad); /* speedup eth_type_trans() */
1016 /* is this an error packet? */
1017 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1018 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1019 "ERROR flags %x rx packet %u\n",
1020 cqe_fp_flags, sw_comp_cons);
1021 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1025 /* Since we don't have a jumbo ring
1026 * copy small packets if mtu > 1500
1028 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1029 (len <= RX_COPY_THRESH)) {
1030 skb = napi_alloc_skb(&fp->napi, len);
1032 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1033 "ERROR packet dropped because of alloc failure\n");
1034 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1037 memcpy(skb->data, data + pad, len);
1038 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
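/* Copy-break: the small frame has been copied into the fresh skb, so
 * the original DMA buffer is simply recycled from the consumer to the
 * producer slot instead of being reallocated.
 */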
1040 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1041 GFP_ATOMIC) == 0)) {
1042 dma_unmap_single(&bp->pdev->dev,
1043 dma_unmap_addr(rx_buf, mapping),
1046 skb = build_skb(data, fp->rx_frag_size);
1047 if (unlikely(!skb)) {
1048 bnx2x_frag_free(fp, data);
1049 bnx2x_fp_qstats(bp, fp)->
1050 rx_skb_alloc_failed++;
1053 skb_reserve(skb, pad);
1055 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1056 "ERROR packet dropped because of alloc failure\n");
1057 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1059 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1065 skb->protocol = eth_type_trans(skb, bp->dev);
1067 /* Set Toeplitz hash for a non-LRO skb */
1068 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1069 skb_set_hash(skb, rxhash, rxhash_type);
1071 skb_checksum_none_assert(skb);
1073 if (bp->dev->features & NETIF_F_RXCSUM)
1074 bnx2x_csum_validate(skb, cqe, fp,
1075 bnx2x_fp_qstats(bp, fp));
1077 skb_record_rx_queue(skb, fp->rx_queue);
1079 /* Check if this packet was timestamped */
1080 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1081 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1082 bnx2x_set_rx_ts(bp, skb);
1084 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1086 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1087 le16_to_cpu(cqe_fp->vlan_tag));
1089 napi_gro_receive(&fp->napi, skb);
1091 rx_buf->data = NULL;
1093 bd_cons = NEXT_RX_IDX(bd_cons);
1094 bd_prod = NEXT_RX_IDX(bd_prod);
1095 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1098 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1099 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1101 /* mark CQE as free */
1102 BNX2X_SEED_CQE(cqe_fp);
1104 if (rx_pkt == budget)
1107 comp_ring_cons = RCQ_BD(sw_comp_cons);
1108 cqe = &fp->rx_comp_ring[comp_ring_cons];
1109 cqe_fp = &cqe->fast_path_cqe;
1112 fp->rx_bd_cons = bd_cons;
1113 fp->rx_bd_prod = bd_prod_fw;
1114 fp->rx_comp_cons = sw_comp_cons;
1115 fp->rx_comp_prod = sw_comp_prod;
1117 /* Update producers */
1118 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1124 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1126 struct bnx2x_fastpath *fp = fp_cookie;
1127 struct bnx2x *bp = fp->bp;
1131 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1132 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1134 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1136 #ifdef BNX2X_STOP_ON_ERROR
1137 if (unlikely(bp->panic))
1141 /* Handle Rx and Tx according to MSI-X vector */
1142 for_each_cos_in_tx_queue(fp, cos)
1143 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1145 prefetch(&fp->sb_running_index[SM_RX_ID]);
1146 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
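/* The SB was acked with IGU_INT_DISABLE above, so this vector stays
 * masked until the scheduled NAPI poll completes its work and re-enables
 * the interrupt.
 */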
1151 /* HW Lock for shared dual port PHYs */
1152 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1154 mutex_lock(&bp->port.phy_mutex);
1156 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1159 void bnx2x_release_phy_lock(struct bnx2x *bp)
1161 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1163 mutex_unlock(&bp->port.phy_mutex);
1166 /* calculates MF speed according to current line speed and MF configuration */
1167 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1169 u16 line_speed = bp->link_vars.line_speed;
1171 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1172 bp->mf_config[BP_VN(bp)]);
1174 /* Calculate the current MAX line speed limit for the MF
1177 if (IS_MF_PERCENT_BW(bp))
1178 line_speed = (line_speed * maxCfg) / 100;
1179 else { /* SD mode */
1180 u16 vn_max_rate = maxCfg * 100;
1182 if (vn_max_rate < line_speed)
1183 line_speed = vn_max_rate;
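/* In SD mode maxCfg is expressed in units of 100 Mbps; e.g. a configured
 * value of 25 caps the reported speed at 2500 Mbps.
 */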
1191 * bnx2x_fill_report_data - fill link report data to report
1193 * @bp: driver handle
1194 * @data: link state to update
1196 * It uses non-atomic bit operations because it is called under the mutex.
1198 static void bnx2x_fill_report_data(struct bnx2x *bp,
1199 struct bnx2x_link_report_data *data)
1201 memset(data, 0, sizeof(*data));
1204 /* Fill the report data: effective line speed */
1205 data->line_speed = bnx2x_get_mf_speed(bp);
1208 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1209 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1210 &data->link_report_flags);
1212 if (!BNX2X_NUM_ETH_QUEUES(bp))
1213 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1214 &data->link_report_flags);
1217 if (bp->link_vars.duplex == DUPLEX_FULL)
1218 __set_bit(BNX2X_LINK_REPORT_FD,
1219 &data->link_report_flags);
1221 /* Rx Flow Control is ON */
1222 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1223 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1224 &data->link_report_flags);
1226 /* Tx Flow Control is ON */
1227 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1228 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1229 &data->link_report_flags);
1231 *data = bp->vf_link_vars;
1236 * bnx2x_link_report - report link status to OS.
1238 * @bp: driver handle
1240 * Calls the __bnx2x_link_report() under the same locking scheme
1241 * as the link/PHY state managing code to ensure a consistent link
1245 void bnx2x_link_report(struct bnx2x *bp)
1247 bnx2x_acquire_phy_lock(bp);
1248 __bnx2x_link_report(bp);
1249 bnx2x_release_phy_lock(bp);
1253 * __bnx2x_link_report - report link status to OS.
1255 * @bp: driver handle
1257 * Non-atomic implementation.
1258 * Should be called under the phy_lock.
1260 void __bnx2x_link_report(struct bnx2x *bp)
1262 struct bnx2x_link_report_data cur_data;
1264 if (bp->force_link_down) {
1265 bp->link_vars.link_up = 0;
1270 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1271 bnx2x_read_mf_cfg(bp);
1273 /* Read the current link report info */
1274 bnx2x_fill_report_data(bp, &cur_data);
1276 /* Don't report link down or exactly the same link status twice */
1277 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1278 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1279 &bp->last_reported_link.link_report_flags) &&
1280 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281 &cur_data.link_report_flags)))
1286 /* We are going to report new link parameters now -
1287 * remember the current data for the next time.
1289 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1291 /* propagate status to VFs */
1293 bnx2x_iov_link_update(bp);
1295 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1296 &cur_data.link_report_flags)) {
1297 netif_carrier_off(bp->dev);
1298 netdev_err(bp->dev, "NIC Link is Down\n");
1304 netif_carrier_on(bp->dev);
1306 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1307 &cur_data.link_report_flags))
1312 /* Handle the FC at the end so that only these flags could possibly be
1313 * set. This way we may easily check whether there is no FC
1316 if (cur_data.link_report_flags) {
1317 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1318 &cur_data.link_report_flags)) {
1319 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1320 &cur_data.link_report_flags))
1321 flow = "ON - receive & transmit";
1323 flow = "ON - receive";
1325 flow = "ON - transmit";
1330 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1331 cur_data.line_speed, duplex, flow);
1335 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1339 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1340 struct eth_rx_sge *sge;
1342 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1344 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1345 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1348 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1349 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
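/* The last two SGE entries of every ring page serve as the 'next page'
 * link, which is why element RX_SGE_CNT * i - 2 is pointed at the DMA
 * address of the following page (wrapping back to the first page at the
 * end).
 */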
1353 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1354 struct bnx2x_fastpath *fp, int last)
1358 for (i = 0; i < last; i++) {
1359 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1360 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1361 u8 *data = first_buf->data;
1364 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1367 if (tpa_info->tpa_state == BNX2X_TPA_START)
1368 dma_unmap_single(&bp->pdev->dev,
1369 dma_unmap_addr(first_buf, mapping),
1370 fp->rx_buf_size, DMA_FROM_DEVICE);
1371 bnx2x_frag_free(fp, data);
1372 first_buf->data = NULL;
1376 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1380 for_each_rx_queue_cnic(bp, j) {
1381 struct bnx2x_fastpath *fp = &bp->fp[j];
1385 /* Activate BD ring */
1387 * this will generate an interrupt (to the TSTORM)
1388 * must only be done after chip is initialized
1390 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1395 void bnx2x_init_rx_rings(struct bnx2x *bp)
1397 int func = BP_FUNC(bp);
1401 /* Allocate TPA resources */
1402 for_each_eth_queue(bp, j) {
1403 struct bnx2x_fastpath *fp = &bp->fp[j];
1406 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1408 if (fp->mode != TPA_MODE_DISABLED) {
1409 /* Fill the per-aggregation pool */
1410 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1411 struct bnx2x_agg_info *tpa_info =
1413 struct sw_rx_bd *first_buf =
1414 &tpa_info->first_buf;
1417 bnx2x_frag_alloc(fp, GFP_KERNEL);
1418 if (!first_buf->data) {
1419 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1421 bnx2x_free_tpa_pool(bp, fp, i);
1422 fp->mode = TPA_MODE_DISABLED;
1425 dma_unmap_addr_set(first_buf, mapping, 0);
1426 tpa_info->tpa_state = BNX2X_TPA_STOP;
1429 /* "next page" elements initialization */
1430 bnx2x_set_next_page_sgl(fp);
1432 /* set SGEs bit mask */
1433 bnx2x_init_sge_ring_bit_mask(fp);
1435 /* Allocate SGEs and initialize the ring elements */
1436 for (i = 0, ring_prod = 0;
1437 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1439 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1441 BNX2X_ERR("was only able to allocate %d rx sges\n",
1443 BNX2X_ERR("disabling TPA for queue[%d]\n",
1445 /* Cleanup already allocated elements */
1446 bnx2x_free_rx_sge_range(bp, fp,
1448 bnx2x_free_tpa_pool(bp, fp,
1450 fp->mode = TPA_MODE_DISABLED;
1454 ring_prod = NEXT_SGE_IDX(ring_prod);
1457 fp->rx_sge_prod = ring_prod;
1461 for_each_eth_queue(bp, j) {
1462 struct bnx2x_fastpath *fp = &bp->fp[j];
1466 /* Activate BD ring */
1468 * this will generate an interrupt (to the TSTORM)
1469 * must only be done after chip is initialized
1471 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1477 if (CHIP_IS_E1(bp)) {
1478 REG_WR(bp, BAR_USTRORM_INTMEM +
1479 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1480 U64_LO(fp->rx_comp_mapping));
1481 REG_WR(bp, BAR_USTRORM_INTMEM +
1482 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1483 U64_HI(fp->rx_comp_mapping));
1488 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1491 struct bnx2x *bp = fp->bp;
1493 for_each_cos_in_tx_queue(fp, cos) {
1494 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1495 unsigned pkts_compl = 0, bytes_compl = 0;
1497 u16 sw_prod = txdata->tx_pkt_prod;
1498 u16 sw_cons = txdata->tx_pkt_cons;
1500 while (sw_cons != sw_prod) {
1501 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1502 &pkts_compl, &bytes_compl);
1506 netdev_tx_reset_queue(
1507 netdev_get_tx_queue(bp->dev,
1508 txdata->txq_index));
1512 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1516 for_each_tx_queue_cnic(bp, i) {
1517 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1521 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1525 for_each_eth_queue(bp, i) {
1526 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1530 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1532 struct bnx2x *bp = fp->bp;
1535 /* ring wasn't allocated */
1536 if (fp->rx_buf_ring == NULL)
1539 for (i = 0; i < NUM_RX_BD; i++) {
1540 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1541 u8 *data = rx_buf->data;
1545 dma_unmap_single(&bp->pdev->dev,
1546 dma_unmap_addr(rx_buf, mapping),
1547 fp->rx_buf_size, DMA_FROM_DEVICE);
1549 rx_buf->data = NULL;
1550 bnx2x_frag_free(fp, data);
1554 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1558 for_each_rx_queue_cnic(bp, j) {
1559 bnx2x_free_rx_bds(&bp->fp[j]);
1563 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1567 for_each_eth_queue(bp, j) {
1568 struct bnx2x_fastpath *fp = &bp->fp[j];
1570 bnx2x_free_rx_bds(fp);
1572 if (fp->mode != TPA_MODE_DISABLED)
1573 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1577 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1579 bnx2x_free_tx_skbs_cnic(bp);
1580 bnx2x_free_rx_skbs_cnic(bp);
1583 void bnx2x_free_skbs(struct bnx2x *bp)
1585 bnx2x_free_tx_skbs(bp);
1586 bnx2x_free_rx_skbs(bp);
1589 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1591 /* load old values */
1592 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1594 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1595 /* leave all but MAX value */
1596 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1598 /* set new MAX value */
1599 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1600 & FUNC_MF_CFG_MAX_BW_MASK;
1602 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1607 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1609 * @bp: driver handle
1610 * @nvecs: number of vectors to be released
1612 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1616 if (nvecs == offset)
1619 /* VFs don't have a default SB */
1621 free_irq(bp->msix_table[offset].vector, bp->dev);
1622 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1623 bp->msix_table[offset].vector);
1627 if (CNIC_SUPPORT(bp)) {
1628 if (nvecs == offset)
1633 for_each_eth_queue(bp, i) {
1634 if (nvecs == offset)
1636 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1637 i, bp->msix_table[offset].vector);
1639 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1643 void bnx2x_free_irq(struct bnx2x *bp)
1645 if (bp->flags & USING_MSIX_FLAG &&
1646 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1647 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1649 /* vfs don't have a default status block */
1653 bnx2x_free_msix_irqs(bp, nvecs);
1655 free_irq(bp->dev->irq, bp->dev);
1659 int bnx2x_enable_msix(struct bnx2x *bp)
1661 int msix_vec = 0, i, rc;
1663 /* VFs don't have a default status block */
1665 bp->msix_table[msix_vec].entry = msix_vec;
1666 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1667 bp->msix_table[0].entry);
1671 /* CNIC requires an MSI-X vector for itself */
1672 if (CNIC_SUPPORT(bp)) {
1673 bp->msix_table[msix_vec].entry = msix_vec;
1674 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1675 msix_vec, bp->msix_table[msix_vec].entry);
1679 /* We need separate vectors for ETH queues only (not FCoE) */
1680 for_each_eth_queue(bp, i) {
1681 bp->msix_table[msix_vec].entry = msix_vec;
1682 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1683 msix_vec, msix_vec, i);
1687 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1690 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1691 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1693 * reconfigure number of tx/rx queues according to available
1696 if (rc == -ENOSPC) {
1697 /* Get by with single vector */
1698 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1700 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1705 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1706 bp->flags |= USING_SINGLE_MSIX_FLAG;
1708 BNX2X_DEV_INFO("set number of queues to 1\n");
1709 bp->num_ethernet_queues = 1;
1710 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1711 } else if (rc < 0) {
1712 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1714 } else if (rc < msix_vec) {
1715 /* how many fewer vectors will we have? */
1716 int diff = msix_vec - rc;
1718 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1721 * decrease number of queues by number of unallocated entries
1723 bp->num_ethernet_queues -= diff;
1724 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1726 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1730 bp->flags |= USING_MSIX_FLAG;
1735 /* fall back to INTx if not enough memory */
1737 bp->flags |= DISABLE_MSI_FLAG;
1742 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1744 int i, rc, offset = 0;
1746 /* no default status block for vf */
1748 rc = request_irq(bp->msix_table[offset++].vector,
1749 bnx2x_msix_sp_int, 0,
1750 bp->dev->name, bp->dev);
1752 BNX2X_ERR("request sp irq failed\n");
1757 if (CNIC_SUPPORT(bp))
1760 for_each_eth_queue(bp, i) {
1761 struct bnx2x_fastpath *fp = &bp->fp[i];
1762 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1765 rc = request_irq(bp->msix_table[offset].vector,
1766 bnx2x_msix_fp_int, 0, fp->name, fp);
1768 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1769 bp->msix_table[offset].vector, rc);
1770 bnx2x_free_msix_irqs(bp, offset);
1777 i = BNX2X_NUM_ETH_QUEUES(bp);
1779 offset = 1 + CNIC_SUPPORT(bp);
1780 netdev_info(bp->dev,
1781 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1782 bp->msix_table[0].vector,
1783 0, bp->msix_table[offset].vector,
1784 i - 1, bp->msix_table[offset + i - 1].vector);
1786 offset = CNIC_SUPPORT(bp);
1787 netdev_info(bp->dev,
1788 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1789 0, bp->msix_table[offset].vector,
1790 i - 1, bp->msix_table[offset + i - 1].vector);
1795 int bnx2x_enable_msi(struct bnx2x *bp)
1799 rc = pci_enable_msi(bp->pdev);
1801 BNX2X_DEV_INFO("MSI is not attainable\n");
1804 bp->flags |= USING_MSI_FLAG;
1809 static int bnx2x_req_irq(struct bnx2x *bp)
1811 unsigned long flags;
1814 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1817 flags = IRQF_SHARED;
1819 if (bp->flags & USING_MSIX_FLAG)
1820 irq = bp->msix_table[0].vector;
1822 irq = bp->pdev->irq;
1824 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1827 static int bnx2x_setup_irqs(struct bnx2x *bp)
1830 if (bp->flags & USING_MSIX_FLAG &&
1831 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1832 rc = bnx2x_req_msix_irqs(bp);
1836 rc = bnx2x_req_irq(bp);
1838 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1841 if (bp->flags & USING_MSI_FLAG) {
1842 bp->dev->irq = bp->pdev->irq;
1843 netdev_info(bp->dev, "using MSI IRQ %d\n",
1846 if (bp->flags & USING_MSIX_FLAG) {
1847 bp->dev->irq = bp->msix_table[0].vector;
1848 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1856 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1860 for_each_rx_queue_cnic(bp, i) {
1861 napi_enable(&bnx2x_fp(bp, i, napi));
1865 static void bnx2x_napi_enable(struct bnx2x *bp)
1869 for_each_eth_queue(bp, i) {
1870 napi_enable(&bnx2x_fp(bp, i, napi));
1874 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1878 for_each_rx_queue_cnic(bp, i) {
1879 napi_disable(&bnx2x_fp(bp, i, napi));
1883 static void bnx2x_napi_disable(struct bnx2x *bp)
1887 for_each_eth_queue(bp, i) {
1888 napi_disable(&bnx2x_fp(bp, i, napi));
1892 void bnx2x_netif_start(struct bnx2x *bp)
1894 if (netif_running(bp->dev)) {
1895 bnx2x_napi_enable(bp);
1896 if (CNIC_LOADED(bp))
1897 bnx2x_napi_enable_cnic(bp);
1898 bnx2x_int_enable(bp);
1899 if (bp->state == BNX2X_STATE_OPEN)
1900 netif_tx_wake_all_queues(bp->dev);
1904 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1906 bnx2x_int_disable_sync(bp, disable_hw);
1907 bnx2x_napi_disable(bp);
1908 if (CNIC_LOADED(bp))
1909 bnx2x_napi_disable_cnic(bp);
1912 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1913 void *accel_priv, select_queue_fallback_t fallback)
1915 struct bnx2x *bp = netdev_priv(dev);
1917 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1918 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1919 u16 ether_type = ntohs(hdr->h_proto);
1921 /* Skip VLAN tag if present */
1922 if (ether_type == ETH_P_8021Q) {
1923 struct vlan_ethhdr *vhdr =
1924 (struct vlan_ethhdr *)skb->data;
1926 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929 /* If ethertype is FCoE or FIP - use FCoE ring */
1930 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1931 return bnx2x_fcoe_tx(bp, txq_index);
1934 /* select a non-FCoE queue */
1935 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
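/* The modulo keeps the fallback hash within the ETH L2 Tx range so that
 * regular traffic never lands on the FCoE queue, whose index sits after
 * all ETH indices.
 */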
1938 void bnx2x_set_num_queues(struct bnx2x *bp)
1941 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1943 /* override in STORAGE SD modes */
1944 if (IS_MF_STORAGE_ONLY(bp))
1945 bp->num_ethernet_queues = 1;
1947 /* Add special queues */
1948 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1949 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1951 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1955 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1957 * @bp: Driver handle
1959 * We currently support at most 16 Tx queues for each CoS, thus we will
1960 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1963 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1964 * index after all ETH L2 indices.
1966 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1967 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1968 * 16..31,...) with indices that are not coupled with any real Tx queue.
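 * For example, with 10 real ETH queues per CoS the usable indices are
 * 0..9, 16..25, 32..41, ... and 10..15, 26..31, ... remain holes.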
1970 * The proper configuration of skb->queue_mapping is handled by
1971 * bnx2x_select_queue() and __skb_tx_hash().
1973 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1974 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1976 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1980 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1981 rx = BNX2X_NUM_ETH_QUEUES(bp);
1983 /* account for fcoe queue */
1984 if (include_cnic && !NO_FCOE(bp)) {
1989 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1991 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1994 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1996 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2000 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2006 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2010 for_each_queue(bp, i) {
2011 struct bnx2x_fastpath *fp = &bp->fp[i];
2014 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2017 * Although there are no IP frames expected to arrive to
2018 * this ring we still want to add an
2019 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2022 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2025 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2026 IP_HEADER_ALIGNMENT_PADDING +
2029 BNX2X_FW_RX_ALIGN_END;
2030 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2031 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2032 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2033 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2035 fp->rx_frag_size = 0;
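/* A zero rx_frag_size tells bnx2x_frag_alloc() (and its free counterpart)
 * to fall back to kmalloc()-based buffers once the buffer plus NET_SKB_PAD
 * no longer fits in a single page.
 */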
2039 static int bnx2x_init_rss(struct bnx2x *bp)
2042 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2044 /* Prepare the initial contents for the indirection table if RSS is
2047 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2048 bp->rss_conf_obj.ind_table[i] =
2050 ethtool_rxfh_indir_default(i, num_eth_queues);
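/* ethtool_rxfh_indir_default() spreads the ETH RX queues round-robin
 * across the indirection table as the initial RSS distribution.
 */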
2053 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2054 * per-port, so if explicit configuration is needed, do it only
2057 * For 57712 and newer on the other hand it's a per-function
2060 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2063 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2064 bool config_hash, bool enable)
2066 struct bnx2x_config_rss_params params = {NULL};
2068 /* Although RSS is meaningless when there is a single HW queue we
2069 * still need it enabled in order to have HW Rx hash generated.
2071 * if (!is_eth_multi(bp))
2072 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2075 params.rss_obj = rss_obj;
2077 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2080 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2082 /* RSS configuration */
2083 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2084 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2085 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2086 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2087 if (rss_obj->udp_rss_v4)
2088 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2089 if (rss_obj->udp_rss_v6)
2090 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2092 if (!CHIP_IS_E1x(bp)) {
2093 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2094 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2095 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2097 /* valid only for TUNN_MODE_GRE tunnel mode */
2098 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2101 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2105 params.rss_result_mask = MULTI_MASK;
2107 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2111 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2112 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2116 return bnx2x_config_rss(bp, ¶ms);
2118 return bnx2x_vfpf_config_rss(bp, ¶ms);
2121 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2123 struct bnx2x_func_state_params func_params = {NULL};
2125 /* Prepare parameters for function state transitions */
2126 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2128 func_params.f_obj = &bp->func_obj;
2129 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2131 func_params.params.hw_init.load_phase = load_code;
2133 return bnx2x_func_state_change(bp, &func_params);
2137 * Cleans the objects that have internal lists without sending
2138 * ramrods. Should be run when interrupts are disabled.
2140 void bnx2x_squeeze_objects(struct bnx2x *bp)
2143 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2144 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2145 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2147 /***************** Cleanup MACs' object first *************************/
2149 /* Wait for completion of requested */
2150 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2151 /* Perform a dry cleanup */
2152 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2154 /* Clean ETH primary MAC */
2155 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2156 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2159 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2161 /* Cleanup UC list */
2163 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2164 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2167 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2169 /***************** Now clean mcast object *****************************/
2170 rparam.mcast_obj = &bp->mcast_obj;
2171 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2173 /* Add a DEL command... - Since we're doing a driver cleanup only,
2174 * we take a lock surrounding both the initial send and the CONTs,
2175 * as we don't want a true completion to disrupt us in the middle.
2177 netif_addr_lock_bh(bp->dev);
2178 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2180 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2183 /* ...and wait until all pending commands are cleared */
2184 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2187 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2189 netif_addr_unlock_bh(bp->dev);
2193 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2195 netif_addr_unlock_bh(bp->dev);
2198 #ifndef BNX2X_STOP_ON_ERROR
2199 #define LOAD_ERROR_EXIT(bp, label) \
2201 (bp)->state = BNX2X_STATE_ERROR; \
2205 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2207 bp->cnic_loaded = false; \
2210 #else /*BNX2X_STOP_ON_ERROR*/
2211 #define LOAD_ERROR_EXIT(bp, label) \
2213 (bp)->state = BNX2X_STATE_ERROR; \
2217 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2219 bp->cnic_loaded = false; \
2223 #endif /*BNX2X_STOP_ON_ERROR*/
2225 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2227 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2228 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2232 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2234 int num_groups, vf_headroom = 0;
2235 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2237 /* number of queues for statistics is number of eth queues + FCoE */
2238 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2240 /* Total number of FW statistics requests =
2241 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2242 * and fcoe l2 queue) stats + num of queues (which includes another 1
2243 * for fcoe l2 queue if applicable)
2245 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2247 /* vf stats appear in the request list, but their data is allocated by
2248 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2249 * it is used to determine where to place the vf stats queries in the
2253 vf_headroom = bnx2x_vf_headroom(bp);
2255 /* Request is built from stats_query_header and an array of
2256 * stats_query_cmd_group each of which contains
2257 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2258 * configured in the stats_query_header.
2261 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2262 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2265 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2266 bp->fw_stats_num, vf_headroom, num_groups);
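/* num_groups is effectively a round-up division of the total request
 * count (PF requests plus VF headroom) by STATS_QUERY_CMD_COUNT.
 */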
2267 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2268 num_groups * sizeof(struct stats_query_cmd_group);
2270 /* Data for statistics requests + stats_counter
2271 * stats_counter holds per-STORM counters that are incremented
2272 * when STORM has finished with the current request.
2273 * memory for FCoE offloaded statistics is counted anyway,
2274 * even if they will not be sent.
2275 * VF stats are not accounted for here as the data of VF stats is stored
2276 * in memory allocated by the VF, not here.
2278 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2279 sizeof(struct per_pf_stats) +
2280 sizeof(struct fcoe_statistics_params) +
2281 sizeof(struct per_queue_stats) * num_queue_stats +
2282 sizeof(struct stats_counter);
2284 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2285 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2290 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2291 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2292 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2293 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2294 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2295 bp->fw_stats_req_sz;
2297 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2298 U64_HI(bp->fw_stats_req_mapping),
2299 U64_LO(bp->fw_stats_req_mapping));
2300 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2301 U64_HI(bp->fw_stats_data_mapping),
2302 U64_LO(bp->fw_stats_data_mapping));
2306 bnx2x_free_fw_stats_mem(bp);
2307 BNX2X_ERR("Can't allocate FW stats memory\n");
2311 /* send load request to mcp and analyze response */
2312 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2318 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2319 DRV_MSG_SEQ_NUMBER_MASK);
2320 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2322 /* Get current FW pulse sequence */
2323 bp->fw_drv_pulse_wr_seq =
2324 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2325 DRV_PULSE_SEQ_MASK);
2326 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2328 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2330 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2331 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2334 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2336 /* if mcp fails to respond we must abort */
2337 if (!(*load_code)) {
2338 BNX2X_ERR("MCP response failure, aborting\n");
2342 /* If mcp refused (e.g. other port is in diagnostic mode) we
2345 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2346 BNX2X_ERR("MCP refused load request, aborting\n");
2352 /* check whether another PF has already loaded FW to chip. In
2353 * virtualized environments a pf from another VM may have already
2354 * initialized the device including loading FW
2356 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2358 /* is another pf loaded on this engine? */
2359 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2360 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2361 /* build my FW version dword */
2362 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2363 (BCM_5710_FW_MINOR_VERSION << 8) +
2364 (BCM_5710_FW_REVISION_VERSION << 16) +
2365 (BCM_5710_FW_ENGINEERING_VERSION << 24);
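/* The version is packed one byte per field with the major number in the
 * least significant byte; e.g. a hypothetical 7.13.1.0 firmware would
 * encode as 0x00010d07.
 */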
2367 /* read loaded FW from chip */
2368 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2370 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2373 /* abort nic load if version mismatch */
2374 if (my_fw != loaded_fw) {
2376 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2379 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2387 /* returns the "mcp load_code" according to global load_count array */
2388 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2390 int path = BP_PATH(bp);
2392 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2393 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2394 bnx2x_load_count[path][2]);
2395 bnx2x_load_count[path][0]++;
2396 bnx2x_load_count[path][1 + port]++;
2397 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2398 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2399 bnx2x_load_count[path][2]);
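/* load_count[path][0] tracks every function loaded on this path and
 * load_count[path][1 + port] those loaded on this port: the first
 * function on the path gets COMMON, the first one on a port gets PORT,
 * and everyone else gets FUNCTION.
 */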
2400 if (bnx2x_load_count[path][0] == 1)
2401 return FW_MSG_CODE_DRV_LOAD_COMMON;
2402 else if (bnx2x_load_count[path][1 + port] == 1)
2403 return FW_MSG_CODE_DRV_LOAD_PORT;
2405 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2408 /* mark PMF if applicable */
2409 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2411 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2412 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2413 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2415 /* We need the barrier to ensure the ordering between the
2416 * writing to bp->port.pmf here and reading it from the
2417 * bnx2x_periodic_task().
2424 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2427 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2429 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2430 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2431 (bp->common.shmem2_base)) {
2432 if (SHMEM2_HAS(bp, dcc_support))
2433 SHMEM2_WR(bp, dcc_support,
2434 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2435 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2436 if (SHMEM2_HAS(bp, afex_driver_support))
2437 SHMEM2_WR(bp, afex_driver_support,
2438 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2441 /* Set AFEX default VLAN tag to an invalid value */
2442 bp->afex_def_vlan_tag = -1;
2446 * bnx2x_bz_fp - zero content of the fastpath structure.
2448 * @bp: driver handle
2449 * @index: fastpath index to be zeroed
2451 * Makes sure the contents of the bp->fp[index].napi are kept
2454 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2456 struct bnx2x_fastpath *fp = &bp->fp[index];
2458 struct napi_struct orig_napi = fp->napi;
2459 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2461 /* bzero bnx2x_fastpath contents */
2463 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2464 sizeof(struct bnx2x_agg_info));
2465 memset(fp, 0, sizeof(*fp));
2467 /* Restore the NAPI object as it has been already initialized */
2468 fp->napi = orig_napi;
2469 fp->tpa_info = orig_tpa_info;
2473 fp->max_cos = bp->max_cos;
2475 /* Special queues support only one CoS */
2478 /* Init txdata pointers */
2480 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2482 for_each_cos_in_tx_queue(fp, cos)
2483 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2484 BNX2X_NUM_ETH_QUEUES(bp) + index];
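/* bnx2x_txq is laid out CoS-major; e.g. with 8 ETH queues, the CoS 1
 * txdata of ETH queue 2 sits at bnx2x_txq[1 * 8 + 2] = bnx2x_txq[10].
 */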
2486 /* set the tpa flag for each queue. The tpa flag determines the queue
2487 * minimal size so it must be set prior to queue memory allocation
2489 if (bp->dev->features & NETIF_F_LRO)
2490 fp->mode = TPA_MODE_LRO;
2491 else if (bp->dev->features & NETIF_F_GRO_HW)
2492 fp->mode = TPA_MODE_GRO;
2494 fp->mode = TPA_MODE_DISABLED;
2496 /* We don't want TPA if it's disabled in bp
2497 * or if this is an FCoE L2 ring.
2499 if (bp->disable_tpa || IS_FCOE_FP(fp))
2500 fp->mode = TPA_MODE_DISABLED;
2503 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2507 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2510 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2511 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2514 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2517 int bnx2x_load_cnic(struct bnx2x *bp)
2519 int i, rc, port = BP_PORT(bp);
2521 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2523 mutex_init(&bp->cnic_mutex);
2526 rc = bnx2x_alloc_mem_cnic(bp);
2528 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2529 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2533 rc = bnx2x_alloc_fp_mem_cnic(bp);
2535 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2536 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539 /* Update the number of queues with the cnic queues */
2540 rc = bnx2x_set_real_num_queues(bp, 1);
2542 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2543 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2546 /* Add all CNIC NAPI objects */
2547 bnx2x_add_all_napi_cnic(bp);
2548 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2549 bnx2x_napi_enable_cnic(bp);
2551 rc = bnx2x_init_hw_func_cnic(bp);
2553 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2555 bnx2x_nic_init_cnic(bp);
2558 /* Enable Timer scan */
2559 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2561 /* setup cnic queues */
2562 for_each_cnic_queue(bp, i) {
2563 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2565 BNX2X_ERR("Queue setup failed\n");
2566 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2571 /* Initialize Rx filter. */
2572 bnx2x_set_rx_mode_inner(bp);
2574 /* re-read iscsi info */
2575 bnx2x_get_iscsi_info(bp);
2576 bnx2x_setup_cnic_irq_info(bp);
2577 bnx2x_setup_cnic_info(bp);
2578 bp->cnic_loaded = true;
2579 if (bp->state == BNX2X_STATE_OPEN)
2580 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2582 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2586 #ifndef BNX2X_STOP_ON_ERROR
2588 /* Disable Timer scan */
2589 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2592 bnx2x_napi_disable_cnic(bp);
2593 /* Update the number of queues without the cnic queues */
2594 if (bnx2x_set_real_num_queues(bp, 0))
2595 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2597 BNX2X_ERR("CNIC-related load failed\n");
2598 bnx2x_free_fp_mem_cnic(bp);
2599 bnx2x_free_mem_cnic(bp);
2601 #endif /* ! BNX2X_STOP_ON_ERROR */
2604 /* must be called with rtnl_lock */
2605 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2607 int port = BP_PORT(bp);
2608 int i, rc = 0, load_code = 0;
2610 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2612 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2614 #ifdef BNX2X_STOP_ON_ERROR
2615 if (unlikely(bp->panic)) {
2616 BNX2X_ERR("Can't load NIC when there is panic\n");
2621 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2623 /* zero the structure w/o any lock, before SP handler is initialized */
2624 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2625 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2626 &bp->last_reported_link.link_report_flags);
2629 /* must be called before memory allocation and HW init */
2630 bnx2x_ilt_set_info(bp);
2633 * Zero fastpath structures preserving invariants like napi, which are
2634 * allocated only once, fp index, max_cos, bp pointer.
2635 * Also set fp->mode and txdata_ptr.
2637 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2638 for_each_queue(bp, i)
2640 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2641 bp->num_cnic_queues) *
2642 sizeof(struct bnx2x_fp_txdata));
2644 bp->fcoe_init = false;
2646 /* Set the receive queues buffer size */
2647 bnx2x_set_rx_buf_size(bp);
2650 rc = bnx2x_alloc_mem(bp);
2652 BNX2X_ERR("Unable to allocate bp memory\n");
2657 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2658 * of memory available for RSS queues
2660 rc = bnx2x_alloc_fp_mem(bp);
2662 BNX2X_ERR("Unable to allocate memory for fps\n");
2663 LOAD_ERROR_EXIT(bp, load_error0);
2666 /* Allocate memory for FW statistics */
2667 if (bnx2x_alloc_fw_stats_mem(bp))
2668 LOAD_ERROR_EXIT(bp, load_error0);
2670 /* request pf to initialize status blocks */
2672 rc = bnx2x_vfpf_init(bp);
2674 LOAD_ERROR_EXIT(bp, load_error0);
2677 /* Since bnx2x_alloc_mem() may update
2678 * bp->num_queues, bnx2x_set_real_num_queues() should always
2679 * come after it. At this stage cnic queues are not counted.
2681 rc = bnx2x_set_real_num_queues(bp, 0);
2683 BNX2X_ERR("Unable to set real_num_queues\n");
2684 LOAD_ERROR_EXIT(bp, load_error0);
2687 /* configure multi cos mappings in kernel.
2688 * This configuration may be overridden by a multi-class queue
2689 * discipline or by a dcbx negotiation result.
2691 bnx2x_setup_tc(bp->dev, bp->max_cos);
2693 /* Add all NAPI objects */
2694 bnx2x_add_all_napi(bp);
2695 DP(NETIF_MSG_IFUP, "napi added\n");
2696 bnx2x_napi_enable(bp);
2699 /* set pf load just before approaching the MCP */
2700 bnx2x_set_pf_load(bp);
2702 /* if mcp exists send load request and analyze response */
2703 if (!BP_NOMCP(bp)) {
2704 /* attempt to load pf */
2705 rc = bnx2x_nic_load_request(bp, &load_code);
2707 LOAD_ERROR_EXIT(bp, load_error1);
2709 /* what did mcp say? */
2710 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2712 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2713 LOAD_ERROR_EXIT(bp, load_error2);
2716 load_code = bnx2x_nic_load_no_mcp(bp, port);
2719 /* mark pmf if applicable */
2720 bnx2x_nic_load_pmf(bp, load_code);
2722 /* Init Function state controlling object */
2723 bnx2x__init_func_obj(bp);
2726 rc = bnx2x_init_hw(bp, load_code);
2728 BNX2X_ERR("HW init failed, aborting\n");
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2730 LOAD_ERROR_EXIT(bp, load_error2);
2734 bnx2x_pre_irq_nic_init(bp);
2736 /* Connect to IRQs */
2737 rc = bnx2x_setup_irqs(bp);
2739 BNX2X_ERR("setup irqs failed\n");
2741 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2742 LOAD_ERROR_EXIT(bp, load_error2);
2745 /* Init per-function objects */
2747 /* Setup NIC internals and enable interrupts */
2748 bnx2x_post_irq_nic_init(bp, load_code);
2750 bnx2x_init_bp_objs(bp);
2751 bnx2x_iov_nic_init(bp);
2753 /* Set AFEX default VLAN tag to an invalid value */
2754 bp->afex_def_vlan_tag = -1;
2755 bnx2x_nic_load_afex_dcc(bp, load_code);
2756 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2757 rc = bnx2x_func_start(bp);
2759 BNX2X_ERR("Function start failed!\n");
2760 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2762 LOAD_ERROR_EXIT(bp, load_error3);
2765 /* Send LOAD_DONE command to MCP */
2766 if (!BP_NOMCP(bp)) {
2767 load_code = bnx2x_fw_command(bp,
2768 DRV_MSG_CODE_LOAD_DONE, 0);
2770 BNX2X_ERR("MCP response failure, aborting\n");
2772 LOAD_ERROR_EXIT(bp, load_error3);
2776 /* initialize FW coalescing state machines in RAM */
2777 bnx2x_update_coalesce(bp);
2780 /* setup the leading queue */
2781 rc = bnx2x_setup_leading(bp);
2783 BNX2X_ERR("Setup leading failed!\n");
2784 LOAD_ERROR_EXIT(bp, load_error3);
2787 /* set up the rest of the queues */
2788 for_each_nondefault_eth_queue(bp, i) {
2790 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2792 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2794 BNX2X_ERR("Queue %d setup failed\n", i);
2795 LOAD_ERROR_EXIT(bp, load_error3);
2800 rc = bnx2x_init_rss(bp);
2802 BNX2X_ERR("PF RSS init failed\n");
2803 LOAD_ERROR_EXIT(bp, load_error3);
2806 /* Now when Clients are configured we are ready to work */
2807 bp->state = BNX2X_STATE_OPEN;
2809 /* Configure a ucast MAC */
2811 rc = bnx2x_set_eth_mac(bp, true);
2813 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2816 BNX2X_ERR("Setting Ethernet MAC failed\n");
2817 LOAD_ERROR_EXIT(bp, load_error3);
2820 if (IS_PF(bp) && bp->pending_max) {
2821 bnx2x_update_max_mf_config(bp, bp->pending_max);
2822 bp->pending_max = 0;
2825 bp->force_link_down = false;
2827 rc = bnx2x_initial_phy_init(bp, load_mode);
2829 LOAD_ERROR_EXIT(bp, load_error3);
2831 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2833 /* Start fast path */
2835 /* Re-configure vlan filters */
2836 rc = bnx2x_vlan_reconfigure_vid(bp);
2838 LOAD_ERROR_EXIT(bp, load_error3);
2840 /* Initialize Rx filter. */
2841 bnx2x_set_rx_mode_inner(bp);
2843 if (bp->flags & PTP_SUPPORTED) {
2845 bnx2x_configure_ptp_filters(bp);
2848 switch (load_mode) {
2850 /* Tx queue should only be re-enabled */
2851 netif_tx_wake_all_queues(bp->dev);
2855 netif_tx_start_all_queues(bp->dev);
2856 smp_mb__after_atomic();
2860 case LOAD_LOOPBACK_EXT:
2861 bp->state = BNX2X_STATE_DIAG;
2869 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2871 bnx2x__link_status_update(bp);
2873 /* start the timer */
2874 mod_timer(&bp->timer, jiffies + bp->current_interval);
2876 if (CNIC_ENABLED(bp))
2877 bnx2x_load_cnic(bp);
2880 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2882 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2883 /* mark driver as loaded in shmem2 */
2885 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2886 val &= ~DRV_FLAGS_MTU_MASK;
2887 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2888 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2889 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2890 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2893 /* Wait for all pending SP commands to complete */
2894 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2895 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2896 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2900 /* Update driver data for On-Chip MFW dump. */
2902 bnx2x_update_mfw_dump(bp);
2904 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2905 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2906 bnx2x_dcbx_init(bp, false);
2908 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2909 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2911 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2915 #ifndef BNX2X_STOP_ON_ERROR
2918 bnx2x_int_disable_sync(bp, 1);
2920 /* Clean queueable objects */
2921 bnx2x_squeeze_objects(bp);
2924 /* Free SKBs, SGEs, TPA pool and driver internals */
2925 bnx2x_free_skbs(bp);
2926 for_each_rx_queue(bp, i)
2927 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2932 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2933 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2934 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2939 bnx2x_napi_disable(bp);
2940 bnx2x_del_all_napi(bp);
2942 /* clear pf_load status, as it was already set */
2944 bnx2x_clear_pf_load(bp);
2946 bnx2x_free_fw_stats_mem(bp);
2947 bnx2x_free_fp_mem(bp);
2951 #endif /* ! BNX2X_STOP_ON_ERROR */
2954 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2958 /* Wait until tx fastpath tasks complete */
2959 for_each_tx_queue(bp, i) {
2960 struct bnx2x_fastpath *fp = &bp->fp[i];
2962 for_each_cos_in_tx_queue(fp, cos)
2963 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2970 /* must be called with rtnl_lock */
2971 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2974 bool global = false;
2976 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2978 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2979 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2981 /* mark driver as unloaded in shmem2 */
2982 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2984 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2985 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2986 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2989 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2990 (bp->state == BNX2X_STATE_CLOSED ||
2991 bp->state == BNX2X_STATE_ERROR)) {
2992 /* We can get here if the driver has been unloaded
2993 * during parity error recovery and is either waiting for a
2994 * leader to complete or for other functions to unload and
2995 * then ifdown has been issued. In this case we want to
2996 * unload and let other functions complete a recovery
2999 bp->recovery_state = BNX2X_RECOVERY_DONE;
3001 bnx2x_release_leader_lock(bp);
3004 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3005 BNX2X_ERR("Can't unload in closed or error state\n");
3009 /* Nothing to do during unload if previous bnx2x_nic_load()
3010 * has not completed successfully - all resources are released.
3012 * we can get here only after an unsuccessful ndo_* callback, during which
3013 * dev->IFF_UP flag is still on.
3015 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3018 /* It's important to set bp->state to a value different from
3019 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3020 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3022 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3025 /* indicate to VFs that the PF is going down */
3026 bnx2x_iov_channel_down(bp);
3028 if (CNIC_LOADED(bp))
3029 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3032 bnx2x_tx_disable(bp);
3033 netdev_reset_tc(bp->dev);
3035 bp->rx_mode = BNX2X_RX_MODE_NONE;
3037 del_timer_sync(&bp->timer);
3039 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3040 /* Set ALWAYS_ALIVE bit in shmem */
3041 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3042 bnx2x_drv_pulse(bp);
3043 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3044 bnx2x_save_statistics(bp);
3047 /* wait till consumers catch up with producers in all queues.
3048 * If we're recovering, FW can't write to host so no reason
3049 * to wait for the queues to complete all Tx.
3051 if (unload_mode != UNLOAD_RECOVERY)
3052 bnx2x_drain_tx_queues(bp);
3054 /* if VF, indicate to the PF that this function is going down (the PF will delete sp
3055 * elements and clear initializations).
3058 bnx2x_vfpf_close_vf(bp);
3059 else if (unload_mode != UNLOAD_RECOVERY)
3060 /* if this is a normal/close unload, need to clean up the chip */
3061 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3063 /* Send the UNLOAD_REQUEST to the MCP */
3064 bnx2x_send_unload_req(bp, unload_mode);
3066 /* Prevent transactions to host from the functions on the
3067 * engine that doesn't reset global blocks in case of global
3068 * attention once global blocks are reset and gates are opened
3069 * (the engine whose leader will perform the recovery
3072 if (!CHIP_IS_E1x(bp))
3073 bnx2x_pf_disable(bp);
3075 /* Disable HW interrupts, NAPI */
3076 bnx2x_netif_stop(bp, 1);
3077 /* Delete all NAPI objects */
3078 bnx2x_del_all_napi(bp);
3079 if (CNIC_LOADED(bp))
3080 bnx2x_del_all_napi_cnic(bp);
3084 /* Report UNLOAD_DONE to MCP */
3085 bnx2x_send_unload_done(bp, false);
3089 * At this stage no more interrupts will arrive so we may safely clean
3090 * the queueable objects here in case they failed to get cleaned so far.
3093 bnx2x_squeeze_objects(bp);
3095 /* There should be no more pending SP commands at this stage */
3100 /* clear pending work in rtnl task */
3101 bp->sp_rtnl_state = 0;
3104 /* Free SKBs, SGEs, TPA pool and driver internals */
3105 bnx2x_free_skbs(bp);
3106 if (CNIC_LOADED(bp))
3107 bnx2x_free_skbs_cnic(bp);
3108 for_each_rx_queue(bp, i)
3109 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3111 bnx2x_free_fp_mem(bp);
3112 if (CNIC_LOADED(bp))
3113 bnx2x_free_fp_mem_cnic(bp);
3116 if (CNIC_LOADED(bp))
3117 bnx2x_free_mem_cnic(bp);
3121 bp->state = BNX2X_STATE_CLOSED;
3122 bp->cnic_loaded = false;
3124 /* Clear driver version indication in shmem */
3125 if (IS_PF(bp) && !BP_NOMCP(bp))
3126 bnx2x_update_mng_version(bp);
3128 /* Check if there are pending parity attentions. If there are - set
3129 * RECOVERY_IN_PROGRESS.
3131 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3132 bnx2x_set_reset_in_progress(bp);
3134 /* Set RESET_IS_GLOBAL if needed */
3136 bnx2x_set_reset_global(bp);
3139 /* The last driver must disable a "close the gate" if there is no
3140 * parity attention or "process kill" pending.
3143 !bnx2x_clear_pf_load(bp) &&
3144 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3145 bnx2x_disable_close_the_gate(bp);
3147 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3152 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3156 /* If there is no power capability, silently succeed */
3157 if (!bp->pdev->pm_cap) {
3158 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3162 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3166 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3167 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3168 PCI_PM_CTRL_PME_STATUS));
3170 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3171 /* delay required during transition out of D3hot */
3176 /* If there are other clients above don't
3177 shut down the power */
3178 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3180 /* Don't shut down the power for emulation and FPGA */
3181 if (CHIP_REV_IS_SLOW(bp))
3184 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3188 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3190 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3193 /* No more memory access after this point until
3194 * device is brought back to D0.
3199 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3206 * net_device service functions
3208 static int bnx2x_poll(struct napi_struct *napi, int budget)
3210 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3212 struct bnx2x *bp = fp->bp;
3216 #ifdef BNX2X_STOP_ON_ERROR
3217 if (unlikely(bp->panic)) {
3218 napi_complete(napi);
3222 for_each_cos_in_tx_queue(fp, cos)
3223 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3224 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3226 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3228 if (rx_work_done < budget) {
3229 /* No need to update SB for FCoE L2 ring as long as
3230 * it's connected to the default SB and the SB
3231 * has been updated when NAPI was scheduled.
3233 if (IS_FCOE_FP(fp)) {
3234 napi_complete_done(napi, rx_work_done);
3236 bnx2x_update_fpsb_idx(fp);
3237 /* bnx2x_has_rx_work() reads the status block,
3238 * thus we need to ensure that status block indices
3239 * have been actually read (bnx2x_update_fpsb_idx)
3240 * prior to this check (bnx2x_has_rx_work) so that
3241 * we won't write the "newer" value of the status block
3242 * to IGU (if there was a DMA right after
3243 * bnx2x_has_rx_work and if there is no rmb, the memory
3244 * reading (bnx2x_update_fpsb_idx) may be postponed
3245 * to right before bnx2x_ack_sb). In this case there
3246 * will never be another interrupt until there is
3247 * another update of the status block, while there
3248 * is still unhandled work.
3252 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3253 if (napi_complete_done(napi, rx_work_done)) {
3254 /* Re-enable interrupts */
3255 DP(NETIF_MSG_RX_STATUS,
3256 "Update index to %d\n", fp->fp_hc_idx);
3257 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3258 le16_to_cpu(fp->fp_hc_idx),
3262 rx_work_done = budget;
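/* More Rx/Tx work appeared on the recheck, so claim the whole budget
 * and stay on the NAPI poll list.
 */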
3267 return rx_work_done;
3270 /* we split the first BD into headers and data BDs
3271 * to ease the pain of our fellow microcode engineers;
3272 * we use one mapping for both BDs
3274 static u16 bnx2x_tx_split(struct bnx2x *bp,
3275 struct bnx2x_fp_txdata *txdata,
3276 struct sw_tx_bd *tx_buf,
3277 struct eth_tx_start_bd **tx_bd, u16 hlen,
3280 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3281 struct eth_tx_bd *d_tx_bd;
3283 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3285 /* first fix first BD */
3286 h_tx_bd->nbytes = cpu_to_le16(hlen);
3288 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3289 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3291 /* now get a new data BD
3292 * (after the pbd) and fill it */
3293 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3294 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3296 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3297 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3299 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3300 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3301 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
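/* At this point the header BD keeps the first hlen bytes of the original
 * mapping and the new data BD points hlen bytes into the same mapping
 * with the remaining old_len - hlen bytes; no second DMA mapping is
 * created for the split.
 */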
3303 /* this marks the BD as one that has no individual mapping */
3304 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3306 DP(NETIF_MSG_TX_QUEUED,
3307 "TSO split data size is %d (%x:%x)\n",
3308 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3311 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3316 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3317 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
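/* As far as the arithmetic below goes, 'fix' is the signed distance
 * between where the stack's checksum coverage starts and the transport
 * header: for a positive fix the partial sum over the preceding 'fix'
 * bytes is subtracted, for a negative fix the partial sum over the first
 * '-fix' bytes at the transport header is added back, and the result is
 * byte-swapped for the FW.
 */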
3318 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3320 __sum16 tsum = (__force __sum16) csum;
3323 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3324 csum_partial(t_header - fix, fix, 0)));
3327 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3328 csum_partial(t_header, -fix, 0)));
3330 return bswab16(tsum);
3333 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3339 if (skb->ip_summed != CHECKSUM_PARTIAL)
3342 protocol = vlan_get_protocol(skb);
3343 if (protocol == htons(ETH_P_IPV6)) {
3345 prot = ipv6_hdr(skb)->nexthdr;
3348 prot = ip_hdr(skb)->protocol;
3351 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3352 if (inner_ip_hdr(skb)->version == 6) {
3353 rc |= XMIT_CSUM_ENC_V6;
3354 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3355 rc |= XMIT_CSUM_TCP;
3357 rc |= XMIT_CSUM_ENC_V4;
3358 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3359 rc |= XMIT_CSUM_TCP;
3362 if (prot == IPPROTO_TCP)
3363 rc |= XMIT_CSUM_TCP;
3365 if (skb_is_gso(skb)) {
3366 if (skb_is_gso_v6(skb)) {
3367 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3368 if (rc & XMIT_CSUM_ENC)
3369 rc |= XMIT_GSO_ENC_V6;
3371 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3372 if (rc & XMIT_CSUM_ENC)
3373 rc |= XMIT_GSO_ENC_V4;
3380 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3381 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3383 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3384 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3386 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3387 /* check if packet requires linearization (packet is too fragmented);
3388 no need to check fragmentation if page size > 8K (there will be no
3389 violation of FW restrictions) */
3390 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3393 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3394 int to_copy = 0, hlen = 0;
3396 if (xmit_type & XMIT_GSO_ENC)
3397 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3399 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3400 if (xmit_type & XMIT_GSO) {
3401 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3402 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3403 /* Number of windows to check */
3404 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3409 /* Headers length */
3410 if (xmit_type & XMIT_GSO_ENC)
3411 hlen = (int)(skb_inner_transport_header(skb) -
3413 inner_tcp_hdrlen(skb);
3415 hlen = (int)(skb_transport_header(skb) -
3416 skb->data) + tcp_hdrlen(skb);
3419 /* Amount of data (w/o headers) on linear part of SKB */
3419 first_bd_sz = skb_headlen(skb) - hlen;
3421 wnd_sum = first_bd_sz;
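/* The window walk below appears to enforce a FW limit: every window of
 * wnd_size consecutive BDs must carry at least one full MSS of payload,
 * otherwise the skb has to be linearized (to_copy gets set).
 */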
3423 /* Calculate the first sum - it's special */
3424 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3426 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3428 /* If there was data on linear skb data - check it */
3429 if (first_bd_sz > 0) {
3430 if (unlikely(wnd_sum < lso_mss)) {
3435 wnd_sum -= first_bd_sz;
3438 /* Others are easier: run through the frag list and
3439 check all windows */
3440 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3442 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3444 if (unlikely(wnd_sum < lso_mss)) {
3449 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3452 /* in the non-LSO case a too fragmented packet should always be linearized */
3459 if (unlikely(to_copy))
3460 DP(NETIF_MSG_TX_QUEUED,
3461 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3462 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3463 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3470 * bnx2x_set_pbd_gso - update PBD in GSO case.
3474 * @xmit_type: xmit flags
3476 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3477 struct eth_tx_parse_bd_e1x *pbd,
3480 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3481 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3482 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3484 if (xmit_type & XMIT_GSO_V4) {
3485 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3486 pbd->tcp_pseudo_csum =
3487 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3489 0, IPPROTO_TCP, 0));
3491 pbd->tcp_pseudo_csum =
3492 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3493 &ipv6_hdr(skb)->daddr,
3494 0, IPPROTO_TCP, 0));
3498 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3502 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3504 * @bp: driver handle
3506 * @parsing_data: data to be updated
3507 * @xmit_type: xmit flags
3509 * 57712/578xx related, when skb has encapsulation
3511 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3512 u32 *parsing_data, u32 xmit_type)
3515 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3516 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3517 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3519 if (xmit_type & XMIT_CSUM_TCP) {
3520 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3521 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3522 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3524 return skb_inner_transport_header(skb) +
3525 inner_tcp_hdrlen(skb) - skb->data;
3528 /* We support checksum offload for TCP and UDP only.
3529 * No need to pass the UDP header length - it's a constant.
3531 return skb_inner_transport_header(skb) +
3532 sizeof(struct udphdr) - skb->data;
3536 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3538 * @bp: driver handle
3540 * @parsing_data: data to be updated
3541 * @xmit_type: xmit flags
3543 * 57712/578xx related
3545 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3546 u32 *parsing_data, u32 xmit_type)
3549 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3550 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3551 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3553 if (xmit_type & XMIT_CSUM_TCP) {
3554 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3555 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3556 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3558 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3560 /* We support checksum offload for TCP and UDP only.
3561 * No need to pass the UDP header length - it's a constant.
3563 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3566 /* set FW indication according to inner or outer protocols if tunneled */
3567 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3568 struct eth_tx_start_bd *tx_start_bd,
3571 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3573 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3574 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3576 if (!(xmit_type & XMIT_CSUM_TCP))
3577 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3581 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3583 * @bp: driver handle
3585 * @pbd: parse BD to be updated
3586 * @xmit_type: xmit flags
3588 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3589 struct eth_tx_parse_bd_e1x *pbd,
3592 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3594 /* for now NS flag is not used in Linux */
3597 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3598 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3600 pbd->ip_hlen_w = (skb_transport_header(skb) -
3601 skb_network_header(skb)) >> 1;
3603 hlen += pbd->ip_hlen_w;
3605 /* We support checksum offload for TCP and UDP only */
3606 if (xmit_type & XMIT_CSUM_TCP)
3607 hlen += tcp_hdrlen(skb) / 2;
3609 hlen += sizeof(struct udphdr) / 2;
3611 pbd->total_hlen_w = cpu_to_le16(hlen);
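/* total_hlen_w is in 16-bit words; e.g. an untagged IPv4/TCP packet with
 * no options gives 14/2 + 20/2 + 20/2 = 27 words (54 bytes) of headers.
 */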
3614 if (xmit_type & XMIT_CSUM_TCP) {
3615 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3618 s8 fix = SKB_CS_OFF(skb); /* signed! */
3620 DP(NETIF_MSG_TX_QUEUED,
3621 "hlen %d fix %d csum before fix %x\n",
3622 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3624 /* HW bug: fixup the CSUM */
3625 pbd->tcp_pseudo_csum =
3626 bnx2x_csum_fix(skb_transport_header(skb),
3629 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3630 pbd->tcp_pseudo_csum);
3636 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3637 struct eth_tx_parse_bd_e2 *pbd_e2,
3638 struct eth_tx_parse_2nd_bd *pbd2,
3643 u8 outerip_off, outerip_len = 0;
3645 /* from outer IP to transport */
3646 hlen_w = (skb_inner_transport_header(skb) -
3647 skb_network_header(skb)) >> 1;
3650 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3652 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3654 /* outer IP header info */
3655 if (xmit_type & XMIT_CSUM_V4) {
3656 struct iphdr *iph = ip_hdr(skb);
3657 u32 csum = (__force u32)(~iph->check) -
3658 (__force u32)iph->tot_len -
3659 (__force u32)iph->frag_off;
3661 outerip_len = iph->ihl << 1;
3663 pbd2->fw_ip_csum_wo_len_flags_frag =
3664 bswab16(csum_fold((__force __wsum)csum));
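/* The outer IP checksum is handed to the FW with the tot_len and
 * frag_off words backed out of it (hence the subtractions above),
 * presumably so those fields can be rewritten per segment and the
 * checksum patched up cheaply.
 */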
3666 pbd2->fw_ip_hdr_to_payload_w =
3667 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3668 pbd_e2->data.tunnel_data.flags |=
3669 ETH_TUNNEL_DATA_IPV6_OUTER;
3672 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3674 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3676 /* inner IP header info */
3677 if (xmit_type & XMIT_CSUM_ENC_V4) {
3678 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3680 pbd_e2->data.tunnel_data.pseudo_csum =
3681 bswab16(~csum_tcpudp_magic(
3682 inner_ip_hdr(skb)->saddr,
3683 inner_ip_hdr(skb)->daddr,
3684 0, IPPROTO_TCP, 0));
3686 pbd_e2->data.tunnel_data.pseudo_csum =
3687 bswab16(~csum_ipv6_magic(
3688 &inner_ipv6_hdr(skb)->saddr,
3689 &inner_ipv6_hdr(skb)->daddr,
3690 0, IPPROTO_TCP, 0));
3693 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3698 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3699 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3700 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3702 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3703 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3704 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
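/* As with the other parsing-BD fields, the offset is given in 16-bit
 * words, hence the >> 1 on the byte offset.
 */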
3708 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3711 struct ipv6hdr *ipv6;
3713 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3716 if (xmit_type & XMIT_GSO_ENC_V6)
3717 ipv6 = inner_ipv6_hdr(skb);
3718 else /* XMIT_GSO_V6 */
3719 ipv6 = ipv6_hdr(skb);
3721 if (ipv6->nexthdr == NEXTHDR_IPV6)
3722 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3725 /* called with netif_tx_lock
3726 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3727 * netif_wake_queue()
3729 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3731 struct bnx2x *bp = netdev_priv(dev);
3733 struct netdev_queue *txq;
3734 struct bnx2x_fp_txdata *txdata;
3735 struct sw_tx_bd *tx_buf;
3736 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3737 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3738 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3739 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3740 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3741 u32 pbd_e2_parsing_data = 0;
3742 u16 pkt_prod, bd_prod;
3745 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3748 __le16 pkt_size = 0;
3750 u8 mac_type = UNICAST_ADDRESS;
3752 #ifdef BNX2X_STOP_ON_ERROR
3753 if (unlikely(bp->panic))
3754 return NETDEV_TX_BUSY;
3757 txq_index = skb_get_queue_mapping(skb);
3758 txq = netdev_get_tx_queue(dev, txq_index);
3760 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3762 txdata = &bp->bnx2x_txq[txq_index];
3764 /* enable this debug print to view the transmission queue being used
3765 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3766 txq_index, fp_index, txdata_index); */
3768 /* enable this debug print to view the transmission details
3769 DP(NETIF_MSG_TX_QUEUED,
3770 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3771 txdata->cid, fp_index, txdata_index, txdata, fp); */
3773 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3774 skb_shinfo(skb)->nr_frags +
3776 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3777 /* Handle special storage cases separately */
3778 if (txdata->tx_ring_size == 0) {
3779 struct bnx2x_eth_q_stats *q_stats =
3780 bnx2x_fp_qstats(bp, txdata->parent_fp);
3781 q_stats->driver_filtered_tx_pkt++;
3783 return NETDEV_TX_OK;
3785 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3786 netif_tx_stop_queue(txq);
3787 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3789 return NETDEV_TX_BUSY;
3792 DP(NETIF_MSG_TX_QUEUED,
3793 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3794 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3795 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3798 eth = (struct ethhdr *)skb->data;
3800 /* set flag according to packet type (UNICAST_ADDRESS is default) */
3801 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3802 if (is_broadcast_ether_addr(eth->h_dest))
3803 mac_type = BROADCAST_ADDRESS;
3805 mac_type = MULTICAST_ADDRESS;
3808 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3809 /* First, check if we need to linearize the skb (due to FW
3810 restrictions). No need to check fragmentation if page size > 8K
3811 (there will be no violation to FW restrictions) */
3812 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3813 /* Statistics of linearization */
3815 if (skb_linearize(skb) != 0) {
3816 DP(NETIF_MSG_TX_QUEUED,
3817 "SKB linearization failed - silently dropping this SKB\n");
3818 dev_kfree_skb_any(skb);
3819 return NETDEV_TX_OK;
3823 /* Map skb linear data for DMA */
3824 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3825 skb_headlen(skb), DMA_TO_DEVICE);
3826 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3827 DP(NETIF_MSG_TX_QUEUED,
3828 "SKB mapping failed - silently dropping this SKB\n");
3829 dev_kfree_skb_any(skb);
3830 return NETDEV_TX_OK;
3833 Please read carefully. First we use one BD which we mark as start,
3834 then we have a parsing info BD (used for TSO or xsum),
3835 and only then we have the rest of the TSO BDs.
3836 (don't forget to mark the last one as last,
3837 and to unmap only AFTER you write to the BD ...)
3838 And above all, all pbd sizes are in words - NOT DWORDS!
3841 /* get current pkt produced now - advance it just before sending the packet
3842 * since mapping of pages may fail and cause the packet to be dropped
3844 pkt_prod = txdata->tx_pkt_prod;
3845 bd_prod = TX_BD(txdata->tx_bd_prod);
3847 /* get a tx_buf and first BD
3848 * tx_start_bd may be changed during SPLIT,
3849 * but first_bd will always stay first
3851 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3852 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3853 first_bd = tx_start_bd;
3855 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3857 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3858 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3859 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3860 } else if (bp->ptp_tx_skb) {
3861 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3863 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3864 /* schedule check for Tx timestamp */
3865 bp->ptp_tx_skb = skb_get(skb);
3866 bp->ptp_tx_start = jiffies;
3867 schedule_work(&bp->ptp_task);
3871 /* header nbd: indirectly zero other flags! */
3872 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3874 /* remember the first BD of the packet */
3875 tx_buf->first_bd = txdata->tx_bd_prod;
3879 DP(NETIF_MSG_TX_QUEUED,
3880 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3881 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3883 if (skb_vlan_tag_present(skb)) {
3884 tx_start_bd->vlan_or_ethertype =
3885 cpu_to_le16(skb_vlan_tag_get(skb));
3886 tx_start_bd->bd_flags.as_bitfield |=
3887 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3889 /* when transmitting in a vf, start bd must hold the ethertype
3890 * for fw to enforce it
3893 #ifndef BNX2X_STOP_ON_ERROR
3896 /* Still need to consider inband vlan for enforcement */
3897 if (__vlan_get_tag(skb, &vlan_tci)) {
3898 tx_start_bd->vlan_or_ethertype =
3899 cpu_to_le16(ntohs(eth->h_proto));
3901 tx_start_bd->bd_flags.as_bitfield |=
3902 (X_ETH_INBAND_VLAN <<
3903 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3904 tx_start_bd->vlan_or_ethertype =
3905 cpu_to_le16(vlan_tci);
3907 #ifndef BNX2X_STOP_ON_ERROR
3909 /* used by FW for packet accounting */
3910 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3915 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3917 /* turn on parsing and get a BD */
3918 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3920 if (xmit_type & XMIT_CSUM)
3921 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3923 if (!CHIP_IS_E1x(bp)) {
3924 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3925 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3927 if (xmit_type & XMIT_CSUM_ENC) {
3928 u16 global_data = 0;
3930 /* Set PBD in enc checksum offload case */
3931 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3932 &pbd_e2_parsing_data,
3935 /* turn on 2nd parsing and get a BD */
3936 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3938 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3940 memset(pbd2, 0, sizeof(*pbd2));
3942 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3943 (skb_inner_network_header(skb) -
3946 if (xmit_type & XMIT_GSO_ENC)
3947 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3951 pbd2->global_data = cpu_to_le16(global_data);
3953 /* add additional parse BD indication to start BD */
3954 SET_FLAG(tx_start_bd->general_data,
3955 ETH_TX_START_BD_PARSE_NBDS, 1);
3956 /* set encapsulation flag in start BD */
3957 SET_FLAG(tx_start_bd->general_data,
3958 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3960 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3963 } else if (xmit_type & XMIT_CSUM) {
3964 /* Set PBD in checksum offload case w/o encapsulation */
3965 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3966 &pbd_e2_parsing_data,
3970 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3971 /* Add the macs to the parsing BD if this is a vf or if
3972 * Tx Switching is enabled.
3975 /* override GRE parameters in BD */
3976 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3977 &pbd_e2->data.mac_addr.src_mid,
3978 &pbd_e2->data.mac_addr.src_lo,
3981 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3982 &pbd_e2->data.mac_addr.dst_mid,
3983 &pbd_e2->data.mac_addr.dst_lo,
3986 if (bp->flags & TX_SWITCHING)
3987 bnx2x_set_fw_mac_addr(
3988 &pbd_e2->data.mac_addr.dst_hi,
3989 &pbd_e2->data.mac_addr.dst_mid,
3990 &pbd_e2->data.mac_addr.dst_lo,
3992 #ifdef BNX2X_STOP_ON_ERROR
3993 /* Enforce security is always set in Stop on Error -
3994 * source mac should be present in the parsing BD
3996 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3997 &pbd_e2->data.mac_addr.src_mid,
3998 &pbd_e2->data.mac_addr.src_lo,
4003 SET_FLAG(pbd_e2_parsing_data,
4004 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4006 u16 global_data = 0;
4007 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4008 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4009 /* Set PBD in checksum offload case */
4010 if (xmit_type & XMIT_CSUM)
4011 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4013 SET_FLAG(global_data,
4014 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4015 pbd_e1x->global_data |= cpu_to_le16(global_data);
4018 /* Setup the data pointer of the first BD of the packet */
4019 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4020 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4021 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4022 pkt_size = tx_start_bd->nbytes;
4024 DP(NETIF_MSG_TX_QUEUED,
4025 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4026 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4027 le16_to_cpu(tx_start_bd->nbytes),
4028 tx_start_bd->bd_flags.as_bitfield,
4029 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4031 if (xmit_type & XMIT_GSO) {
4033 DP(NETIF_MSG_TX_QUEUED,
4034 "TSO packet len %d hlen %d total len %d tso size %d\n",
4035 skb->len, hlen, skb_headlen(skb),
4036 skb_shinfo(skb)->gso_size);
4038 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4040 if (unlikely(skb_headlen(skb) > hlen)) {
4042 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4046 if (!CHIP_IS_E1x(bp))
4047 pbd_e2_parsing_data |=
4048 (skb_shinfo(skb)->gso_size <<
4049 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4050 ETH_TX_PARSE_BD_E2_LSO_MSS;
4052 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4055 /* Set the PBD's parsing_data field if not zero
4056 * (for the chips newer than 57711).
4058 if (pbd_e2_parsing_data)
4059 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4061 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4063 /* Handle fragmented skb */
4064 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4065 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4067 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4068 skb_frag_size(frag), DMA_TO_DEVICE);
4069 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4070 unsigned int pkts_compl = 0, bytes_compl = 0;
4072 DP(NETIF_MSG_TX_QUEUED,
4073 "Unable to map page - dropping packet...\n");
4075 /* we need to unmap all buffers already mapped
4077 * first_bd->nbd needs to be properly updated
4078 * before the call to bnx2x_free_tx_pkt
4080 first_bd->nbd = cpu_to_le16(nbd);
4081 bnx2x_free_tx_pkt(bp, txdata,
4082 TX_BD(txdata->tx_pkt_prod),
4083 &pkts_compl, &bytes_compl);
4084 return NETDEV_TX_OK;
4087 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4088 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4089 if (total_pkt_bd == NULL)
4090 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4092 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4093 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4094 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4095 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4098 DP(NETIF_MSG_TX_QUEUED,
4099 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4100 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4101 le16_to_cpu(tx_data_bd->nbytes));
4104 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4106 /* update with actual num BDs */
4107 first_bd->nbd = cpu_to_le16(nbd);
4109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4111 /* now send a tx doorbell, counting the next BD
4112 * if the packet contains or ends with it
4114 if (TX_BD_POFF(bd_prod) < nbd)
4117 /* total_pkt_bytes should be set on the first data BD if
4118 * it's not an LSO packet and there is more than one
4119 * data BD. In this case pkt_size is limited by an MTU value.
4120 * However we prefer to set it for an LSO packet (while we don't
4121 * have to) in order to save some CPU cycles in a non-LSO
4122 * case, which we care much more about.
4124 if (total_pkt_bd != NULL)
4125 total_pkt_bd->total_pkt_bytes = pkt_size;
4128 DP(NETIF_MSG_TX_QUEUED,
4129 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4130 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4131 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4132 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4133 le16_to_cpu(pbd_e1x->total_hlen_w));
4135 DP(NETIF_MSG_TX_QUEUED,
4136 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4138 pbd_e2->data.mac_addr.dst_hi,
4139 pbd_e2->data.mac_addr.dst_mid,
4140 pbd_e2->data.mac_addr.dst_lo,
4141 pbd_e2->data.mac_addr.src_hi,
4142 pbd_e2->data.mac_addr.src_mid,
4143 pbd_e2->data.mac_addr.src_lo,
4144 pbd_e2->parsing_data);
4145 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4147 netdev_tx_sent_queue(txq, skb->len);
4149 skb_tx_timestamp(skb);
4151 txdata->tx_pkt_prod++;
4153 * Make sure that the BD data is updated before updating the producer
4154 * since FW might read the BD right after the producer is updated.
4155 * This is only applicable for weak-ordered memory model archs such
4156 * as IA-64. The following barrier is also mandatory since FW
4157 * assumes packets must have BDs.
4161 txdata->tx_db.data.prod += nbd;
4162 /* make sure descriptor update is observed by HW */
4165 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4169 txdata->tx_bd_prod += nbd;
4171 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4172 netif_tx_stop_queue(txq);
4174 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4175 * ordering of set_bit() in netif_tx_stop_queue() and read of
4179 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4180 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4181 netif_tx_wake_queue(txq);
4185 return NETDEV_TX_OK;
4188 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4190 int mfw_vn = BP_FW_MB_IDX(bp);
4193 /* If the shmem shouldn't affect configuration, reflect */
4194 if (!IS_MF_BD(bp)) {
4197 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4204 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4205 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4206 c2s_map[0] = tmp & 0xff;
4207 c2s_map[1] = (tmp >> 8) & 0xff;
4208 c2s_map[2] = (tmp >> 16) & 0xff;
4209 c2s_map[3] = (tmp >> 24) & 0xff;
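/* Each shmem word packs four priority-to-class bytes, lowest priority in
 * the least significant byte; e.g. a (byte-swapped) lower-map value of
 * 0x03020100 yields the identity mapping for priorities 0-3.
 */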
4211 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4212 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4213 c2s_map[4] = tmp & 0xff;
4214 c2s_map[5] = (tmp >> 8) & 0xff;
4215 c2s_map[6] = (tmp >> 16) & 0xff;
4216 c2s_map[7] = (tmp >> 24) & 0xff;
4218 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4219 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4220 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4224 * bnx2x_setup_tc - routine to configure net_device for multi tc
4226 * @dev: net device to configure
4227 * @num_tc: number of traffic classes to enable
4229 * callback connected to the ndo_setup_tc function pointer
4231 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4233 struct bnx2x *bp = netdev_priv(dev);
4234 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4235 int cos, prio, count, offset;
4237 /* setup tc must be called under rtnl lock */
4240 /* no traffic classes requested. Aborting */
4242 netdev_reset_tc(dev);
4246 /* requested to support too many traffic classes */
4247 if (num_tc > bp->max_cos) {
4248 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4249 num_tc, bp->max_cos);
4253 /* declare amount of supported traffic classes */
4254 if (netdev_set_num_tc(dev, num_tc)) {
4255 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4259 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4261 /* configure priority to traffic class mapping */
4262 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4263 int outer_prio = c2s_map[prio];
4265 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4266 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4267 "mapping priority %d to tc %d\n",
4268 outer_prio, bp->prio_to_cos[outer_prio]);
4271 /* Use this configuration to differentiate tc0 from other COSes.
4272 This can be used for ets or pfc, and save the effort of setting
4273 up a multi-class queue disc or negotiating DCBX with a switch
4274 netdev_set_prio_tc_map(dev, 0, 0);
4275 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4276 for (prio = 1; prio < 16; prio++) {
4277 netdev_set_prio_tc_map(dev, prio, 1);
4278 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4281 /* configure traffic class to transmission queue mapping */
4282 for (cos = 0; cos < bp->max_cos; cos++) {
4283 count = BNX2X_NUM_ETH_QUEUES(bp);
4284 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4285 netdev_set_tc_queue(dev, cos, count, offset);
4286 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4287 "mapping tc %d to offset %d count %d\n",
4288 cos, offset, count);
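/* Example: with 4 ETH queues, no CNIC queues and max_cos == 3, tc0 maps
 * to queues 0-3, tc1 to queues 4-7 and tc2 to queues 8-11.
 */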
4294 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4297 struct tc_mqprio_qopt *mqprio = type_data;
4299 if (type != TC_SETUP_QDISC_MQPRIO)
4302 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4304 return bnx2x_setup_tc(dev, mqprio->num_tc);
4307 /* called with rtnl_lock */
4308 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4310 struct sockaddr *addr = p;
4311 struct bnx2x *bp = netdev_priv(dev);
4314 if (!is_valid_ether_addr(addr->sa_data)) {
4315 BNX2X_ERR("Requested MAC address is not valid\n");
4319 if (IS_MF_STORAGE_ONLY(bp)) {
4320 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4324 if (netif_running(dev)) {
4325 rc = bnx2x_set_eth_mac(bp, false);
4330 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4332 if (netif_running(dev))
4333 rc = bnx2x_set_eth_mac(bp, true);
4335 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4336 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4341 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4343 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4344 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4349 if (IS_FCOE_IDX(fp_index)) {
4350 memset(sb, 0, sizeof(union host_hc_status_block));
4351 fp->status_blk_mapping = 0;
4354 if (!CHIP_IS_E1x(bp))
4355 BNX2X_PCI_FREE(sb->e2_sb,
4356 bnx2x_fp(bp, fp_index,
4357 status_blk_mapping),
4358 sizeof(struct host_hc_status_block_e2));
4360 BNX2X_PCI_FREE(sb->e1x_sb,
4361 bnx2x_fp(bp, fp_index,
4362 status_blk_mapping),
4363 sizeof(struct host_hc_status_block_e1x));
4367 if (!skip_rx_queue(bp, fp_index)) {
4368 bnx2x_free_rx_bds(fp);
4370 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4371 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4372 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4373 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4374 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4376 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4377 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4378 sizeof(struct eth_fast_path_rx_cqe) *
4382 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4383 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4384 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4385 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4389 if (!skip_tx_queue(bp, fp_index)) {
4390 /* fastpath tx rings: tx_buf tx_desc */
4391 for_each_cos_in_tx_queue(fp, cos) {
4392 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4394 DP(NETIF_MSG_IFDOWN,
4395 "freeing tx memory of fp %d cos %d cid %d\n",
4396 fp_index, cos, txdata->cid);
4398 BNX2X_FREE(txdata->tx_buf_ring);
4399 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4400 txdata->tx_desc_mapping,
4401 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4404 /* end of fastpath */
4407 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4410 for_each_cnic_queue(bp, i)
4411 bnx2x_free_fp_mem_at(bp, i);
4414 void bnx2x_free_fp_mem(struct bnx2x *bp)
4417 for_each_eth_queue(bp, i)
4418 bnx2x_free_fp_mem_at(bp, i);
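/* Cache pointers to the index_values and running_index arrays of the
 * queue's status block, picking the E2 or E1x layout as appropriate, so
 * they can be accessed directly from the fastpath structure.
 */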
4421 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4423 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4424 if (!CHIP_IS_E1x(bp)) {
4425 bnx2x_fp(bp, index, sb_index_values) =
4426 (__le16 *)status_blk.e2_sb->sb.index_values;
4427 bnx2x_fp(bp, index, sb_running_index) =
4428 (__le16 *)status_blk.e2_sb->sb.running_index;
4430 bnx2x_fp(bp, index, sb_index_values) =
4431 (__le16 *)status_blk.e1x_sb->sb.index_values;
4432 bnx2x_fp(bp, index, sb_running_index) =
4433 (__le16 *)status_blk.e1x_sb->sb.running_index;
4437 /* Returns the number of actually allocated BDs */
4438 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4441 struct bnx2x *bp = fp->bp;
4442 u16 ring_prod, cqe_ring_prod;
4443 int i, failure_cnt = 0;
4445 fp->rx_comp_cons = 0;
4446 cqe_ring_prod = ring_prod = 0;
4448 /* This routine is called only during fp init, so
4449 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4451 for (i = 0; i < rx_ring_size; i++) {
4452 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4456 ring_prod = NEXT_RX_IDX(ring_prod);
4457 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4458 WARN_ON(ring_prod <= (i - failure_cnt));
4462 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4463 i - failure_cnt, fp->index);
4465 fp->rx_bd_prod = ring_prod;
4466 /* Limit the CQE producer by the CQE ring size */
4467 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4470 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4472 return i - failure_cnt;
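/* Link the RCQ pages into a ring: the last CQE of every page is rewritten
 * as a next-page pointer to the following page, with the last page wrapping
 * back to the first one (i % NUM_RCQ_RINGS).
 */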
4475 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4479 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4480 struct eth_rx_cqe_next_page *nextpg;
4482 nextpg = (struct eth_rx_cqe_next_page *)
4483 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4485 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4486 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4488 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4489 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
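/* Allocate all per-queue memory for one fastpath: pick an Rx ring size,
 * allocate the status block (except for FCoE, which uses the default SB),
 * the Tx rings for every COS and the Rx buffer/descriptor/completion/SGE
 * rings, then populate the Rx BDs, falling back to a smaller ring (or
 * failing the queue) when memory is short.
 */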
4493 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4495 union host_hc_status_block *sb;
4496 struct bnx2x_fastpath *fp = &bp->fp[index];
4499 int rx_ring_size = 0;
4501 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4502 rx_ring_size = MIN_RX_SIZE_NONTPA;
4503 bp->rx_ring_size = rx_ring_size;
4504 } else if (!bp->rx_ring_size) {
4505 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4507 if (CHIP_IS_E3(bp)) {
4508 u32 cfg = SHMEM_RD(bp,
4509 dev_info.port_hw_config[BP_PORT(bp)].
4512 /* Decrease ring size for 1G functions */
4513 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4514 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4518 /* allocate at least the number of buffers required by FW */
4519 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4520 MIN_RX_SIZE_TPA, rx_ring_size);
4522 bp->rx_ring_size = rx_ring_size;
4523 } else /* if rx_ring_size specified - use it */
4524 rx_ring_size = bp->rx_ring_size;
4526 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4529 sb = &bnx2x_fp(bp, index, status_blk);
4531 if (!IS_FCOE_IDX(index)) {
4533 if (!CHIP_IS_E1x(bp)) {
4534 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4535 sizeof(struct host_hc_status_block_e2));
4539 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4540 sizeof(struct host_hc_status_block_e1x));
4546 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4547 * set shortcuts for it.
4549 if (!IS_FCOE_IDX(index))
4550 set_sb_shortcuts(bp, index);
4553 if (!skip_tx_queue(bp, index)) {
4554 /* fastpath tx rings: tx_buf tx_desc */
4555 for_each_cos_in_tx_queue(fp, cos) {
4556 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4559 "allocating tx memory of fp %d cos %d\n",
4562 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4563 sizeof(struct sw_tx_bd),
4565 if (!txdata->tx_buf_ring)
4567 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4568 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4569 if (!txdata->tx_desc_ring)
4575 if (!skip_rx_queue(bp, index)) {
4576 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4577 bnx2x_fp(bp, index, rx_buf_ring) =
4578 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4579 if (!bnx2x_fp(bp, index, rx_buf_ring))
4581 bnx2x_fp(bp, index, rx_desc_ring) =
4582 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4583 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4584 if (!bnx2x_fp(bp, index, rx_desc_ring))
4587 /* Seed all CQEs by 1s */
4588 bnx2x_fp(bp, index, rx_comp_ring) =
4589 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4590 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4591 if (!bnx2x_fp(bp, index, rx_comp_ring))
4595 bnx2x_fp(bp, index, rx_page_ring) =
4596 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4598 if (!bnx2x_fp(bp, index, rx_page_ring))
4600 bnx2x_fp(bp, index, rx_sge_ring) =
4601 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4602 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4603 if (!bnx2x_fp(bp, index, rx_sge_ring))
4606 bnx2x_set_next_page_rx_bd(fp);
4609 bnx2x_set_next_page_rx_cq(fp);
4612 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4613 if (ring_size < rx_ring_size)
4619 /* handles low memory cases */
4621 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4623 /* FW will drop all packets if the queue is not big enough,
4624 * so in that case we disable the queue.
4625 * The minimum size differs for OOO, TPA and non-TPA queues
4627 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4628 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4629 /* release memory allocated for this queue */
4630 bnx2x_free_fp_mem_at(bp, index);
4636 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4640 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4641 /* we will fail the load process instead of marking NO_FCOE_FLAG */
4649 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4653 /* 1. Allocate FP for leading - fatal if error
4654 * 2. Allocate RSS - fix number of queues if error
4658 if (bnx2x_alloc_fp_mem_at(bp, 0))
4662 for_each_nondefault_eth_queue(bp, i)
4663 if (bnx2x_alloc_fp_mem_at(bp, i))
4666 /* handle memory failures */
4667 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4668 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4671 bnx2x_shrink_eth_fp(bp, delta);
4672 if (CNIC_SUPPORT(bp))
4673 /* move non-eth FPs next to the last eth FP;
4674 * this must be done in that order:
4675 * FCOE_IDX < FWD_IDX < OOO_IDX
4678 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4679 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4680 bp->num_ethernet_queues -= delta;
4681 bp->num_queues = bp->num_ethernet_queues +
4682 bp->num_cnic_queues;
4683 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4684 bp->num_queues + delta, bp->num_queues);
4690 void bnx2x_free_mem_bp(struct bnx2x *bp)
4694 for (i = 0; i < bp->fp_array_size; i++)
4695 kfree(bp->fp[i].tpa_info);
4698 kfree(bp->fp_stats);
4699 kfree(bp->bnx2x_txq);
4700 kfree(bp->msix_table);
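/* Allocate the bookkeeping arrays whose size depends on the number of
 * queues: the fastpath array with per-queue TPA aggregation info, the
 * slowpath objects and statistics arrays, the Tx queue array, the MSI-X
 * table and the ILT descriptor. On any failure, everything allocated so
 * far is released via bnx2x_free_mem_bp().
 */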
4704 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4706 struct bnx2x_fastpath *fp;
4707 struct msix_entry *tbl;
4708 struct bnx2x_ilt *ilt;
4709 int msix_table_size = 0;
4710 int fp_array_size, txq_array_size;
4714 * The biggest MSI-X table we might need is the maximum number of fast
4715 * path IGU SBs plus the default SB (for PF only).
4717 msix_table_size = bp->igu_sb_cnt;
4720 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4722 /* fp array: RSS plus CNIC related L2 queues */
4723 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4724 bp->fp_array_size = fp_array_size;
4725 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4727 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4730 for (i = 0; i < bp->fp_array_size; i++) {
4732 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4733 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4734 if (!(fp[i].tpa_info))
4740 /* allocate sp objs */
4741 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4746 /* allocate fp_stats */
4747 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4752 /* Allocate memory for the transmission queues array */
4754 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4755 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4757 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4763 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4766 bp->msix_table = tbl;
4769 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4776 bnx2x_free_mem_bp(bp);
4780 int bnx2x_reload_if_running(struct net_device *dev)
4782 struct bnx2x *bp = netdev_priv(dev);
4784 if (unlikely(!netif_running(dev)))
4787 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4788 return bnx2x_nic_load(bp, LOAD_NORMAL);
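/* Return the index (EXT_PHY1/EXT_PHY2) of the external PHY currently in
 * use: when the link is up this is derived from the link status, otherwise
 * from the configured PHY selection priority.
 */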
4791 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4793 u32 sel_phy_idx = 0;
4794 if (bp->link_params.num_phys <= 1)
4797 if (bp->link_vars.link_up) {
4798 sel_phy_idx = EXT_PHY1;
4799 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4800 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4801 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4802 sel_phy_idx = EXT_PHY2;
4805 switch (bnx2x_phy_selection(&bp->link_params)) {
4806 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4807 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4808 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4809 sel_phy_idx = EXT_PHY1;
4811 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4812 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4813 sel_phy_idx = EXT_PHY2;
4820 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4822 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4824 * The selected (active) PHY index always refers to the post-swap ordering
4825 * (when PHY swapping is enabled), so in that case we reverse it here to get the configuration index. */
4829 if (bp->link_params.multi_phy_config &
4830 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4831 if (sel_phy_idx == EXT_PHY1)
4832 sel_phy_idx = EXT_PHY2;
4833 else if (sel_phy_idx == EXT_PHY2)
4834 sel_phy_idx = EXT_PHY1;
4836 return LINK_CONFIG_IDX(sel_phy_idx);
4839 #ifdef NETDEV_FCOE_WWNN
4840 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4842 struct bnx2x *bp = netdev_priv(dev);
4843 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4846 case NETDEV_FCOE_WWNN:
4847 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4848 cp->fcoe_wwn_node_name_lo);
4850 case NETDEV_FCOE_WWPN:
4851 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4852 cp->fcoe_wwn_port_name_lo);
4855 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4863 /* called with rtnl_lock */
4864 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4866 struct bnx2x *bp = netdev_priv(dev);
4868 if (pci_num_vf(bp->pdev)) {
4869 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4873 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4874 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4878 /* This does not race with packet allocation
4879 * because the actual alloc size is
4880 * only updated as part of load
4884 if (!bnx2x_mtu_allows_gro(new_mtu))
4885 dev->features &= ~NETIF_F_GRO_HW;
4887 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4888 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4890 return bnx2x_reload_if_running(dev);
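/* ndo_fix_features callback: drop feature combinations the hardware cannot
 * honour - LRO and HW GRO depend on Rx checksumming, HW GRO additionally on
 * an MTU that allows it - and, on VF-enabled PFs, revert changes (Rx
 * checksum, loopback) that would require an internal PF reload.
 */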
4893 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4894 netdev_features_t features)
4896 struct bnx2x *bp = netdev_priv(dev);
4898 if (pci_num_vf(bp->pdev)) {
4899 netdev_features_t changed = dev->features ^ features;
4901 /* Revert the requested changes in features if they
4902 * would require internal reload of PF in bnx2x_set_features().
4904 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4905 features &= ~NETIF_F_RXCSUM;
4906 features |= dev->features & NETIF_F_RXCSUM;
4909 if (changed & NETIF_F_LOOPBACK) {
4910 features &= ~NETIF_F_LOOPBACK;
4911 features |= dev->features & NETIF_F_LOOPBACK;
4915 /* TPA requires Rx CSUM offloading */
4916 if (!(features & NETIF_F_RXCSUM))
4917 features &= ~NETIF_F_LRO;
4919 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4920 features &= ~NETIF_F_GRO_HW;
4921 if (features & NETIF_F_GRO_HW)
4922 features &= ~NETIF_F_LRO;
4927 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4929 struct bnx2x *bp = netdev_priv(dev);
4930 netdev_features_t changes = features ^ dev->features;
4931 bool bnx2x_reload = false;
4934 /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4935 if (!pci_num_vf(bp->pdev)) {
4936 if (features & NETIF_F_LOOPBACK) {
4937 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4938 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4939 bnx2x_reload = true;
4942 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4943 bp->link_params.loopback_mode = LOOPBACK_NONE;
4944 bnx2x_reload = true;
4949 /* Don't care about GRO changes */
4950 changes &= ~NETIF_F_GRO;
4953 bnx2x_reload = true;
4956 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4957 dev->features = features;
4958 rc = bnx2x_reload_if_running(dev);
4961 /* else: bnx2x_nic_load() will be called at end of recovery */
4967 void bnx2x_tx_timeout(struct net_device *dev)
4969 struct bnx2x *bp = netdev_priv(dev);
4971 /* We want the dump information logged,
4972 * but calling bnx2x_panic() would kill all chances of recovery.
4975 #ifndef BNX2X_STOP_ON_ERROR
4976 bnx2x_panic_dump(bp, false);
4981 /* This allows the netif to be shutdown gracefully before resetting */
4982 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4985 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4987 struct net_device *dev = pci_get_drvdata(pdev);
4991 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4994 bp = netdev_priv(dev);
4998 pci_save_state(pdev);
5000 if (!netif_running(dev)) {
5005 netif_device_detach(dev);
5007 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5009 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5016 int bnx2x_resume(struct pci_dev *pdev)
5018 struct net_device *dev = pci_get_drvdata(pdev);
5023 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5026 bp = netdev_priv(dev);
5028 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5029 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5035 pci_restore_state(pdev);
5037 if (!netif_running(dev)) {
5042 bnx2x_set_power_state(bp, PCI_D0);
5043 netif_device_attach(dev);
5045 rc = bnx2x_nic_load(bp, LOAD_OPEN);
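/* Seed the CDU context-validation values for the given CID into the ustorm
 * and xstorm sections of the ETH context.
 */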
5052 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5056 BNX2X_ERR("bad context pointer %p\n", cxt);
5060 /* ustorm cxt validation */
5061 cxt->ustorm_ag_context.cdu_usage =
5062 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5063 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5064 /* xcontext validation */
5065 cxt->xstorm_ag_context.cdu_reserved =
5066 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5067 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5070 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5071 u8 fw_sb_id, u8 sb_index,
5074 u32 addr = BAR_CSTRORM_INTMEM +
5075 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5076 REG_WR8(bp, addr, ticks);
5078 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5079 port, fw_sb_id, sb_index, ticks);
5082 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5083 u16 fw_sb_id, u8 sb_index,
5086 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5087 u32 addr = BAR_CSTRORM_INTMEM +
5088 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5089 u8 flags = REG_RD8(bp, addr);
5091 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5092 flags |= enable_flag;
5093 REG_WR8(bp, addr, flags);
5095 "port %x fw_sb_id %d sb_index %d disable %d\n",
5096 port, fw_sb_id, sb_index, disable);
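/* Convert the requested coalescing interval from microseconds into
 * BNX2X_BTR-sized ticks and program it for one status-block index, then
 * enable or disable host coalescing for that index (a zero interval also
 * disables it). For example, assuming BNX2X_BTR is 4, usec = 48 becomes
 * 12 ticks.
 */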
5099 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5100 u8 sb_index, u8 disable, u16 usec)
5102 int port = BP_PORT(bp);
5103 u8 ticks = usec / BNX2X_BTR;
5105 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5107 disable = disable ? 1 : (usec ? 0 : 1);
5108 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5111 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5114 smp_mb__before_atomic();
5115 set_bit(flag, &bp->sp_rtnl_state);
5116 smp_mb__after_atomic();
5117 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5119 schedule_delayed_work(&bp->sp_rtnl_task, 0);