1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
33 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36 static int bnx2x_poll(struct napi_struct *napi, int budget);
38 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 /* Add NAPI objects */
43 for_each_rx_queue_cnic(bp, i) {
44 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45 bnx2x_poll, NAPI_POLL_WEIGHT);
46 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 static void bnx2x_add_all_napi(struct bnx2x *bp)
54 /* Add NAPI objects */
55 for_each_eth_queue(bp, i) {
56 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57 bnx2x_poll, NAPI_POLL_WEIGHT);
58 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 /* Reduce memory usage in kdump environment by using only one queue */
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of bp->fp[to].napi are kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then mem copying the entire
84 * source onto the target. Update txdata pointers and related
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 int old_max_eth_txqs, new_max_eth_txqs;
96 int old_txdata_index = 0, new_txdata_index = 0;
97 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 /* Copy the NAPI object as it has been already initialized */
100 from_fp->napi = to_fp->napi;
102 /* Move bnx2x_fastpath contents */
103 memcpy(to_fp, from_fp, sizeof(*to_fp));
106 /* Retain the tpa_info of the original `to' version as we don't want
107 * 2 FPs to contain the same tpa_info pointer.
109 to_fp->tpa_info = old_tpa_info;
111 /* move sp_objs contents as well, as their indices match fp ones */
112 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 /* move fp_stats contents as well, as their indices match fp ones */
115 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 /* Update txdata pointers in fp and move txdata content accordingly:
118 * Each fp consumes 'max_cos' txdata structures, so the index should be
119 * decremented by max_cos x delta.
122 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 if (from == FCOE_IDX(bp)) {
126 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 memcpy(&bp->bnx2x_txq[new_txdata_index],
131 &bp->bnx2x_txq[old_txdata_index],
132 sizeof(struct bnx2x_fp_txdata));
133 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
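/* Illustrative example (values assumed, not taken from real HW): with
 * 8 ETH queues, max_cos == 3 and the FCoE fp moving from index 8 to 6
 * (delta == 2), old_max_eth_txqs == 24 and new_max_eth_txqs == 18, so
 * the FCoE txdata slides from slot 24 + FCOE_TXQ_IDX_OFFSET down to
 * slot 18 + FCOE_TXQ_IDX_OFFSET.
 */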
137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
140 * @buf: character buffer to fill with the fw name
141 * @buf_len: length of the above buffer
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
147 u8 phy_fw_ver[PHY_FW_VER_LEN];
149 phy_fw_ver[0] = '\0';
150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 phy_fw_ver, PHY_FW_VER_LEN);
152 strlcpy(buf, bp->fw_ver, buf_len);
153 snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
155 (bp->common.bc_ver & 0xff0000) >> 16,
156 (bp->common.bc_ver & 0xff00) >> 8,
157 (bp->common.bc_ver & 0xff),
158 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
168 * @delta: number of eth queues which were not allocated
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175 * backward along the array could cause memory to be overwritten
177 for (cos = 1; cos < bp->max_cos; cos++) {
178 for (i = 0; i < old_eth_num - delta; i++) {
179 struct bnx2x_fastpath *fp = &bp->fp[i];
180 int new_idx = cos * (old_eth_num - delta) + i;
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 sizeof(struct bnx2x_fp_txdata));
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
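/* Illustrative example (assumed values): with old_eth_num == 8,
 * delta == 2 and max_cos == 3, the cos-1 txdata of fp[0] is copied
 * from slot 8 to slot 6, fp[1] from slot 9 to slot 7, and so on;
 * walking the queues forward guarantees each source slot is read
 * before any later iteration overwrites it.
 */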
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191 /* free skb in the packet ring at pos idx
192 * return idx of last bd freed
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 u16 idx, unsigned int *pkts_compl,
196 unsigned int *bytes_compl)
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 struct eth_tx_start_bd *tx_start_bd;
200 struct eth_tx_bd *tx_data_bd;
201 struct sk_buff *skb = tx_buf->skb;
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 u16 split_bd_len = 0;
206 /* prefetch skb end pointer to speed up dev_kfree_skb() */
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
210 txdata->txq_index, idx, tx_buf, skb);
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 BNX2X_ERR("BAD nbd!\n");
221 new_cons = nbd + tx_buf->first_bd;
223 /* Get the next bd */
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 /* Skip a parse bd... */
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
233 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
235 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
239 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
240 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
246 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
247 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
248 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
250 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
257 (*bytes_compl) += skb->len;
260 dev_kfree_skb_any(skb);
261 tx_buf->first_bd = 0;
267 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
269 struct netdev_queue *txq;
270 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
271 unsigned int pkts_compl = 0, bytes_compl = 0;
273 #ifdef BNX2X_STOP_ON_ERROR
274 if (unlikely(bp->panic))
278 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
279 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
280 sw_cons = txdata->tx_pkt_cons;
282 while (sw_cons != hw_cons) {
285 pkt_cons = TX_BD(sw_cons);
287 DP(NETIF_MSG_TX_DONE,
288 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
289 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
291 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
292 &pkts_compl, &bytes_compl);
297 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
299 txdata->tx_pkt_cons = sw_cons;
300 txdata->tx_bd_cons = bd_cons;
302 /* Need to make the tx_bd_cons update visible to start_xmit()
303 * before checking for netif_tx_queue_stopped(). Without the
304 * memory barrier, there is a small possibility that
305 * start_xmit() will miss it and cause the queue to be stopped
307 * On the other hand we need an rmb() here to ensure the proper
308 * ordering of bit testing in the following
309 * netif_tx_queue_stopped(txq) call.
313 if (unlikely(netif_tx_queue_stopped(txq))) {
314 /* Taking tx_lock() is needed to prevent re-enabling the queue
315 * while it's empty. This could have happened if rx_action() gets
316 * suspended in bnx2x_tx_int() after the condition before
317 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
319 * stops the queue->sees fresh tx_bd_cons->releases the queue->
320 * sends some packets consuming the whole queue again->
324 __netif_tx_lock(txq, smp_processor_id());
326 if ((netif_tx_queue_stopped(txq)) &&
327 (bp->state == BNX2X_STATE_OPEN) &&
328 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
329 netif_tx_wake_queue(txq);
331 __netif_tx_unlock(txq);
336 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
339 u16 last_max = fp->last_max_sge;
341 if (SUB_S16(idx, last_max) > 0)
342 fp->last_max_sge = idx;
345 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
347 struct eth_end_agg_rx_cqe *cqe)
349 struct bnx2x *bp = fp->bp;
350 u16 last_max, last_elem, first_elem;
357 /* First mark all used pages */
358 for (i = 0; i < sge_len; i++)
359 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
360 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
362 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
363 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
365 /* Here we assume that the last SGE index is the biggest */
366 prefetch((void *)(fp->sge_mask));
367 bnx2x_update_last_max_sge(fp,
368 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
370 last_max = RX_SGE(fp->last_max_sge);
371 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
372 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
374 /* If ring is not full */
375 if (last_elem + 1 != first_elem)
378 /* Now update the prod */
379 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
380 if (likely(fp->sge_mask[i]))
383 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
384 delta += BIT_VEC64_ELEM_SZ;
388 fp->rx_sge_prod += delta;
389 /* clear page-end entries */
390 bnx2x_clear_sge_mask_next_elems(fp);
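/* The sge_mask is a bit vector of 64-bit elements in which the FW
 * clears a bit for every SGE it consumes.  The loop above walks from
 * the element holding rx_sge_prod towards the element holding the
 * last reported SGE, re-arms each fully-cleared element to all ones
 * and grows delta by BIT_VEC64_ELEM_SZ for it, stopping at the first
 * element that still has outstanding (set) bits.
 */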
393 DP(NETIF_MSG_RX_STATUS,
394 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
395 fp->last_max_sge, fp->rx_sge_prod);
398 /* Get the Toeplitz hash value for the skb using the value from the
399 * CQE (calculated by HW).
401 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
402 const struct eth_fast_path_rx_cqe *cqe,
403 enum pkt_hash_types *rxhash_type)
405 /* Get Toeplitz hash from CQE */
406 if ((bp->dev->features & NETIF_F_RXHASH) &&
407 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
408 enum eth_rss_hash_type htype;
410 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
411 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
412 (htype == TCP_IPV6_HASH_TYPE)) ?
413 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
415 return le32_to_cpu(cqe->rss_hash_result);
417 *rxhash_type = PKT_HASH_TYPE_NONE;
421 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
423 struct eth_fast_path_rx_cqe *cqe)
425 struct bnx2x *bp = fp->bp;
426 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
427 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
428 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
430 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
431 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
433 /* print error if current state != stop */
434 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
435 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
437 /* Try to map an empty data buffer from the aggregation info */
438 mapping = dma_map_single(&bp->pdev->dev,
439 first_buf->data + NET_SKB_PAD,
440 fp->rx_buf_size, DMA_FROM_DEVICE);
442 * ...if it fails - move the skb from the consumer to the producer
443 * and set the current aggregation state as ERROR to drop it
444 * when TPA_STOP arrives.
447 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
448 /* Move the BD from the consumer to the producer */
449 bnx2x_reuse_rx_data(fp, cons, prod);
450 tpa_info->tpa_state = BNX2X_TPA_ERROR;
454 /* move empty data from pool to prod */
455 prod_rx_buf->data = first_buf->data;
456 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
457 /* point prod_bd to new data */
458 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
459 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
461 /* move partial skb from cons to pool (don't unmap yet) */
462 *first_buf = *cons_rx_buf;
464 /* mark bin state as START */
465 tpa_info->parsing_flags =
466 le16_to_cpu(cqe->pars_flags.flags);
467 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
468 tpa_info->tpa_state = BNX2X_TPA_START;
469 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
470 tpa_info->placement_offset = cqe->placement_offset;
471 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
472 if (fp->mode == TPA_MODE_GRO) {
473 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
474 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
475 tpa_info->gro_size = gro_size;
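/* full_page is SGE_PAGES rounded down to a whole multiple of gro_size
 * (the MSS); e.g. with an assumed gro_size of 1400 and SGE_PAGES of
 * 4096, full_page == 2 * 1400 == 2800, so the GRO frags built later
 * stay MSS-aligned.
 */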
478 #ifdef BNX2X_STOP_ON_ERROR
479 fp->tpa_queue_used |= (1 << queue);
480 #ifdef _ASM_GENERIC_INT_L64_H
481 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
483 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
489 /* Timestamp option length allowed for TPA aggregation:
491 * nop nop kind length echo val
493 #define TPA_TSTAMP_OPT_LEN 12
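/* 12 bytes == 2 NOP padding bytes + 1 kind byte + 1 length byte +
 * 4-byte TS value + 4-byte TS echo reply (the usual RFC 7323 layout).
 */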
495 * bnx2x_set_gro_params - compute GRO values
498 * @parsing_flags: parsing flags from the START CQE
499 * @len_on_bd: total length of the first packet for the
501 * @pkt_len: length of all segments
503 * The approximate value of the MSS for this aggregation is calculated
504 * using its first packet.
505 * Compute number of aggregated segments, and gso_type.
507 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
508 u16 len_on_bd, unsigned int pkt_len,
509 u16 num_of_coalesced_segs)
511 /* TPA aggregation won't have either IP options or TCP options
512 * other than timestamp or IPv6 extension headers.
514 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
516 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
517 PRS_FLAG_OVERETH_IPV6) {
518 hdrs_len += sizeof(struct ipv6hdr);
519 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
521 hdrs_len += sizeof(struct iphdr);
522 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
525 /* Check if there was a TCP timestamp; if there is, it will
526 * always be 12 bytes long: nop nop kind length echo val.
528 * Otherwise FW would close the aggregation.
530 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
531 hdrs_len += TPA_TSTAMP_OPT_LEN;
533 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
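/* Illustrative example (assumed sizes): a plain IPv4/TCP aggregation
 * without timestamps has hdrs_len == 14 (ETH) + 20 (IP) + 20 (TCP)
 * == 54, so a 1514-byte first packet yields gso_size == 1460.
 */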
535 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
536 * to skb_shinfo(skb)->gso_segs
538 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
541 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
542 u16 index, gfp_t gfp_mask)
544 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
545 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
546 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549 if (unlikely(page == NULL)) {
550 BNX2X_ERR("Can't alloc sge\n");
554 mapping = dma_map_page(&bp->pdev->dev, page, 0,
555 SGE_PAGES, DMA_FROM_DEVICE);
556 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
557 __free_pages(page, PAGES_PER_SGE_SHIFT);
558 BNX2X_ERR("Can't map sge\n");
563 dma_unmap_addr_set(sw_buf, mapping, mapping);
565 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
566 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
571 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
572 struct bnx2x_agg_info *tpa_info,
575 struct eth_end_agg_rx_cqe *cqe,
578 struct sw_rx_page *rx_pg, old_rx_pg;
579 u32 i, frag_len, frag_size;
580 int err, j, frag_id = 0;
581 u16 len_on_bd = tpa_info->len_on_bd;
582 u16 full_page = 0, gro_size = 0;
584 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
586 if (fp->mode == TPA_MODE_GRO) {
587 gro_size = tpa_info->gro_size;
588 full_page = tpa_info->full_page;
591 /* This is needed in order to enable forwarding support */
593 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
594 le16_to_cpu(cqe->pkt_len),
595 le16_to_cpu(cqe->num_of_coalesced_segs));
597 #ifdef BNX2X_STOP_ON_ERROR
598 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
599 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
601 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
607 /* Run through the SGL and compose the fragmented skb */
608 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
609 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
611 /* FW gives the indices of the SGE as if the ring is an array
612 (meaning that "next" element will consume 2 indices) */
613 if (fp->mode == TPA_MODE_GRO)
614 frag_len = min_t(u32, frag_size, (u32)full_page);
616 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
618 rx_pg = &fp->rx_page_ring[sge_idx];
621 /* If we fail to allocate a substitute page, we simply stop
622 where we are and drop the whole packet */
623 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
625 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
629 /* Unmap the page as we're going to pass it to the stack */
630 dma_unmap_page(&bp->pdev->dev,
631 dma_unmap_addr(&old_rx_pg, mapping),
632 SGE_PAGES, DMA_FROM_DEVICE);
633 /* Add one frag and update the appropriate fields in the skb */
634 if (fp->mode == TPA_MODE_LRO)
635 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
639 for (rem = frag_len; rem > 0; rem -= gro_size) {
640 int len = rem > gro_size ? gro_size : rem;
641 skb_fill_page_desc(skb, frag_id++,
642 old_rx_pg.page, offset, len);
644 get_page(old_rx_pg.page);
649 skb->data_len += frag_len;
650 skb->truesize += SGE_PAGES;
651 skb->len += frag_len;
653 frag_size -= frag_len;
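/* frag_size starts as the aggregated payload that did not fit on the
 * first BD (pkt_len - len_on_bd) and shrinks by frag_len every
 * iteration, so the final SGE contributes only the remaining bytes
 * rather than a full page.
 */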
659 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
661 if (fp->rx_frag_size)
662 put_page(virt_to_head_page(data));
667 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
669 if (fp->rx_frag_size) {
670 /* GFP_KERNEL allocations are used only during initialization */
671 if (unlikely(gfp_mask & __GFP_WAIT))
672 return (void *)__get_free_page(gfp_mask);
674 return netdev_alloc_frag(fp->rx_frag_size);
677 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
681 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
683 const struct iphdr *iph = ip_hdr(skb);
686 skb_set_transport_header(skb, sizeof(struct iphdr));
689 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
690 iph->saddr, iph->daddr, 0);
693 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
695 struct ipv6hdr *iph = ipv6_hdr(skb);
698 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
701 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
702 &iph->saddr, &iph->daddr, 0);
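/* Note: both helpers above seed th->check with the complemented
 * pseudo-header checksum, mirroring what the stack does for a GSO
 * packet with checksum offload; tcp_gro_complete() can then finish
 * the coalesced skb so it may later be re-segmented, e.g. when
 * forwarded.
 */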
705 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
706 void (*gro_func)(struct bnx2x*, struct sk_buff*))
708 skb_set_network_header(skb, 0);
710 tcp_gro_complete(skb);
714 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
718 if (skb_shinfo(skb)->gso_size) {
719 switch (be16_to_cpu(skb->protocol)) {
721 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
724 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
727 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
728 be16_to_cpu(skb->protocol));
732 skb_record_rx_queue(skb, fp->rx_queue);
733 napi_gro_receive(&fp->napi, skb);
736 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
737 struct bnx2x_agg_info *tpa_info,
739 struct eth_end_agg_rx_cqe *cqe,
742 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
743 u8 pad = tpa_info->placement_offset;
744 u16 len = tpa_info->len_on_bd;
745 struct sk_buff *skb = NULL;
746 u8 *new_data, *data = rx_buf->data;
747 u8 old_tpa_state = tpa_info->tpa_state;
749 tpa_info->tpa_state = BNX2X_TPA_STOP;
751 /* If there was an error during the handling of the TPA_START -
752 * drop this aggregation.
754 if (old_tpa_state == BNX2X_TPA_ERROR)
757 /* Try to allocate the new data */
758 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
759 /* Unmap skb in the pool anyway, as we are going to change
760 pool entry status to BNX2X_TPA_STOP even if new skb allocation
762 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
763 fp->rx_buf_size, DMA_FROM_DEVICE);
764 if (likely(new_data))
765 skb = build_skb(data, fp->rx_frag_size);
768 #ifdef BNX2X_STOP_ON_ERROR
769 if (pad + len > fp->rx_buf_size) {
770 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
771 pad, len, fp->rx_buf_size);
777 skb_reserve(skb, pad + NET_SKB_PAD);
779 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
781 skb->protocol = eth_type_trans(skb, bp->dev);
782 skb->ip_summed = CHECKSUM_UNNECESSARY;
784 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
785 skb, cqe, cqe_idx)) {
786 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
787 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
788 bnx2x_gro_receive(bp, fp, skb);
790 DP(NETIF_MSG_RX_STATUS,
791 "Failed to allocate new pages - dropping packet!\n");
792 dev_kfree_skb_any(skb);
795 /* put new data in bin */
796 rx_buf->data = new_data;
800 bnx2x_frag_free(fp, new_data);
802 /* drop the packet and keep the buffer in the bin */
803 DP(NETIF_MSG_RX_STATUS,
804 "Failed to allocate or map a new skb - dropping packet!\n");
805 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
808 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
809 u16 index, gfp_t gfp_mask)
812 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
813 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
816 data = bnx2x_frag_alloc(fp, gfp_mask);
817 if (unlikely(data == NULL))
820 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
823 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
824 bnx2x_frag_free(fp, data);
825 BNX2X_ERR("Can't map rx data\n");
830 dma_unmap_addr_set(rx_buf, mapping, mapping);
832 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
833 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
839 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
840 struct bnx2x_fastpath *fp,
841 struct bnx2x_eth_q_stats *qstats)
843 /* Do nothing if no L4 csum validation was done.
844 * We do not check whether IP csum was validated. For IPv4 we assume
845 * that if the card got as far as validating the L4 csum, it also
846 * validated the IP csum. IPv6 has no IP csum.
848 if (cqe->fast_path_cqe.status_flags &
849 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
852 /* If L4 validation was done, check if an error was found. */
854 if (cqe->fast_path_cqe.type_error_flags &
855 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
856 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
857 qstats->hw_csum_err++;
859 skb->ip_summed = CHECKSUM_UNNECESSARY;
862 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
864 struct bnx2x *bp = fp->bp;
865 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
866 u16 sw_comp_cons, sw_comp_prod;
868 union eth_rx_cqe *cqe;
869 struct eth_fast_path_rx_cqe *cqe_fp;
871 #ifdef BNX2X_STOP_ON_ERROR
872 if (unlikely(bp->panic))
878 bd_cons = fp->rx_bd_cons;
879 bd_prod = fp->rx_bd_prod;
880 bd_prod_fw = bd_prod;
881 sw_comp_cons = fp->rx_comp_cons;
882 sw_comp_prod = fp->rx_comp_prod;
884 comp_ring_cons = RCQ_BD(sw_comp_cons);
885 cqe = &fp->rx_comp_ring[comp_ring_cons];
886 cqe_fp = &cqe->fast_path_cqe;
888 DP(NETIF_MSG_RX_STATUS,
889 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
891 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
892 struct sw_rx_bd *rx_buf = NULL;
895 enum eth_rx_cqe_type cqe_fp_type;
899 enum pkt_hash_types rxhash_type;
901 #ifdef BNX2X_STOP_ON_ERROR
902 if (unlikely(bp->panic))
906 bd_prod = RX_BD(bd_prod);
907 bd_cons = RX_BD(bd_cons);
909 /* A rmb() is required to ensure that the CQE is not read
910 * before it is written by the adapter DMA. PCI ordering
911 * rules will make sure the other fields are written before
912 * the marker at the end of struct eth_fast_path_rx_cqe
913 * but without rmb() a weakly ordered processor can process
914 * stale data. Without the barrier TPA state-machine might
915 * enter inconsistent state and kernel stack might be
916 * provided with incorrect packet description - these lead
917 * to various kernel crashes.
921 cqe_fp_flags = cqe_fp->type_error_flags;
922 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
924 DP(NETIF_MSG_RX_STATUS,
925 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
926 CQE_TYPE(cqe_fp_flags),
927 cqe_fp_flags, cqe_fp->status_flags,
928 le32_to_cpu(cqe_fp->rss_hash_result),
929 le16_to_cpu(cqe_fp->vlan_tag),
930 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
932 /* is this a slowpath msg? */
933 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
934 bnx2x_sp_event(fp, cqe);
938 rx_buf = &fp->rx_buf_ring[bd_cons];
941 if (!CQE_TYPE_FAST(cqe_fp_type)) {
942 struct bnx2x_agg_info *tpa_info;
943 u16 frag_size, pages;
944 #ifdef BNX2X_STOP_ON_ERROR
946 if (fp->disable_tpa &&
947 (CQE_TYPE_START(cqe_fp_type) ||
948 CQE_TYPE_STOP(cqe_fp_type)))
949 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
950 CQE_TYPE(cqe_fp_type));
953 if (CQE_TYPE_START(cqe_fp_type)) {
954 u16 queue = cqe_fp->queue_index;
955 DP(NETIF_MSG_RX_STATUS,
956 "calling tpa_start on queue %d\n",
959 bnx2x_tpa_start(fp, queue,
965 queue = cqe->end_agg_cqe.queue_index;
966 tpa_info = &fp->tpa_info[queue];
967 DP(NETIF_MSG_RX_STATUS,
968 "calling tpa_stop on queue %d\n",
971 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
974 if (fp->mode == TPA_MODE_GRO)
975 pages = (frag_size + tpa_info->full_page - 1) /
978 pages = SGE_PAGE_ALIGN(frag_size) >>
981 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
982 &cqe->end_agg_cqe, comp_ring_cons);
983 #ifdef BNX2X_STOP_ON_ERROR
988 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
992 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
993 pad = cqe_fp->placement_offset;
994 dma_sync_single_for_cpu(&bp->pdev->dev,
995 dma_unmap_addr(rx_buf, mapping),
996 pad + RX_COPY_THRESH,
999 prefetch(data + pad); /* speedup eth_type_trans() */
1000 /* is this an error packet? */
1001 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1002 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1003 "ERROR flags %x rx packet %u\n",
1004 cqe_fp_flags, sw_comp_cons);
1005 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1009 /* Since we don't have a jumbo ring
1010 * copy small packets if mtu > 1500
1012 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1013 (len <= RX_COPY_THRESH)) {
1014 skb = netdev_alloc_skb_ip_align(bp->dev, len);
1016 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1017 "ERROR packet dropped because of alloc failure\n");
1018 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1021 memcpy(skb->data, data + pad, len);
1022 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1024 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1025 GFP_ATOMIC) == 0)) {
1026 dma_unmap_single(&bp->pdev->dev,
1027 dma_unmap_addr(rx_buf, mapping),
1030 skb = build_skb(data, fp->rx_frag_size);
1031 if (unlikely(!skb)) {
1032 bnx2x_frag_free(fp, data);
1033 bnx2x_fp_qstats(bp, fp)->
1034 rx_skb_alloc_failed++;
1037 skb_reserve(skb, pad);
1039 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1040 "ERROR packet dropped because of alloc failure\n");
1041 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1043 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1049 skb->protocol = eth_type_trans(skb, bp->dev);
1051 /* Set Toeplitz hash for a non-LRO skb */
1052 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1053 skb_set_hash(skb, rxhash, rxhash_type);
1055 skb_checksum_none_assert(skb);
1057 if (bp->dev->features & NETIF_F_RXCSUM)
1058 bnx2x_csum_validate(skb, cqe, fp,
1059 bnx2x_fp_qstats(bp, fp));
1061 skb_record_rx_queue(skb, fp->rx_queue);
1063 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1065 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1066 le16_to_cpu(cqe_fp->vlan_tag));
1068 skb_mark_napi_id(skb, &fp->napi);
1070 if (bnx2x_fp_ll_polling(fp))
1071 netif_receive_skb(skb);
1073 napi_gro_receive(&fp->napi, skb);
1075 rx_buf->data = NULL;
1077 bd_cons = NEXT_RX_IDX(bd_cons);
1078 bd_prod = NEXT_RX_IDX(bd_prod);
1079 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1082 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1083 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1085 /* mark CQE as free */
1086 BNX2X_SEED_CQE(cqe_fp);
1088 if (rx_pkt == budget)
1091 comp_ring_cons = RCQ_BD(sw_comp_cons);
1092 cqe = &fp->rx_comp_ring[comp_ring_cons];
1093 cqe_fp = &cqe->fast_path_cqe;
1096 fp->rx_bd_cons = bd_cons;
1097 fp->rx_bd_prod = bd_prod_fw;
1098 fp->rx_comp_cons = sw_comp_cons;
1099 fp->rx_comp_prod = sw_comp_prod;
1101 /* Update producers */
1102 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1105 fp->rx_pkt += rx_pkt;
1111 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1113 struct bnx2x_fastpath *fp = fp_cookie;
1114 struct bnx2x *bp = fp->bp;
1118 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1119 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1121 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1123 #ifdef BNX2X_STOP_ON_ERROR
1124 if (unlikely(bp->panic))
1128 /* Handle Rx and Tx according to MSI-X vector */
1129 for_each_cos_in_tx_queue(fp, cos)
1130 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1132 prefetch(&fp->sb_running_index[SM_RX_ID]);
1133 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1138 /* HW Lock for shared dual port PHYs */
1139 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1141 mutex_lock(&bp->port.phy_mutex);
1143 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1146 void bnx2x_release_phy_lock(struct bnx2x *bp)
1148 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1150 mutex_unlock(&bp->port.phy_mutex);
1153 /* calculates MF speed according to current line speed and MF configuration */
1154 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1156 u16 line_speed = bp->link_vars.line_speed;
1158 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1159 bp->mf_config[BP_VN(bp)]);
1161 /* Calculate the current MAX line speed limit for the MF
1165 line_speed = (line_speed * maxCfg) / 100;
1166 else { /* SD mode */
1167 u16 vn_max_rate = maxCfg * 100;
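/* Here (SD mode) maxCfg is taken as a rate in units of 100 Mbps, so
 * an assumed maxCfg of 10 caps the function at 1000 Mbps, while the
 * branch above treats maxCfg as a percentage of the physical line
 * speed.
 */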
1169 if (vn_max_rate < line_speed)
1170 line_speed = vn_max_rate;
1178 * bnx2x_fill_report_data - fill link report data to report
1180 * @bp: driver handle
1181 * @data: link state to update
1183 * It uses non-atomic bit operations because it is called under the mutex.
1185 static void bnx2x_fill_report_data(struct bnx2x *bp,
1186 struct bnx2x_link_report_data *data)
1188 memset(data, 0, sizeof(*data));
1191 /* Fill the report data: effective line speed */
1192 data->line_speed = bnx2x_get_mf_speed(bp);
1195 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1196 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1197 &data->link_report_flags);
1199 if (!BNX2X_NUM_ETH_QUEUES(bp))
1200 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1201 &data->link_report_flags);
1204 if (bp->link_vars.duplex == DUPLEX_FULL)
1205 __set_bit(BNX2X_LINK_REPORT_FD,
1206 &data->link_report_flags);
1208 /* Rx Flow Control is ON */
1209 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1210 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1211 &data->link_report_flags);
1213 /* Tx Flow Control is ON */
1214 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1215 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1216 &data->link_report_flags);
1218 *data = bp->vf_link_vars;
1223 * bnx2x_link_report - report link status to OS.
1225 * @bp: driver handle
1227 * Calls the __bnx2x_link_report() under the same locking scheme
1228 * as the link/PHY state managing code to ensure a consistent link
1232 void bnx2x_link_report(struct bnx2x *bp)
1234 bnx2x_acquire_phy_lock(bp);
1235 __bnx2x_link_report(bp);
1236 bnx2x_release_phy_lock(bp);
1240 * __bnx2x_link_report - report link status to OS.
1242 * @bp: driver handle
1244 * Non-atomic implementation.
1245 * Should be called under the phy_lock.
1247 void __bnx2x_link_report(struct bnx2x *bp)
1249 struct bnx2x_link_report_data cur_data;
1252 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1253 bnx2x_read_mf_cfg(bp);
1255 /* Read the current link report info */
1256 bnx2x_fill_report_data(bp, &cur_data);
1258 /* Don't report link down or exactly the same link status twice */
1259 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1260 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1261 &bp->last_reported_link.link_report_flags) &&
1262 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1263 &cur_data.link_report_flags)))
1268 /* We are going to report new link parameters now -
1269 * remember the current data for the next time.
1271 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1273 /* propagate status to VFs */
1275 bnx2x_iov_link_update(bp);
1277 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1278 &cur_data.link_report_flags)) {
1279 netif_carrier_off(bp->dev);
1280 netdev_err(bp->dev, "NIC Link is Down\n");
1286 netif_carrier_on(bp->dev);
1288 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1289 &cur_data.link_report_flags))
1294 /* Handle the FC at the end so that only these flags would be
1295 * possibly set. This way we may easily check if there is no FC
1298 if (cur_data.link_report_flags) {
1299 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1300 &cur_data.link_report_flags)) {
1301 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1302 &cur_data.link_report_flags))
1303 flow = "ON - receive & transmit";
1305 flow = "ON - receive";
1307 flow = "ON - transmit";
1312 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1313 cur_data.line_speed, duplex, flow);
1317 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1321 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1322 struct eth_rx_sge *sge;
1324 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1326 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1327 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1330 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1331 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1335 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1336 struct bnx2x_fastpath *fp, int last)
1340 for (i = 0; i < last; i++) {
1341 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1342 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1343 u8 *data = first_buf->data;
1346 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1349 if (tpa_info->tpa_state == BNX2X_TPA_START)
1350 dma_unmap_single(&bp->pdev->dev,
1351 dma_unmap_addr(first_buf, mapping),
1352 fp->rx_buf_size, DMA_FROM_DEVICE);
1353 bnx2x_frag_free(fp, data);
1354 first_buf->data = NULL;
1358 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1362 for_each_rx_queue_cnic(bp, j) {
1363 struct bnx2x_fastpath *fp = &bp->fp[j];
1367 /* Activate BD ring */
1369 * this will generate an interrupt (to the TSTORM)
1370 * must only be done after chip is initialized
1372 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1377 void bnx2x_init_rx_rings(struct bnx2x *bp)
1379 int func = BP_FUNC(bp);
1383 /* Allocate TPA resources */
1384 for_each_eth_queue(bp, j) {
1385 struct bnx2x_fastpath *fp = &bp->fp[j];
1388 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1390 if (!fp->disable_tpa) {
1391 /* Fill the per-aggregation pool */
1392 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1393 struct bnx2x_agg_info *tpa_info =
1395 struct sw_rx_bd *first_buf =
1396 &tpa_info->first_buf;
1399 bnx2x_frag_alloc(fp, GFP_KERNEL);
1400 if (!first_buf->data) {
1401 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1403 bnx2x_free_tpa_pool(bp, fp, i);
1404 fp->disable_tpa = 1;
1407 dma_unmap_addr_set(first_buf, mapping, 0);
1408 tpa_info->tpa_state = BNX2X_TPA_STOP;
1411 /* "next page" elements initialization */
1412 bnx2x_set_next_page_sgl(fp);
1414 /* set SGEs bit mask */
1415 bnx2x_init_sge_ring_bit_mask(fp);
1417 /* Allocate SGEs and initialize the ring elements */
1418 for (i = 0, ring_prod = 0;
1419 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1421 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1423 BNX2X_ERR("was only able to allocate %d rx sges\n",
1425 BNX2X_ERR("disabling TPA for queue[%d]\n",
1427 /* Cleanup already allocated elements */
1428 bnx2x_free_rx_sge_range(bp, fp,
1430 bnx2x_free_tpa_pool(bp, fp,
1432 fp->disable_tpa = 1;
1436 ring_prod = NEXT_SGE_IDX(ring_prod);
1439 fp->rx_sge_prod = ring_prod;
1443 for_each_eth_queue(bp, j) {
1444 struct bnx2x_fastpath *fp = &bp->fp[j];
1448 /* Activate BD ring */
1450 * this will generate an interrupt (to the TSTORM)
1451 * must only be done after chip is initialized
1453 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1459 if (CHIP_IS_E1(bp)) {
1460 REG_WR(bp, BAR_USTRORM_INTMEM +
1461 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1462 U64_LO(fp->rx_comp_mapping));
1463 REG_WR(bp, BAR_USTRORM_INTMEM +
1464 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1465 U64_HI(fp->rx_comp_mapping));
1470 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1473 struct bnx2x *bp = fp->bp;
1475 for_each_cos_in_tx_queue(fp, cos) {
1476 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1477 unsigned pkts_compl = 0, bytes_compl = 0;
1479 u16 sw_prod = txdata->tx_pkt_prod;
1480 u16 sw_cons = txdata->tx_pkt_cons;
1482 while (sw_cons != sw_prod) {
1483 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1484 &pkts_compl, &bytes_compl);
1488 netdev_tx_reset_queue(
1489 netdev_get_tx_queue(bp->dev,
1490 txdata->txq_index));
1494 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1498 for_each_tx_queue_cnic(bp, i) {
1499 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1503 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1507 for_each_eth_queue(bp, i) {
1508 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1512 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1514 struct bnx2x *bp = fp->bp;
1517 /* ring wasn't allocated */
1518 if (fp->rx_buf_ring == NULL)
1521 for (i = 0; i < NUM_RX_BD; i++) {
1522 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1523 u8 *data = rx_buf->data;
1527 dma_unmap_single(&bp->pdev->dev,
1528 dma_unmap_addr(rx_buf, mapping),
1529 fp->rx_buf_size, DMA_FROM_DEVICE);
1531 rx_buf->data = NULL;
1532 bnx2x_frag_free(fp, data);
1536 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1540 for_each_rx_queue_cnic(bp, j) {
1541 bnx2x_free_rx_bds(&bp->fp[j]);
1545 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1549 for_each_eth_queue(bp, j) {
1550 struct bnx2x_fastpath *fp = &bp->fp[j];
1552 bnx2x_free_rx_bds(fp);
1554 if (!fp->disable_tpa)
1555 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1559 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1561 bnx2x_free_tx_skbs_cnic(bp);
1562 bnx2x_free_rx_skbs_cnic(bp);
1565 void bnx2x_free_skbs(struct bnx2x *bp)
1567 bnx2x_free_tx_skbs(bp);
1568 bnx2x_free_rx_skbs(bp);
1571 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1573 /* load old values */
1574 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1576 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1577 /* leave all but MAX value */
1578 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1580 /* set new MAX value */
1581 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1582 & FUNC_MF_CFG_MAX_BW_MASK;
1584 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1589 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1591 * @bp: driver handle
1592 * @nvecs: number of vectors to be released
1594 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1598 if (nvecs == offset)
1601 /* VFs don't have a default SB */
1603 free_irq(bp->msix_table[offset].vector, bp->dev);
1604 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1605 bp->msix_table[offset].vector);
1609 if (CNIC_SUPPORT(bp)) {
1610 if (nvecs == offset)
1615 for_each_eth_queue(bp, i) {
1616 if (nvecs == offset)
1618 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1619 i, bp->msix_table[offset].vector);
1621 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1625 void bnx2x_free_irq(struct bnx2x *bp)
1627 if (bp->flags & USING_MSIX_FLAG &&
1628 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1629 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1631 /* vfs don't have a default status block */
1635 bnx2x_free_msix_irqs(bp, nvecs);
1637 free_irq(bp->dev->irq, bp->dev);
1641 int bnx2x_enable_msix(struct bnx2x *bp)
1643 int msix_vec = 0, i, rc;
1645 /* VFs don't have a default status block */
1647 bp->msix_table[msix_vec].entry = msix_vec;
1648 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1649 bp->msix_table[0].entry);
1653 /* Cnic requires an msix vector for itself */
1654 if (CNIC_SUPPORT(bp)) {
1655 bp->msix_table[msix_vec].entry = msix_vec;
1656 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1657 msix_vec, bp->msix_table[msix_vec].entry);
1661 /* We need separate vectors for ETH queues only (not FCoE) */
1662 for_each_eth_queue(bp, i) {
1663 bp->msix_table[msix_vec].entry = msix_vec;
1664 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1665 msix_vec, msix_vec, i);
1669 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1672 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1673 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1675 * reconfigure number of tx/rx queues according to available
1678 if (rc == -ENOSPC) {
1679 /* Get by with single vector */
1680 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1682 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1687 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1688 bp->flags |= USING_SINGLE_MSIX_FLAG;
1690 BNX2X_DEV_INFO("set number of queues to 1\n");
1691 bp->num_ethernet_queues = 1;
1692 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1693 } else if (rc < 0) {
1694 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1696 } else if (rc < msix_vec) {
1697 /* how many fewer vectors will we have? */
1698 int diff = msix_vec - rc;
1700 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1703 * decrease number of queues by number of unallocated entries
1705 bp->num_ethernet_queues -= diff;
1706 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1708 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1712 bp->flags |= USING_MSIX_FLAG;
1717 /* fall back to INTx if not enough memory */
1719 bp->flags |= DISABLE_MSI_FLAG;
1724 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1726 int i, rc, offset = 0;
1728 /* no default status block for vf */
1730 rc = request_irq(bp->msix_table[offset++].vector,
1731 bnx2x_msix_sp_int, 0,
1732 bp->dev->name, bp->dev);
1734 BNX2X_ERR("request sp irq failed\n");
1739 if (CNIC_SUPPORT(bp))
1742 for_each_eth_queue(bp, i) {
1743 struct bnx2x_fastpath *fp = &bp->fp[i];
1744 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1747 rc = request_irq(bp->msix_table[offset].vector,
1748 bnx2x_msix_fp_int, 0, fp->name, fp);
1750 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1751 bp->msix_table[offset].vector, rc);
1752 bnx2x_free_msix_irqs(bp, offset);
1759 i = BNX2X_NUM_ETH_QUEUES(bp);
1761 offset = 1 + CNIC_SUPPORT(bp);
1762 netdev_info(bp->dev,
1763 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1764 bp->msix_table[0].vector,
1765 0, bp->msix_table[offset].vector,
1766 i - 1, bp->msix_table[offset + i - 1].vector);
1768 offset = CNIC_SUPPORT(bp);
1769 netdev_info(bp->dev,
1770 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1771 0, bp->msix_table[offset].vector,
1772 i - 1, bp->msix_table[offset + i - 1].vector);
1777 int bnx2x_enable_msi(struct bnx2x *bp)
1781 rc = pci_enable_msi(bp->pdev);
1783 BNX2X_DEV_INFO("MSI is not attainable\n");
1786 bp->flags |= USING_MSI_FLAG;
1791 static int bnx2x_req_irq(struct bnx2x *bp)
1793 unsigned long flags;
1796 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1799 flags = IRQF_SHARED;
1801 if (bp->flags & USING_MSIX_FLAG)
1802 irq = bp->msix_table[0].vector;
1804 irq = bp->pdev->irq;
1806 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1809 static int bnx2x_setup_irqs(struct bnx2x *bp)
1812 if (bp->flags & USING_MSIX_FLAG &&
1813 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1814 rc = bnx2x_req_msix_irqs(bp);
1818 rc = bnx2x_req_irq(bp);
1820 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1823 if (bp->flags & USING_MSI_FLAG) {
1824 bp->dev->irq = bp->pdev->irq;
1825 netdev_info(bp->dev, "using MSI IRQ %d\n",
1828 if (bp->flags & USING_MSIX_FLAG) {
1829 bp->dev->irq = bp->msix_table[0].vector;
1830 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1838 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1842 for_each_rx_queue_cnic(bp, i) {
1843 bnx2x_fp_init_lock(&bp->fp[i]);
1844 napi_enable(&bnx2x_fp(bp, i, napi));
1848 static void bnx2x_napi_enable(struct bnx2x *bp)
1852 for_each_eth_queue(bp, i) {
1853 bnx2x_fp_init_lock(&bp->fp[i]);
1854 napi_enable(&bnx2x_fp(bp, i, napi));
1858 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1862 for_each_rx_queue_cnic(bp, i) {
1863 napi_disable(&bnx2x_fp(bp, i, napi));
1864 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1865 usleep_range(1000, 2000);
1869 static void bnx2x_napi_disable(struct bnx2x *bp)
1873 for_each_eth_queue(bp, i) {
1874 napi_disable(&bnx2x_fp(bp, i, napi));
1875 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1876 usleep_range(1000, 2000);
1880 void bnx2x_netif_start(struct bnx2x *bp)
1882 if (netif_running(bp->dev)) {
1883 bnx2x_napi_enable(bp);
1884 if (CNIC_LOADED(bp))
1885 bnx2x_napi_enable_cnic(bp);
1886 bnx2x_int_enable(bp);
1887 if (bp->state == BNX2X_STATE_OPEN)
1888 netif_tx_wake_all_queues(bp->dev);
1892 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1894 bnx2x_int_disable_sync(bp, disable_hw);
1895 bnx2x_napi_disable(bp);
1896 if (CNIC_LOADED(bp))
1897 bnx2x_napi_disable_cnic(bp);
1900 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1901 void *accel_priv, select_queue_fallback_t fallback)
1903 struct bnx2x *bp = netdev_priv(dev);
1905 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1906 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1907 u16 ether_type = ntohs(hdr->h_proto);
1909 /* Skip VLAN tag if present */
1910 if (ether_type == ETH_P_8021Q) {
1911 struct vlan_ethhdr *vhdr =
1912 (struct vlan_ethhdr *)skb->data;
1914 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1917 /* If ethertype is FCoE or FIP - use FCoE ring */
1918 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1919 return bnx2x_fcoe_tx(bp, txq_index);
1922 /* select a non-FCoE queue */
1923 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1926 void bnx2x_set_num_queues(struct bnx2x *bp)
1929 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1931 /* override in STORAGE SD modes */
1932 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1933 bp->num_ethernet_queues = 1;
1935 /* Add special queues */
1936 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1937 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1939 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1943 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1945 * @bp: Driver handle
1947 * We currently support at most 16 Tx queues for each CoS, thus we will
1948 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1951 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1952 * index after all ETH L2 indices.
1954 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1955 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1956 * 16..31,...) with indices that are not coupled with any real Tx queue.
1958 * The proper configuration of skb->queue_mapping is handled by
1959 * bnx2x_select_queue() and __skb_tx_hash().
1961 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1962 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1964 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1968 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1969 rx = BNX2X_NUM_ETH_QUEUES(bp);
1971 /* account for fcoe queue */
1972 if (include_cnic && !NO_FCOE(bp)) {
1977 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1979 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1982 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1984 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1988 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1994 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1998 for_each_queue(bp, i) {
1999 struct bnx2x_fastpath *fp = &bp->fp[i];
2002 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2005 * Although there are no IP frames expected to arrive to
2006 * this ring we still want to add an
2007 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2010 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2013 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2014 IP_HEADER_ALIGNMENT_PADDING +
2017 BNX2X_FW_RX_ALIGN_END;
2018 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2019 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2020 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2022 fp->rx_frag_size = 0;
2026 static int bnx2x_init_rss(struct bnx2x *bp)
2029 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2031 /* Prepare the initial contents for the indirection table if RSS is
2034 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2035 bp->rss_conf_obj.ind_table[i] =
2037 ethtool_rxfh_indir_default(i, num_eth_queues);
2040 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2041 * per-port, so if explicit configuration is needed, do it only
2044 * For 57712 and newer on the other hand it's a per-function
2047 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2050 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2051 bool config_hash, bool enable)
2053 struct bnx2x_config_rss_params params = {NULL};
2055 /* Although RSS is meaningless when there is a single HW queue we
2056 * still need it enabled in order to have HW Rx hash generated.
2058 * if (!is_eth_multi(bp))
2059 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2062 params.rss_obj = rss_obj;
2064 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2067 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2069 /* RSS configuration */
2070 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2071 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2072 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2073 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2074 if (rss_obj->udp_rss_v4)
2075 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2076 if (rss_obj->udp_rss_v6)
2077 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2079 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2083 params.rss_result_mask = MULTI_MASK;
2085 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2089 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2090 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2094 return bnx2x_config_rss(bp, ¶ms);
2096 return bnx2x_vfpf_config_rss(bp, ¶ms);
2099 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2101 struct bnx2x_func_state_params func_params = {NULL};
2103 /* Prepare parameters for function state transitions */
2104 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2106 func_params.f_obj = &bp->func_obj;
2107 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2109 func_params.params.hw_init.load_phase = load_code;
2111 return bnx2x_func_state_change(bp, &func_params);
2115 * Cleans the objects that have internal lists without sending
2116 * ramrods. Should be run when interrupts are disabled.
2118 void bnx2x_squeeze_objects(struct bnx2x *bp)
2121 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2122 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2123 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2125 /***************** Cleanup MACs' object first *************************/
2127 /* Wait for completion of requested commands */
2128 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2129 /* Perform a dry cleanup */
2130 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2132 /* Clean ETH primary MAC */
2133 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2134 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2137 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2139 /* Cleanup UC list */
2141 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2142 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2145 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2147 /***************** Now clean mcast object *****************************/
2148 rparam.mcast_obj = &bp->mcast_obj;
2149 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2151 /* Add a DEL command... - Since we're doing a driver cleanup only,
2152 * we take a lock surrounding both the initial send and the CONTs,
2153 * as we don't want a true completion to disrupt us in the middle.
2155 netif_addr_lock_bh(bp->dev);
2156 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2158 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2161 /* ...and wait until all pending commands are cleared */
2162 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2165 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2167 netif_addr_unlock_bh(bp->dev);
2171 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2173 netif_addr_unlock_bh(bp->dev);
2176 #ifndef BNX2X_STOP_ON_ERROR
2177 #define LOAD_ERROR_EXIT(bp, label) \
2179 (bp)->state = BNX2X_STATE_ERROR; \
2183 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2185 bp->cnic_loaded = false; \
2188 #else /*BNX2X_STOP_ON_ERROR*/
2189 #define LOAD_ERROR_EXIT(bp, label) \
2191 (bp)->state = BNX2X_STATE_ERROR; \
2195 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2197 bp->cnic_loaded = false; \
2201 #endif /*BNX2X_STOP_ON_ERROR*/
2203 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2205 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2206 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2210 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2212 int num_groups, vf_headroom = 0;
2213 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2215 /* number of queues for statistics is number of eth queues + FCoE */
2216 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2218 /* Total number of FW statistics requests =
2219 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2220 * and fcoe l2 queue) stats + num of queues (which includes another 1
2221 * for fcoe l2 queue if applicable)
2223 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2225 /* vf stats appear in the request list, but their data is allocated by
2226 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2227 * it is used to determine where to place the vf stats queries in the
2231 vf_headroom = bnx2x_vf_headroom(bp);
2233 /* Request is built from stats_query_header and an array of
2234 * stats_query_cmd_group each of which contains
2235 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2236 * configured in the stats_query_header.
2239 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2240 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2243 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2244 bp->fw_stats_num, vf_headroom, num_groups);
2245 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2246 num_groups * sizeof(struct stats_query_cmd_group);
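/* The num_groups expression above is a ceiling division: e.g. an
 * assumed 20 pending queries with a group size of 16 rules need
 * 20 / 16 + 1 == 2 stats_query_cmd_group entries.
 */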
2248 /* Data for statistics requests + stats_counter
2249 * stats_counter holds per-STORM counters that are incremented
2250 * when STORM has finished with the current request.
2251 * Memory for FCoE offloaded statistics is counted anyway,
2252 * even if they will not be sent.
2253 * VF stats are not accounted for here as the data of VF stats is stored
2254 * in memory allocated by the VF, not here.
2256 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2257 sizeof(struct per_pf_stats) +
2258 sizeof(struct fcoe_statistics_params) +
2259 sizeof(struct per_queue_stats) * num_queue_stats +
2260 sizeof(struct stats_counter);
2262 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2263 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2268 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2269 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2270 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2271 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2272 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2273 bp->fw_stats_req_sz;
2275 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2276 U64_HI(bp->fw_stats_req_mapping),
2277 U64_LO(bp->fw_stats_req_mapping));
2278 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2279 U64_HI(bp->fw_stats_data_mapping),
2280 U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}
2289 /* send load request to mcp and analyze response */
2290 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2296 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2297 DRV_MSG_SEQ_NUMBER_MASK);
2298 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2300 /* Get current FW pulse sequence */
2301 bp->fw_drv_pulse_wr_seq =
2302 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2303 DRV_PULSE_SEQ_MASK);
2304 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2306 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2308 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2309 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2312 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
2330 /* check whether another PF has already loaded FW to chip. In
2331 * virtualized environments a pf from another VM may have already
2332 * initialized the device including loading FW
2334 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2336 /* is another pf loaded on this engine? */
2337 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2338 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2339 /* build my FW version dword */
2340 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2341 (BCM_5710_FW_MINOR_VERSION << 8) +
2342 (BCM_5710_FW_REVISION_VERSION << 16) +
2343 (BCM_5710_FW_ENGINEERING_VERSION << 24);
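		/* The dword packs the version bytes low to high, e.g. a
		 * hypothetical FW 7.13.1.0 would become
		 * 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07.
		 */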
2345 /* read loaded FW from chip */
2346 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			if (print_err)
				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
					  loaded_fw, my_fw);
			else
				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
					       loaded_fw, my_fw);
			return -EBUSY;
		}
	}

	return 0;
}
2365 /* returns the "mcp load_code" according to global load_count array */
2366 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2368 int path = BP_PATH(bp);
2370 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2371 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2372 bnx2x_load_count[path][2]);
2373 bnx2x_load_count[path][0]++;
2374 bnx2x_load_count[path][1 + port]++;
2375 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2376 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2377 bnx2x_load_count[path][2]);
2378 if (bnx2x_load_count[path][0] == 1)
2379 return FW_MSG_CODE_DRV_LOAD_COMMON;
2380 else if (bnx2x_load_count[path][1 + port] == 1)
2381 return FW_MSG_CODE_DRV_LOAD_PORT;
2383 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
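/* Note: bnx2x_load_count[path][0] counts every function loaded on the path
 * while [1 + port] counts functions per port, so the first function on the
 * path is answered with LOAD_COMMON, the first on a port with LOAD_PORT and
 * any later one with LOAD_FUNCTION - mirroring what the MCP would reply.
 */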
2386 /* mark PMF if applicable */
2387 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2389 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2390 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2391 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}
2405 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2407 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2408 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2409 (bp->common.shmem2_base)) {
2410 if (SHMEM2_HAS(bp, dcc_support))
2411 SHMEM2_WR(bp, dcc_support,
2412 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2413 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2414 if (SHMEM2_HAS(bp, afex_driver_support))
2415 SHMEM2_WR(bp, afex_driver_support,
2416 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2419 /* Set AFEX default VLAN tag to an invalid value */
2420 bp->afex_def_vlan_tag = -1;
2424 * bnx2x_bz_fp - zero content of the fastpath structure.
2426 * @bp: driver handle
2427 * @index: fastpath index to be zeroed
2429 * Makes sure the contents of the bp->fp[index].napi is kept
2432 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2434 struct bnx2x_fastpath *fp = &bp->fp[index];
2436 struct napi_struct orig_napi = fp->napi;
2437 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2439 /* bzero bnx2x_fastpath contents */
2441 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2442 sizeof(struct bnx2x_agg_info));
2443 memset(fp, 0, sizeof(*fp));
2445 /* Restore the NAPI object as it has been already initialized */
2446 fp->napi = orig_napi;
2447 fp->tpa_info = orig_tpa_info;
2451 fp->max_cos = bp->max_cos;
2453 /* Special queues support only one CoS */
2456 /* Init txdata pointers */
2458 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2460 for_each_cos_in_tx_queue(fp, cos)
2461 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2462 BNX2X_NUM_ETH_QUEUES(bp) + index];
2464 /* set the tpa flag for each queue. The tpa flag determines the queue
2465 * minimal size so it must be set prior to queue memory allocation
2467 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2468 (bp->flags & GRO_ENABLE_FLAG &&
2469 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2470 if (bp->flags & TPA_ENABLE_FLAG)
2471 fp->mode = TPA_MODE_LRO;
2472 else if (bp->flags & GRO_ENABLE_FLAG)
2473 fp->mode = TPA_MODE_GRO;
2475 /* We don't want TPA on an FCoE L2 ring */
2477 fp->disable_tpa = 1;
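	/* Net effect of the selection above: LRO is preferred when both the
	 * TPA and GRO flags are set, GRO additionally requires an MTU small
	 * enough for aggregation (bnx2x_mtu_allows_gro()), and FCoE L2 rings
	 * never use TPA at all.
	 */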
2480 int bnx2x_load_cnic(struct bnx2x *bp)
2482 int i, rc, port = BP_PORT(bp);
2484 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2486 mutex_init(&bp->cnic_mutex);
2489 rc = bnx2x_alloc_mem_cnic(bp);
2491 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2492 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2496 rc = bnx2x_alloc_fp_mem_cnic(bp);
2498 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2499 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2502 /* Update the number of queues with the cnic queues */
2503 rc = bnx2x_set_real_num_queues(bp, 1);
2505 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2506 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2509 /* Add all CNIC NAPI objects */
2510 bnx2x_add_all_napi_cnic(bp);
2511 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2512 bnx2x_napi_enable_cnic(bp);
2514 rc = bnx2x_init_hw_func_cnic(bp);
2516 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2518 bnx2x_nic_init_cnic(bp);
2521 /* Enable Timer scan */
2522 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2524 /* setup cnic queues */
2525 for_each_cnic_queue(bp, i) {
2526 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2528 BNX2X_ERR("Queue setup failed\n");
2529 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2534 /* Initialize Rx filter. */
2535 bnx2x_set_rx_mode_inner(bp);
2537 /* re-read iscsi info */
2538 bnx2x_get_iscsi_info(bp);
2539 bnx2x_setup_cnic_irq_info(bp);
2540 bnx2x_setup_cnic_info(bp);
2541 bp->cnic_loaded = true;
2542 if (bp->state == BNX2X_STATE_OPEN)
2543 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2545 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2549 #ifndef BNX2X_STOP_ON_ERROR
2551 /* Disable Timer scan */
2552 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2555 bnx2x_napi_disable_cnic(bp);
2556 /* Update the number of queues without the cnic queues */
2557 if (bnx2x_set_real_num_queues(bp, 0))
2558 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2560 BNX2X_ERR("CNIC-related load failed\n");
2561 bnx2x_free_fp_mem_cnic(bp);
2562 bnx2x_free_mem_cnic(bp);
2564 #endif /* ! BNX2X_STOP_ON_ERROR */
2567 /* must be called with rtnl_lock */
2568 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2570 int port = BP_PORT(bp);
2571 int i, rc = 0, load_code = 0;
2573 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2575 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2577 #ifdef BNX2X_STOP_ON_ERROR
2578 if (unlikely(bp->panic)) {
2579 BNX2X_ERR("Can't load NIC when there is panic\n");
2584 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2586 /* zero the structure w/o any lock, before SP handler is initialized */
2587 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2588 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2589 &bp->last_reported_link.link_report_flags);
2592 /* must be called before memory allocation and HW init */
2593 bnx2x_ilt_set_info(bp);
2596 * Zero fastpath structures preserving invariants like napi, which are
2597 * allocated only once, fp index, max_cos, bp pointer.
2598 * Also set fp->disable_tpa and txdata_ptr.
2600 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2601 for_each_queue(bp, i)
2603 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2604 bp->num_cnic_queues) *
2605 sizeof(struct bnx2x_fp_txdata));
2607 bp->fcoe_init = false;
2609 /* Set the receive queues buffer size */
2610 bnx2x_set_rx_buf_size(bp);
2613 rc = bnx2x_alloc_mem(bp);
2615 BNX2X_ERR("Unable to allocate bp memory\n");
2620 /* need to be done after alloc mem, since it's self adjusting to amount
2621 * of memory available for RSS queues
2623 rc = bnx2x_alloc_fp_mem(bp);
2625 BNX2X_ERR("Unable to allocate memory for fps\n");
2626 LOAD_ERROR_EXIT(bp, load_error0);
2629 /* Allocated memory for FW statistics */
2630 if (bnx2x_alloc_fw_stats_mem(bp))
2631 LOAD_ERROR_EXIT(bp, load_error0);
2633 /* request pf to initialize status blocks */
2635 rc = bnx2x_vfpf_init(bp);
2637 LOAD_ERROR_EXIT(bp, load_error0);
2640 /* As long as bnx2x_alloc_mem() may possibly update
2641 * bp->num_queues, bnx2x_set_real_num_queues() should always
2642 * come after it. At this stage cnic queues are not counted.
2644 rc = bnx2x_set_real_num_queues(bp, 0);
2646 BNX2X_ERR("Unable to set real_num_queues\n");
2647 LOAD_ERROR_EXIT(bp, load_error0);
2650 /* configure multi cos mappings in kernel.
2651 * this configuration may be overridden by a multi class queue
2652 * discipline or by a dcbx negotiation result.
2654 bnx2x_setup_tc(bp->dev, bp->max_cos);
2656 /* Add all NAPI objects */
2657 bnx2x_add_all_napi(bp);
2658 DP(NETIF_MSG_IFUP, "napi added\n");
2659 bnx2x_napi_enable(bp);
2662 /* set pf load just before approaching the MCP */
2663 bnx2x_set_pf_load(bp);
2665 /* if mcp exists send load request and analyze response */
2666 if (!BP_NOMCP(bp)) {
2667 /* attempt to load pf */
2668 rc = bnx2x_nic_load_request(bp, &load_code);
2670 LOAD_ERROR_EXIT(bp, load_error1);
2672 /* what did mcp say? */
2673 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2675 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2676 LOAD_ERROR_EXIT(bp, load_error2);
2679 load_code = bnx2x_nic_load_no_mcp(bp, port);
2682 /* mark pmf if applicable */
2683 bnx2x_nic_load_pmf(bp, load_code);
2685 /* Init Function state controlling object */
2686 bnx2x__init_func_obj(bp);
2689 rc = bnx2x_init_hw(bp, load_code);
2691 BNX2X_ERR("HW init failed, aborting\n");
2692 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2693 LOAD_ERROR_EXIT(bp, load_error2);
2697 bnx2x_pre_irq_nic_init(bp);
2699 /* Connect to IRQs */
2700 rc = bnx2x_setup_irqs(bp);
2702 BNX2X_ERR("setup irqs failed\n");
2704 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2705 LOAD_ERROR_EXIT(bp, load_error2);
2708 /* Init per-function objects */
2710 /* Setup NIC internals and enable interrupts */
2711 bnx2x_post_irq_nic_init(bp, load_code);
2713 bnx2x_init_bp_objs(bp);
2714 bnx2x_iov_nic_init(bp);
2716 /* Set AFEX default VLAN tag to an invalid value */
2717 bp->afex_def_vlan_tag = -1;
2718 bnx2x_nic_load_afex_dcc(bp, load_code);
2719 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2720 rc = bnx2x_func_start(bp);
2722 BNX2X_ERR("Function start failed!\n");
2723 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2725 LOAD_ERROR_EXIT(bp, load_error3);
2728 /* Send LOAD_DONE command to MCP */
2729 if (!BP_NOMCP(bp)) {
2730 load_code = bnx2x_fw_command(bp,
2731 DRV_MSG_CODE_LOAD_DONE, 0);
2733 BNX2X_ERR("MCP response failure, aborting\n");
2735 LOAD_ERROR_EXIT(bp, load_error3);
2739 /* initialize FW coalescing state machines in RAM */
2740 bnx2x_update_coalesce(bp);
2743 /* setup the leading queue */
2744 rc = bnx2x_setup_leading(bp);
2746 BNX2X_ERR("Setup leading failed!\n");
2747 LOAD_ERROR_EXIT(bp, load_error3);
2750 /* set up the rest of the queues */
2751 for_each_nondefault_eth_queue(bp, i) {
2753 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2755 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2757 BNX2X_ERR("Queue %d setup failed\n", i);
2758 LOAD_ERROR_EXIT(bp, load_error3);
2763 rc = bnx2x_init_rss(bp);
2765 BNX2X_ERR("PF RSS init failed\n");
2766 LOAD_ERROR_EXIT(bp, load_error3);
2769 /* Now when Clients are configured we are ready to work */
2770 bp->state = BNX2X_STATE_OPEN;
2772 /* Configure a ucast MAC */
2774 rc = bnx2x_set_eth_mac(bp, true);
2776 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2779 BNX2X_ERR("Setting Ethernet MAC failed\n");
2780 LOAD_ERROR_EXIT(bp, load_error3);
2783 if (IS_PF(bp) && bp->pending_max) {
2784 bnx2x_update_max_mf_config(bp, bp->pending_max);
2785 bp->pending_max = 0;
2789 rc = bnx2x_initial_phy_init(bp, load_mode);
2791 LOAD_ERROR_EXIT(bp, load_error3);
2793 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2795 /* Start fast path */
2797 /* Initialize Rx filter. */
2798 bnx2x_set_rx_mode_inner(bp);
2801 switch (load_mode) {
2803 /* Tx queue should be only re-enabled */
2804 netif_tx_wake_all_queues(bp->dev);
2808 netif_tx_start_all_queues(bp->dev);
2809 smp_mb__after_atomic();
2813 case LOAD_LOOPBACK_EXT:
2814 bp->state = BNX2X_STATE_DIAG;
2822 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2824 bnx2x__link_status_update(bp);
2826 /* start the timer */
2827 mod_timer(&bp->timer, jiffies + bp->current_interval);
2829 if (CNIC_ENABLED(bp))
2830 bnx2x_load_cnic(bp);
2833 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2835 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2836 /* mark driver is loaded in shmem2 */
2838 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2839 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2840 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2841 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2844 /* Wait for all pending SP commands to complete */
2845 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2846 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2847 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2851 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2852 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2853 bnx2x_dcbx_init(bp, false);
2855 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2859 #ifndef BNX2X_STOP_ON_ERROR
2862 bnx2x_int_disable_sync(bp, 1);
2864 /* Clean queueable objects */
2865 bnx2x_squeeze_objects(bp);
2868 /* Free SKBs, SGEs, TPA pool and driver internals */
2869 bnx2x_free_skbs(bp);
2870 for_each_rx_queue(bp, i)
2871 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2876 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2877 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2878 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2883 bnx2x_napi_disable(bp);
2884 bnx2x_del_all_napi(bp);
2886 /* clear pf_load status, as it was already set */
2888 bnx2x_clear_pf_load(bp);
2890 bnx2x_free_fw_stats_mem(bp);
2891 bnx2x_free_fp_mem(bp);
2895 #endif /* ! BNX2X_STOP_ON_ERROR */
2898 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2902 /* Wait until tx fastpath tasks complete */
2903 for_each_tx_queue(bp, i) {
2904 struct bnx2x_fastpath *fp = &bp->fp[i];
2906 for_each_cos_in_tx_queue(fp, cos)
2907 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2914 /* must be called with rtnl_lock */
2915 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2918 bool global = false;
2920 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2922 /* mark driver is unloaded in shmem2 */
2923 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2925 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2926 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2927 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2930 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2931 (bp->state == BNX2X_STATE_CLOSED ||
2932 bp->state == BNX2X_STATE_ERROR)) {
2933 /* We can get here if the driver has been unloaded
2934 * during parity error recovery and is either waiting for a
2935 * leader to complete or for other functions to unload and
2936 * then ifdown has been issued. In this case we want to
2937 * unload and let other functions to complete a recovery
2940 bp->recovery_state = BNX2X_RECOVERY_DONE;
2942 bnx2x_release_leader_lock(bp);
2945 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2946 BNX2X_ERR("Can't unload in closed or error state\n");
	/* Nothing to do during unload if previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * We can get here only after an unsuccessful ndo_* callback, during
	 * which the dev->IFF_UP flag is still on.
	 */
2956 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2959 /* It's important to set the bp->state to the value different from
2960 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2961 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2963 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2966 /* indicate to VFs that the PF is going down */
2967 bnx2x_iov_channel_down(bp);
2969 if (CNIC_LOADED(bp))
2970 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2973 bnx2x_tx_disable(bp);
2974 netdev_reset_tc(bp->dev);
2976 bp->rx_mode = BNX2X_RX_MODE_NONE;
2978 del_timer_sync(&bp->timer);
2981 /* Set ALWAYS_ALIVE bit in shmem */
2982 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2983 bnx2x_drv_pulse(bp);
2984 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2985 bnx2x_save_statistics(bp);
2988 /* wait till consumers catch up with producers in all queues */
2989 bnx2x_drain_tx_queues(bp);
2991 /* if VF indicate to PF this function is going down (PF will delete sp
2992 * elements and clear initializations
2995 bnx2x_vfpf_close_vf(bp);
2996 else if (unload_mode != UNLOAD_RECOVERY)
2997 /* if this is a normal/close unload need to clean up chip*/
2998 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3000 /* Send the UNLOAD_REQUEST to the MCP */
3001 bnx2x_send_unload_req(bp, unload_mode);
3003 /* Prevent transactions to host from the functions on the
3004 * engine that doesn't reset global blocks in case of global
3005 * attention once global blocks are reset and gates are opened
3006 * (the engine which leader will perform the recovery
3009 if (!CHIP_IS_E1x(bp))
3010 bnx2x_pf_disable(bp);
3012 /* Disable HW interrupts, NAPI */
3013 bnx2x_netif_stop(bp, 1);
3014 /* Delete all NAPI objects */
3015 bnx2x_del_all_napi(bp);
3016 if (CNIC_LOADED(bp))
3017 bnx2x_del_all_napi_cnic(bp);
3021 /* Report UNLOAD_DONE to MCP */
3022 bnx2x_send_unload_done(bp, false);
3026 * At this stage no more interrupts will arrive so we may safely clean
3027 * the queueable objects here in case they failed to get cleaned so far.
3030 bnx2x_squeeze_objects(bp);
3032 /* There should be no more pending SP commands at this stage */
3037 /* clear pending work in rtnl task */
3038 bp->sp_rtnl_state = 0;
3041 /* Free SKBs, SGEs, TPA pool and driver internals */
3042 bnx2x_free_skbs(bp);
3043 if (CNIC_LOADED(bp))
3044 bnx2x_free_skbs_cnic(bp);
3045 for_each_rx_queue(bp, i)
3046 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3048 bnx2x_free_fp_mem(bp);
3049 if (CNIC_LOADED(bp))
3050 bnx2x_free_fp_mem_cnic(bp);
3053 if (CNIC_LOADED(bp))
3054 bnx2x_free_mem_cnic(bp);
3058 bp->state = BNX2X_STATE_CLOSED;
3059 bp->cnic_loaded = false;
3061 /* Clear driver version indication in shmem */
3063 bnx2x_update_mng_version(bp);
3065 /* Check if there are pending parity attentions. If there are - set
3066 * RECOVERY_IN_PROGRESS.
3068 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3069 bnx2x_set_reset_in_progress(bp);
3071 /* Set RESET_IS_GLOBAL if needed */
3073 bnx2x_set_reset_global(bp);
3076 /* The last driver must disable a "close the gate" if there is no
3077 * parity attention or "process kill" pending.
3080 !bnx2x_clear_pf_load(bp) &&
3081 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3082 bnx2x_disable_close_the_gate(bp);
3084 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3089 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3093 /* If there is no power capability, silently succeed */
3094 if (!bp->pdev->pm_cap) {
3095 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3099 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3103 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3104 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3105 PCI_PM_CTRL_PME_STATUS));
3107 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3108 /* delay required during transition out of D3hot */
3113 /* If there are other clients above don't
3114 shut down the power */
3115 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3117 /* Don't shut down the power for emulation and FPGA */
3118 if (CHIP_REV_IS_SLOW(bp))
3121 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3125 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3127 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3130 /* No more memory access after this point until
3131 * device is brought back to D0.
3136 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3143 * net_device service functions
3145 static int bnx2x_poll(struct napi_struct *napi, int budget)
3149 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3151 struct bnx2x *bp = fp->bp;
3154 #ifdef BNX2X_STOP_ON_ERROR
3155 if (unlikely(bp->panic)) {
3156 napi_complete(napi);
3160 if (!bnx2x_fp_lock_napi(fp))
3163 for_each_cos_in_tx_queue(fp, cos)
3164 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3165 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3167 if (bnx2x_has_rx_work(fp)) {
3168 work_done += bnx2x_rx_int(fp, budget - work_done);
3170 /* must not complete if we consumed full budget */
3171 if (work_done >= budget) {
3172 bnx2x_fp_unlock_napi(fp);
3177 /* Fall out from the NAPI loop if needed */
3178 if (!bnx2x_fp_unlock_napi(fp) &&
3179 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3181 /* No need to update SB for FCoE L2 ring as long as
3182 * it's connected to the default SB and the SB
3183 * has been updated when NAPI was scheduled.
3185 if (IS_FCOE_FP(fp)) {
3186 napi_complete(napi);
3189 bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();
3205 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3206 napi_complete(napi);
3207 /* Re-enable interrupts */
3208 DP(NETIF_MSG_RX_STATUS,
3209 "Update index to %d\n", fp->fp_hc_idx);
3210 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3211 le16_to_cpu(fp->fp_hc_idx),
3221 #ifdef CONFIG_NET_RX_BUSY_POLL
3222 /* must be called with local_bh_disable()d */
3223 int bnx2x_low_latency_recv(struct napi_struct *napi)
3225 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3227 struct bnx2x *bp = fp->bp;
3230 if ((bp->state == BNX2X_STATE_CLOSED) ||
3231 (bp->state == BNX2X_STATE_ERROR) ||
3232 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3233 return LL_FLUSH_FAILED;
3235 if (!bnx2x_fp_lock_poll(fp))
3236 return LL_FLUSH_BUSY;
3238 if (bnx2x_has_rx_work(fp))
3239 found = bnx2x_rx_int(fp, 4);
3241 bnx2x_fp_unlock_poll(fp);
3247 /* we split the first BD into headers and data BDs
3248 * to ease the pain of our fellow microcode engineers
3249 * we use one mapping for both BDs
3251 static u16 bnx2x_tx_split(struct bnx2x *bp,
3252 struct bnx2x_fp_txdata *txdata,
3253 struct sw_tx_bd *tx_buf,
3254 struct eth_tx_start_bd **tx_bd, u16 hlen,
3257 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3258 struct eth_tx_bd *d_tx_bd;
3260 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3262 /* first fix first BD */
3263 h_tx_bd->nbytes = cpu_to_le16(hlen);
3265 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3266 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3268 /* now get a new data BD
3269 * (after the pbd) and fill it */
3270 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3271 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3273 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3274 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3276 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3277 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3278 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3280 /* this marks the BD as one that has no individual mapping */
3281 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3283 DP(NETIF_MSG_TX_QUEUED,
3284 "TSO split data size is %d (%x:%x)\n",
3285 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3288 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
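	/* Illustration (made-up sizes): a 1500-byte linear buffer with
	 * hlen = 66 ends up as a 66-byte header BD followed by a data BD of
	 * 1500 - 66 = 1434 bytes that reuses the same DMA mapping at offset
	 * hlen, which is why the data BD is flagged BNX2X_TSO_SPLIT_BD and
	 * never unmapped on its own.
	 */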
3293 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3294 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3295 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3297 __sum16 tsum = (__force __sum16) csum;
3300 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3301 csum_partial(t_header - fix, fix, 0)));
3304 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3305 csum_partial(t_header, -fix, 0)));
3307 return bswab16(tsum);
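/* In other words: for a positive fix the checksum contribution of the fix
 * bytes preceding t_header is subtracted from the partial sum, for a
 * negative fix the -fix bytes starting at t_header are added, and the
 * result is folded and byte-swapped into the form the parsing BD expects.
 */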
3310 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3316 if (skb->ip_summed != CHECKSUM_PARTIAL)
3319 protocol = vlan_get_protocol(skb);
3320 if (protocol == htons(ETH_P_IPV6)) {
3322 prot = ipv6_hdr(skb)->nexthdr;
3325 prot = ip_hdr(skb)->protocol;
3328 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3329 if (inner_ip_hdr(skb)->version == 6) {
3330 rc |= XMIT_CSUM_ENC_V6;
3331 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3332 rc |= XMIT_CSUM_TCP;
3334 rc |= XMIT_CSUM_ENC_V4;
3335 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3336 rc |= XMIT_CSUM_TCP;
3339 if (prot == IPPROTO_TCP)
3340 rc |= XMIT_CSUM_TCP;
3342 if (skb_is_gso(skb)) {
3343 if (skb_is_gso_v6(skb)) {
3344 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3345 if (rc & XMIT_CSUM_ENC)
3346 rc |= XMIT_GSO_ENC_V6;
3348 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3349 if (rc & XMIT_CSUM_ENC)
3350 rc |= XMIT_GSO_ENC_V4;
3357 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3358 /* check if packet requires linearization (packet is too fragmented)
3359 no need to check fragmentation if page size > 8K (there will be no
3360 violation to FW restrictions) */
3361 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3366 int first_bd_sz = 0;
3368 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3369 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3371 if (xmit_type & XMIT_GSO) {
3372 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3373 /* Check if LSO packet needs to be copied:
3374 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3375 int wnd_size = MAX_FETCH_BD - 3;
3376 /* Number of windows to check */
3377 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3382 /* Headers length */
3383 hlen = (int)(skb_transport_header(skb) - skb->data) +
3386 /* Amount of data (w/o headers) on linear part of SKB*/
3387 first_bd_sz = skb_headlen(skb) - hlen;
3389 wnd_sum = first_bd_sz;
3391 /* Calculate the first sum - it's special */
3392 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3394 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3396 /* If there was data on linear skb data - check it */
3397 if (first_bd_sz > 0) {
3398 if (unlikely(wnd_sum < lso_mss)) {
3403 wnd_sum -= first_bd_sz;
3406 /* Others are easier: run through the frag list and
3407 check all windows */
3408 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3410 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3412 if (unlikely(wnd_sum < lso_mss)) {
3417 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
		/* in the non-LSO case a too fragmented packet should always
		 * be linearized
		 */
		to_copy = 1;
	}

exit_lbl:
3427 if (unlikely(to_copy))
3428 DP(NETIF_MSG_TX_QUEUED,
3429 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3430 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3431 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
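	/* Rough illustration (the actual MAX_FETCH_BD value lives in the FW
	 * headers): with a window of, say, 10 BDs every 10 consecutive BDs of
	 * an LSO skb must together carry at least gso_size bytes; an MSS of
	 * 9000 spread over ten 512-byte frags (5120 bytes per window) would
	 * fail the check above and force skb_linearize().
	 */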
3437 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3440 struct ipv6hdr *ipv6;
3442 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3443 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3444 ETH_TX_PARSE_BD_E2_LSO_MSS;
3446 if (xmit_type & XMIT_GSO_ENC_V6)
3447 ipv6 = inner_ipv6_hdr(skb);
3448 else if (xmit_type & XMIT_GSO_V6)
3449 ipv6 = ipv6_hdr(skb);
3453 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3454 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3458 * bnx2x_set_pbd_gso - update PBD in GSO case.
3462 * @xmit_type: xmit flags
3464 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3465 struct eth_tx_parse_bd_e1x *pbd,
3466 struct eth_tx_start_bd *tx_start_bd,
3469 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3470 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3471 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3473 if (xmit_type & XMIT_GSO_V4) {
3474 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3475 pbd->tcp_pseudo_csum =
3476 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3478 0, IPPROTO_TCP, 0));
3480 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3481 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3483 pbd->tcp_pseudo_csum =
3484 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3485 &ipv6_hdr(skb)->daddr,
3486 0, IPPROTO_TCP, 0));
3490 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3494 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3496 * @bp: driver handle
3498 * @parsing_data: data to be updated
3499 * @xmit_type: xmit flags
3501 * 57712/578xx related, when skb has encapsulation
3503 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3504 u32 *parsing_data, u32 xmit_type)
3507 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3508 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3509 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3511 if (xmit_type & XMIT_CSUM_TCP) {
3512 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3513 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3514 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3516 return skb_inner_transport_header(skb) +
3517 inner_tcp_hdrlen(skb) - skb->data;
3520 /* We support checksum offload for TCP and UDP only.
3521 * No need to pass the UDP header length - it's a constant.
3523 return skb_inner_transport_header(skb) +
3524 sizeof(struct udphdr) - skb->data;
3528 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3530 * @bp: driver handle
3532 * @parsing_data: data to be updated
3533 * @xmit_type: xmit flags
3535 * 57712/578xx related
3537 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3538 u32 *parsing_data, u32 xmit_type)
3541 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3542 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3543 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3545 if (xmit_type & XMIT_CSUM_TCP) {
3546 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3547 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3548 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3550 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3552 /* We support checksum offload for TCP and UDP only.
3553 * No need to pass the UDP header length - it's a constant.
3555 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3558 /* set FW indication according to inner or outer protocols if tunneled */
3559 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3560 struct eth_tx_start_bd *tx_start_bd,
3563 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3565 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3566 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3568 if (!(xmit_type & XMIT_CSUM_TCP))
3569 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3573 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3575 * @bp: driver handle
3577 * @pbd: parse BD to be updated
3578 * @xmit_type: xmit flags
3580 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3581 struct eth_tx_parse_bd_e1x *pbd,
3584 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3586 /* for now NS flag is not used in Linux */
3589 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3590 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3592 pbd->ip_hlen_w = (skb_transport_header(skb) -
3593 skb_network_header(skb)) >> 1;
3595 hlen += pbd->ip_hlen_w;
3597 /* We support checksum offload for TCP and UDP only */
3598 if (xmit_type & XMIT_CSUM_TCP)
3599 hlen += tcp_hdrlen(skb) / 2;
3601 hlen += sizeof(struct udphdr) / 2;
3603 pbd->total_hlen_w = cpu_to_le16(hlen);
3606 if (xmit_type & XMIT_CSUM_TCP) {
3607 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3610 s8 fix = SKB_CS_OFF(skb); /* signed! */
3612 DP(NETIF_MSG_TX_QUEUED,
3613 "hlen %d fix %d csum before fix %x\n",
3614 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3616 /* HW bug: fixup the CSUM */
3617 pbd->tcp_pseudo_csum =
3618 bnx2x_csum_fix(skb_transport_header(skb),
3621 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3622 pbd->tcp_pseudo_csum);
3628 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3629 struct eth_tx_parse_bd_e2 *pbd_e2,
3630 struct eth_tx_parse_2nd_bd *pbd2,
3635 u8 outerip_off, outerip_len = 0;
3637 /* from outer IP to transport */
3638 hlen_w = (skb_inner_transport_header(skb) -
3639 skb_network_header(skb)) >> 1;
3642 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3644 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3646 /* outer IP header info */
3647 if (xmit_type & XMIT_CSUM_V4) {
3648 struct iphdr *iph = ip_hdr(skb);
3649 u32 csum = (__force u32)(~iph->check) -
3650 (__force u32)iph->tot_len -
3651 (__force u32)iph->frag_off;
3653 pbd2->fw_ip_csum_wo_len_flags_frag =
3654 bswab16(csum_fold((__force __wsum)csum));
3656 pbd2->fw_ip_hdr_to_payload_w =
3657 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3660 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3662 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3664 if (xmit_type & XMIT_GSO_V4) {
3665 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3667 pbd_e2->data.tunnel_data.pseudo_csum =
3668 bswab16(~csum_tcpudp_magic(
3669 inner_ip_hdr(skb)->saddr,
3670 inner_ip_hdr(skb)->daddr,
3671 0, IPPROTO_TCP, 0));
3673 outerip_len = ip_hdr(skb)->ihl << 1;
3675 pbd_e2->data.tunnel_data.pseudo_csum =
3676 bswab16(~csum_ipv6_magic(
3677 &inner_ipv6_hdr(skb)->saddr,
3678 &inner_ipv6_hdr(skb)->daddr,
3679 0, IPPROTO_TCP, 0));
3682 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3686 (!!(xmit_type & XMIT_CSUM_V6) <<
3687 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3689 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3690 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3691 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3693 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3694 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3695 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3699 /* called with netif_tx_lock
3700 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3701 * netif_wake_queue()
3703 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3705 struct bnx2x *bp = netdev_priv(dev);
3707 struct netdev_queue *txq;
3708 struct bnx2x_fp_txdata *txdata;
3709 struct sw_tx_bd *tx_buf;
3710 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3711 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3712 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3713 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3714 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3715 u32 pbd_e2_parsing_data = 0;
3716 u16 pkt_prod, bd_prod;
3719 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3722 __le16 pkt_size = 0;
3724 u8 mac_type = UNICAST_ADDRESS;
3726 #ifdef BNX2X_STOP_ON_ERROR
3727 if (unlikely(bp->panic))
3728 return NETDEV_TX_BUSY;
3731 txq_index = skb_get_queue_mapping(skb);
3732 txq = netdev_get_tx_queue(dev, txq_index);
3734 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3736 txdata = &bp->bnx2x_txq[txq_index];
3738 /* enable this debug print to view the transmission queue being used
3739 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3740 txq_index, fp_index, txdata_index); */
3742 /* enable this debug print to view the transmission details
3743 DP(NETIF_MSG_TX_QUEUED,
3744 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3745 txdata->cid, fp_index, txdata_index, txdata, fp); */
3747 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3748 skb_shinfo(skb)->nr_frags +
3750 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3751 /* Handle special storage cases separately */
3752 if (txdata->tx_ring_size == 0) {
3753 struct bnx2x_eth_q_stats *q_stats =
3754 bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
3759 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3760 netif_tx_stop_queue(txq);
3761 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3763 return NETDEV_TX_BUSY;
3766 DP(NETIF_MSG_TX_QUEUED,
3767 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3768 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3769 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3772 eth = (struct ethhdr *)skb->data;
3774 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3775 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3776 if (is_broadcast_ether_addr(eth->h_dest))
3777 mac_type = BROADCAST_ADDRESS;
3779 mac_type = MULTICAST_ADDRESS;
3782 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3783 /* First, check if we need to linearize the skb (due to FW
3784 restrictions). No need to check fragmentation if page size > 8K
3785 (there will be no violation to FW restrictions) */
3786 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3787 /* Statistics of linearization */
3789 if (skb_linearize(skb) != 0) {
3790 DP(NETIF_MSG_TX_QUEUED,
3791 "SKB linearization failed - silently dropping this SKB\n");
3792 dev_kfree_skb_any(skb);
3793 return NETDEV_TX_OK;
3797 /* Map skb linear data for DMA */
3798 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3799 skb_headlen(skb), DMA_TO_DEVICE);
3800 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3801 DP(NETIF_MSG_TX_QUEUED,
3802 "SKB mapping failed - silently dropping this SKB\n");
3803 dev_kfree_skb_any(skb);
3804 return NETDEV_TX_OK;
	/* Please read carefully. First we use one BD which we mark as start,
	 * then we have a parsing info BD (used for TSO or xsum),
	 * and only then we have the rest of the TSO BDs.
	 * (don't forget to mark the last one as last,
	 * and to unmap only AFTER you write to the BD ...)
	 * And above all, all pdb sizes are in words - NOT DWORDS!
	 */
3815 /* get current pkt produced now - advance it just before sending packet
3816 * since mapping of pages may fail and cause packet to be dropped
3818 pkt_prod = txdata->tx_pkt_prod;
3819 bd_prod = TX_BD(txdata->tx_bd_prod);
3821 /* get a tx_buf and first BD
3822 * tx_start_bd may be changed during SPLIT,
3823 * but first_bd will always stay first
3825 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3826 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3827 first_bd = tx_start_bd;
3829 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3831 /* header nbd: indirectly zero other flags! */
3832 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3834 /* remember the first BD of the packet */
3835 tx_buf->first_bd = txdata->tx_bd_prod;
3839 DP(NETIF_MSG_TX_QUEUED,
3840 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3841 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3843 if (vlan_tx_tag_present(skb)) {
3844 tx_start_bd->vlan_or_ethertype =
3845 cpu_to_le16(vlan_tx_tag_get(skb));
3846 tx_start_bd->bd_flags.as_bitfield |=
3847 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3849 /* when transmitting in a vf, start bd must hold the ethertype
3850 * for fw to enforce it
3853 tx_start_bd->vlan_or_ethertype =
3854 cpu_to_le16(ntohs(eth->h_proto));
3856 /* used by FW for packet accounting */
3857 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3860 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
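	/* BD accounting: nbd starts at 2 (start BD + parsing BD) and is
	 * bumped once per mapped fragment in the loop below; first_bd->nbd is
	 * written only after the mapping loop (or in its error path) so that
	 * bnx2x_free_tx_pkt() can unwind a partially built chain.
	 */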
3862 /* turn on parsing and get a BD */
3863 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3865 if (xmit_type & XMIT_CSUM)
3866 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3868 if (!CHIP_IS_E1x(bp)) {
3869 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3870 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3872 if (xmit_type & XMIT_CSUM_ENC) {
3873 u16 global_data = 0;
3875 /* Set PBD in enc checksum offload case */
3876 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3877 &pbd_e2_parsing_data,
3880 /* turn on 2nd parsing and get a BD */
3881 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3883 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3885 memset(pbd2, 0, sizeof(*pbd2));
3887 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3888 (skb_inner_network_header(skb) -
3891 if (xmit_type & XMIT_GSO_ENC)
3892 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3896 pbd2->global_data = cpu_to_le16(global_data);
3898 /* add addition parse BD indication to start BD */
3899 SET_FLAG(tx_start_bd->general_data,
3900 ETH_TX_START_BD_PARSE_NBDS, 1);
3901 /* set encapsulation flag in start BD */
3902 SET_FLAG(tx_start_bd->general_data,
3903 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3905 } else if (xmit_type & XMIT_CSUM) {
3906 /* Set PBD in checksum offload case w/o encapsulation */
3907 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3908 &pbd_e2_parsing_data,
3912 /* Add the macs to the parsing BD if this is a vf or if
3913 * Tx Switching is enabled.
3916 /* override GRE parameters in BD */
3917 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3918 &pbd_e2->data.mac_addr.src_mid,
3919 &pbd_e2->data.mac_addr.src_lo,
3922 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3923 &pbd_e2->data.mac_addr.dst_mid,
3924 &pbd_e2->data.mac_addr.dst_lo,
3926 } else if (bp->flags & TX_SWITCHING) {
3927 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3928 &pbd_e2->data.mac_addr.dst_mid,
3929 &pbd_e2->data.mac_addr.dst_lo,
3933 SET_FLAG(pbd_e2_parsing_data,
3934 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3936 u16 global_data = 0;
3937 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3938 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3939 /* Set PBD in checksum offload case */
3940 if (xmit_type & XMIT_CSUM)
3941 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3943 SET_FLAG(global_data,
3944 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3945 pbd_e1x->global_data |= cpu_to_le16(global_data);
3948 /* Setup the data pointer of the first BD of the packet */
3949 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3950 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3951 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3952 pkt_size = tx_start_bd->nbytes;
3954 DP(NETIF_MSG_TX_QUEUED,
3955 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3956 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3957 le16_to_cpu(tx_start_bd->nbytes),
3958 tx_start_bd->bd_flags.as_bitfield,
3959 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3961 if (xmit_type & XMIT_GSO) {
3963 DP(NETIF_MSG_TX_QUEUED,
3964 "TSO packet len %d hlen %d total len %d tso size %d\n",
3965 skb->len, hlen, skb_headlen(skb),
3966 skb_shinfo(skb)->gso_size);
3968 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3970 if (unlikely(skb_headlen(skb) > hlen)) {
3972 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3976 if (!CHIP_IS_E1x(bp))
3977 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3980 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3983 /* Set the PBD's parsing_data field if not zero
3984 * (for the chips newer than 57711).
3986 if (pbd_e2_parsing_data)
3987 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3989 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3991 /* Handle fragmented skb */
3992 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3993 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3995 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3996 skb_frag_size(frag), DMA_TO_DEVICE);
3997 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3998 unsigned int pkts_compl = 0, bytes_compl = 0;
4000 DP(NETIF_MSG_TX_QUEUED,
4001 "Unable to map page - dropping packet...\n");
			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd needs to be properly updated
			 * before the call to bnx2x_free_tx_pkt
			 */
4008 first_bd->nbd = cpu_to_le16(nbd);
4009 bnx2x_free_tx_pkt(bp, txdata,
4010 TX_BD(txdata->tx_pkt_prod),
4011 &pkts_compl, &bytes_compl);
4012 return NETDEV_TX_OK;
4015 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4016 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4017 if (total_pkt_bd == NULL)
4018 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4020 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4021 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4022 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4023 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4026 DP(NETIF_MSG_TX_QUEUED,
4027 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4028 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4029 le16_to_cpu(tx_data_bd->nbytes));
4032 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4034 /* update with actual num BDs */
4035 first_bd->nbd = cpu_to_le16(nbd);
4037 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4039 /* now send a tx doorbell, counting the next BD
4040 * if the packet contains or ends with it
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, when we care much more about them.
	 */
4052 if (total_pkt_bd != NULL)
4053 total_pkt_bd->total_pkt_bytes = pkt_size;
4056 DP(NETIF_MSG_TX_QUEUED,
4057 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4058 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4059 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4060 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4061 le16_to_cpu(pbd_e1x->total_hlen_w));
4063 DP(NETIF_MSG_TX_QUEUED,
4064 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4066 pbd_e2->data.mac_addr.dst_hi,
4067 pbd_e2->data.mac_addr.dst_mid,
4068 pbd_e2->data.mac_addr.dst_lo,
4069 pbd_e2->data.mac_addr.src_hi,
4070 pbd_e2->data.mac_addr.src_mid,
4071 pbd_e2->data.mac_addr.src_lo,
4072 pbd_e2->parsing_data);
4073 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4075 netdev_tx_sent_queue(txq, skb->len);
4077 skb_tx_timestamp(skb);
4079 txdata->tx_pkt_prod++;
	/* Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();
4089 txdata->tx_db.data.prod += nbd;
4092 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4096 txdata->tx_bd_prod += nbd;
4098 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4099 netif_tx_stop_queue(txq);
4101 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4102 * ordering of set_bit() in netif_tx_stop_queue() and read of
4106 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4107 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4108 netif_tx_wake_queue(txq);
4112 return NETDEV_TX_OK;
4116 * bnx2x_setup_tc - routine to configure net_device for multi tc
4118 * @netdev: net device to configure
4119 * @tc: number of traffic classes to enable
4121 * callback connected to the ndo_setup_tc function pointer
4123 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4125 int cos, prio, count, offset;
4126 struct bnx2x *bp = netdev_priv(dev);
4128 /* setup tc must be called under rtnl lock */
4131 /* no traffic classes requested. Aborting */
4133 netdev_reset_tc(dev);
4137 /* requested to support too many traffic classes */
4138 if (num_tc > bp->max_cos) {
4139 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4140 num_tc, bp->max_cos);
4144 /* declare amount of supported traffic classes */
4145 if (netdev_set_num_tc(dev, num_tc)) {
4146 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4150 /* configure priority to traffic class mapping */
4151 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4152 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4153 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4154 "mapping priority %d to tc %d\n",
4155 prio, bp->prio_to_cos[prio]);
4158 /* Use this configuration to differentiate tc0 from other COSes
4159 This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
4161 netdev_set_prio_tc_map(dev, 0, 0);
4162 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4163 for (prio = 1; prio < 16; prio++) {
4164 netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */
4168 /* configure traffic class to transmission queue mapping */
4169 for (cos = 0; cos < bp->max_cos; cos++) {
4170 count = BNX2X_NUM_ETH_QUEUES(bp);
4171 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4172 netdev_set_tc_queue(dev, cos, count, offset);
4173 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4174 "mapping tc %d to offset %d count %d\n",
4175 cos, offset, count);
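	/* Example (hypothetical sizes, assuming no CNIC queues so that
	 * BNX2X_NUM_NON_CNIC_QUEUES equals the 4 ETH queues): tc0 maps to
	 * queues 0-3, tc1 to 4-7 and tc2 to 8-11, i.e. each traffic class
	 * owns its own contiguous block of transmission queues.
	 */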
4181 /* called with rtnl_lock */
4182 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4184 struct sockaddr *addr = p;
4185 struct bnx2x *bp = netdev_priv(dev);
4188 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4189 BNX2X_ERR("Requested MAC address is not valid\n");
4193 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4194 !is_zero_ether_addr(addr->sa_data)) {
4195 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4199 if (netif_running(dev)) {
4200 rc = bnx2x_set_eth_mac(bp, false);
4205 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4207 if (netif_running(dev))
4208 rc = bnx2x_set_eth_mac(bp, true);
4213 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4215 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4216 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4221 if (IS_FCOE_IDX(fp_index)) {
4222 memset(sb, 0, sizeof(union host_hc_status_block));
4223 fp->status_blk_mapping = 0;
4226 if (!CHIP_IS_E1x(bp))
4227 BNX2X_PCI_FREE(sb->e2_sb,
4228 bnx2x_fp(bp, fp_index,
4229 status_blk_mapping),
4230 sizeof(struct host_hc_status_block_e2));
4232 BNX2X_PCI_FREE(sb->e1x_sb,
4233 bnx2x_fp(bp, fp_index,
4234 status_blk_mapping),
4235 sizeof(struct host_hc_status_block_e1x));
4239 if (!skip_rx_queue(bp, fp_index)) {
4240 bnx2x_free_rx_bds(fp);
4242 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4243 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4244 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4245 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4246 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4248 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4249 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4250 sizeof(struct eth_fast_path_rx_cqe) *
4254 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4255 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4256 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4257 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4261 if (!skip_tx_queue(bp, fp_index)) {
4262 /* fastpath tx rings: tx_buf tx_desc */
4263 for_each_cos_in_tx_queue(fp, cos) {
4264 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4266 DP(NETIF_MSG_IFDOWN,
4267 "freeing tx memory of fp %d cos %d cid %d\n",
4268 fp_index, cos, txdata->cid);
4270 BNX2X_FREE(txdata->tx_buf_ring);
4271 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4272 txdata->tx_desc_mapping,
4273 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4276 /* end of fastpath */
4279 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4282 for_each_cnic_queue(bp, i)
4283 bnx2x_free_fp_mem_at(bp, i);
4286 void bnx2x_free_fp_mem(struct bnx2x *bp)
4289 for_each_eth_queue(bp, i)
4290 bnx2x_free_fp_mem_at(bp, i);
4293 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4295 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4296 if (!CHIP_IS_E1x(bp)) {
4297 bnx2x_fp(bp, index, sb_index_values) =
4298 (__le16 *)status_blk.e2_sb->sb.index_values;
4299 bnx2x_fp(bp, index, sb_running_index) =
4300 (__le16 *)status_blk.e2_sb->sb.running_index;
4302 bnx2x_fp(bp, index, sb_index_values) =
4303 (__le16 *)status_blk.e1x_sb->sb.index_values;
4304 bnx2x_fp(bp, index, sb_running_index) =
4305 (__le16 *)status_blk.e1x_sb->sb.running_index;
4309 /* Returns the number of actually allocated BDs */
4310 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4313 struct bnx2x *bp = fp->bp;
4314 u16 ring_prod, cqe_ring_prod;
4315 int i, failure_cnt = 0;
4317 fp->rx_comp_cons = 0;
4318 cqe_ring_prod = ring_prod = 0;
	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
4323 for (i = 0; i < rx_ring_size; i++) {
4324 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4328 ring_prod = NEXT_RX_IDX(ring_prod);
4329 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4330 WARN_ON(ring_prod <= (i - failure_cnt));
4334 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4335 i - failure_cnt, fp->index);
4337 fp->rx_bd_prod = ring_prod;
4338 /* Limit the CQE producer by the CQE ring size */
4339 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4341 fp->rx_pkt = fp->rx_calls = 0;
4343 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4345 return i - failure_cnt;
4348 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4352 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4353 struct eth_rx_cqe_next_page *nextpg;
4355 nextpg = (struct eth_rx_cqe_next_page *)
4356 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4358 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4359 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4361 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4362 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4366 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4368 union host_hc_status_block *sb;
4369 struct bnx2x_fastpath *fp = &bp->fp[index];
4372 int rx_ring_size = 0;
4374 if (!bp->rx_ring_size &&
4375 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4376 rx_ring_size = MIN_RX_SIZE_NONTPA;
4377 bp->rx_ring_size = rx_ring_size;
4378 } else if (!bp->rx_ring_size) {
4379 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4381 if (CHIP_IS_E3(bp)) {
4382 u32 cfg = SHMEM_RD(bp,
4383 dev_info.port_hw_config[BP_PORT(bp)].
4386 /* Decrease ring size for 1G functions */
4387 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4388 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4392 /* allocate at least number of buffers required by FW */
4393 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4394 MIN_RX_SIZE_TPA, rx_ring_size);
4396 bp->rx_ring_size = rx_ring_size;
4397 } else /* if rx_ring_size specified - use it */
4398 rx_ring_size = bp->rx_ring_size;
4400 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4403 sb = &bnx2x_fp(bp, index, status_blk);
4405 if (!IS_FCOE_IDX(index)) {
4407 if (!CHIP_IS_E1x(bp)) {
4408 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4409 sizeof(struct host_hc_status_block_e2));
4413 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4414 sizeof(struct host_hc_status_block_e1x));
4420 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4421 * set shortcuts for it.
4423 if (!IS_FCOE_IDX(index))
4424 set_sb_shortcuts(bp, index);
4427 if (!skip_tx_queue(bp, index)) {
4428 /* fastpath tx rings: tx_buf tx_desc */
4429 for_each_cos_in_tx_queue(fp, cos) {
4430 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4432 DP(NETIF_MSG_IFUP,
4433 "allocating tx memory of fp %d cos %d\n",
4434 index, cos);
4436 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4437 sizeof(struct sw_tx_bd),
4438 GFP_KERNEL);
4439 if (!txdata->tx_buf_ring)
4440 goto alloc_mem_err;
4441 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4442 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4443 if (!txdata->tx_desc_ring)
4444 goto alloc_mem_err;
4445 }
4446 }
4449 if (!skip_rx_queue(bp, index)) {
4450 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4451 bnx2x_fp(bp, index, rx_buf_ring) =
4452 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4453 if (!bnx2x_fp(bp, index, rx_buf_ring))
4454 goto alloc_mem_err;
4455 bnx2x_fp(bp, index, rx_desc_ring) =
4456 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4457 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4458 if (!bnx2x_fp(bp, index, rx_desc_ring))
4459 goto alloc_mem_err;
4461 /* Seed all CQEs by 1s */
4462 bnx2x_fp(bp, index, rx_comp_ring) =
4463 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4464 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4465 if (!bnx2x_fp(bp, index, rx_comp_ring))
4466 goto alloc_mem_err;
4468 /* SGE ring */
4469 bnx2x_fp(bp, index, rx_page_ring) =
4470 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4471 GFP_KERNEL);
4472 if (!bnx2x_fp(bp, index, rx_page_ring))
4473 goto alloc_mem_err;
4474 bnx2x_fp(bp, index, rx_sge_ring) =
4475 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4476 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4477 if (!bnx2x_fp(bp, index, rx_sge_ring))
4478 goto alloc_mem_err;
4480 bnx2x_set_next_page_rx_bd(fp);
4483 bnx2x_set_next_page_rx_cq(fp);
4486 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4487 if (ring_size < rx_ring_size)
4488 goto alloc_mem_err;
4489 }
4491 return 0;
4493 /* handles low memory cases */
4494 alloc_mem_err:
4495 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4496 index, ring_size);
4497 /* FW will drop all packets if queue is not big enough,
4498 * In these cases we disable the queue
4499 * Min size is different for OOO, TPA and non-TPA queues
4500 */
4501 if (ring_size < (fp->disable_tpa ?
4502 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4503 /* release memory allocated for this queue */
4504 bnx2x_free_fp_mem_at(bp, index);
4505 return -ENOMEM;
4506 }
4507 return 0;
4508 }
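/* Editor's note: the low-memory policy above is two-tiered - a short ring is
 * tolerated as long as it still meets the FW minimum for the queue type
 * (TPA vs non-TPA); below that, keeping the queue would make the FW drop all
 * its traffic anyway, so the queue's memory is released and -ENOMEM returned.
 */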
4510 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4511 {
4512 if (!NO_FCOE(bp))
4513 /* FCoE */
4514 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4515 /* we will fail load process instead of mark
4516 * NO_FCOE_FLAG
4517 */
4518 return -ENOMEM;
4520 return 0;
4521 }
4523 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4524 {
4525 int i;
4527 /* 1. Allocate FP for leading - fatal if error
4528 * 2. Allocate RSS - fix number of queues if error
4529 */
4531 /* leading */
4532 if (bnx2x_alloc_fp_mem_at(bp, 0))
4533 return -ENOMEM;
4535 /* RSS */
4536 for_each_nondefault_eth_queue(bp, i)
4537 if (bnx2x_alloc_fp_mem_at(bp, i))
4538 break;
4540 /* handle memory failures */
4541 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4542 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4544 WARN_ON(delta < 0);
4545 bnx2x_shrink_eth_fp(bp, delta);
4546 if (CNIC_SUPPORT(bp))
4547 /* move non eth FPs next to last eth FP
4548 * must be done in that order
4549 * FCOE_IDX < FWD_IDX < OOO_IDX
4550 */
4552 /* move FCoE fp even if NO_FCOE_FLAG is on */
4553 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4554 bp->num_ethernet_queues -= delta;
4555 bp->num_queues = bp->num_ethernet_queues +
4556 bp->num_cnic_queues;
4557 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4558 bp->num_queues + delta, bp->num_queues);
4559 }
4561 return 0;
4562 }
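/* Editorial example (queue counts are hypothetical): if 8 ethernet queues
 * were requested but allocation first failed at i == 5, delta == 3; the eth
 * fastpaths are shrunk to 5 and, with CNIC support, the FCoE fastpath that
 * sits just past the last ethernet queue is moved down by delta so the
 * fp[]/sp_objs[]/fp_stats[] arrays stay contiguous.
 */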
4564 void bnx2x_free_mem_bp(struct bnx2x *bp)
4565 {
4566 int i;
4568 for (i = 0; i < bp->fp_array_size; i++)
4569 kfree(bp->fp[i].tpa_info);
4570 kfree(bp->fp);
4571 kfree(bp->sp_objs);
4572 kfree(bp->fp_stats);
4573 kfree(bp->bnx2x_txq);
4574 kfree(bp->msix_table);
4575 kfree(bp->ilt);
4576 }
4578 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4579 {
4580 struct bnx2x_fastpath *fp;
4581 struct msix_entry *tbl;
4582 struct bnx2x_ilt *ilt;
4583 int msix_table_size = 0;
4584 int fp_array_size, txq_array_size;
4585 int i;
4587 /*
4588 * The biggest MSI-X table we might need is as a maximum number of fast
4589 * path IGU SBs plus default SB (for PF only).
4590 */
4591 msix_table_size = bp->igu_sb_cnt;
4592 if (IS_PF(bp))
4593 msix_table_size++;
4594 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4596 /* fp array: RSS plus CNIC related L2 queues */
4597 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4598 bp->fp_array_size = fp_array_size;
4599 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4601 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4602 if (!fp)
4603 goto alloc_err;
4604 for (i = 0; i < bp->fp_array_size; i++) {
4605 fp[i].tpa_info =
4606 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4607 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4608 if (!(fp[i].tpa_info))
4609 goto alloc_err;
4610 }
4612 bp->fp = fp;
4614 /* allocate sp objs */
4615 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4616 GFP_KERNEL);
4617 if (!bp->sp_objs)
4618 goto alloc_err;
4620 /* allocate fp_stats */
4621 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4622 GFP_KERNEL);
4623 if (!bp->fp_stats)
4624 goto alloc_err;
4626 /* Allocate memory for the transmission queues array */
4627 txq_array_size =
4628 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4629 BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4631 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4632 GFP_KERNEL);
4633 if (!bp->bnx2x_txq)
4634 goto alloc_err;
4636 /* msix table */
4637 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4638 if (!tbl)
4639 goto alloc_err;
4640 bp->msix_table = tbl;
4642 /* ilt */
4643 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4644 if (!ilt)
4645 goto alloc_err;
4646 bp->ilt = ilt;
4648 return 0;
4649 alloc_err:
4650 bnx2x_free_mem_bp(bp);
4651 return -ENOMEM;
4652 }
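/* Editor's note: every failure path funnels into alloc_err, which simply
 * calls bnx2x_free_mem_bp(); that is safe for partially completed setup
 * because kfree(NULL) is a no-op, so members that were never allocated are
 * skipped harmlessly.
 */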
4654 int bnx2x_reload_if_running(struct net_device *dev)
4655 {
4656 struct bnx2x *bp = netdev_priv(dev);
4658 if (unlikely(!netif_running(dev)))
4659 return 0;
4661 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4662 return bnx2x_nic_load(bp, LOAD_NORMAL);
4663 }
4665 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4666 {
4667 u32 sel_phy_idx = 0;
4668 if (bp->link_params.num_phys <= 1)
4669 return INT_PHY;
4671 if (bp->link_vars.link_up) {
4672 sel_phy_idx = EXT_PHY1;
4673 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4674 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4675 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4676 sel_phy_idx = EXT_PHY2;
4677 } else {
4679 switch (bnx2x_phy_selection(&bp->link_params)) {
4680 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4681 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4682 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4683 sel_phy_idx = EXT_PHY1;
4684 break;
4685 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4686 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4687 sel_phy_idx = EXT_PHY2;
4688 break;
4689 }
4690 }
4692 return sel_phy_idx;
4693 }
4694 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4695 {
4696 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4697 /*
4698 * The selected activated PHY is always after swapping (in case PHY
4699 * swapping is enabled). So when swapping is enabled, we need to reverse
4700 * the configuration
4701 */
4703 if (bp->link_params.multi_phy_config &
4704 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4705 if (sel_phy_idx == EXT_PHY1)
4706 sel_phy_idx = EXT_PHY2;
4707 else if (sel_phy_idx == EXT_PHY2)
4708 sel_phy_idx = EXT_PHY1;
4709 }
4710 return LINK_CONFIG_IDX(sel_phy_idx);
4711 }
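/* Editorial sketch of the swap rule above (assuming the usual EXT_PHY1/2
 * numbering): with PORT_HW_CFG_PHY_SWAPPED_ENABLED set,
 *	active PHY EXT_PHY1 -> configuration index of EXT_PHY2
 *	active PHY EXT_PHY2 -> configuration index of EXT_PHY1
 * i.e. the link configuration is looked up for the pre-swap position.
 */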
4713 #ifdef NETDEV_FCOE_WWNN
4714 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4715 {
4716 struct bnx2x *bp = netdev_priv(dev);
4717 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4719 switch (type) {
4720 case NETDEV_FCOE_WWNN:
4721 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4722 cp->fcoe_wwn_node_name_lo);
4723 break;
4724 case NETDEV_FCOE_WWPN:
4725 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4726 cp->fcoe_wwn_port_name_lo);
4727 break;
4728 default:
4729 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4730 return -EINVAL;
4731 }
4733 return 0;
4734 }
4735 #endif
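/* Editor's note: HILO_U64() (a bnx2x helper macro) glues the two 32-bit
 * shmem words into one 64-bit WWN, roughly ((u64)hi << 32) | lo, so the
 * node/port names exported to the FCoE stack match what the boot code
 * programmed into shared memory.
 */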
4737 /* called with rtnl_lock */
4738 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4739 {
4740 struct bnx2x *bp = netdev_priv(dev);
4742 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4743 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4744 return -EAGAIN;
4745 }
4747 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4748 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4749 BNX2X_ERR("Can't support requested MTU size\n");
4750 return -EINVAL;
4751 }
4753 /* This does not race with packet allocation
4754 * because the actual alloc size is
4755 * only updated as part of load
4756 */
4757 dev->mtu = new_mtu;
4759 return bnx2x_reload_if_running(dev);
4760 }
4762 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4763 netdev_features_t features)
4764 {
4765 struct bnx2x *bp = netdev_priv(dev);
4767 /* TPA requires Rx CSUM offloading */
4768 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4769 features &= ~NETIF_F_LRO;
4770 features &= ~NETIF_F_GRO;
4771 }
4773 return features;
4774 }
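/* Editor's note: LRO and GRO are backed by HW TPA in this driver, and TPA
 * needs the chip's RX checksum result; so, for example, "ethtool -K ethX rx
 * off" (or a build/configuration with TPA disabled) makes this hook silently
 * strip LRO/GRO from the requested feature set.
 */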
4776 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4777 {
4778 struct bnx2x *bp = netdev_priv(dev);
4779 u32 flags = bp->flags;
4780 u32 changes;
4781 bool bnx2x_reload = false;
4783 if (features & NETIF_F_LRO)
4784 flags |= TPA_ENABLE_FLAG;
4785 else
4786 flags &= ~TPA_ENABLE_FLAG;
4788 if (features & NETIF_F_GRO)
4789 flags |= GRO_ENABLE_FLAG;
4790 else
4791 flags &= ~GRO_ENABLE_FLAG;
4793 if (features & NETIF_F_LOOPBACK) {
4794 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4795 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4796 bnx2x_reload = true;
4797 }
4798 } else {
4799 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4800 bp->link_params.loopback_mode = LOOPBACK_NONE;
4801 bnx2x_reload = true;
4802 }
4803 }
4805 changes = flags ^ bp->flags;
4807 /* if GRO is changed while LRO is enabled, don't force a reload */
4808 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4809 changes &= ~GRO_ENABLE_FLAG;
4811 if (changes)
4812 bnx2x_reload = true;
4814 bp->flags = flags;
4816 if (bnx2x_reload) {
4817 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4818 return bnx2x_reload_if_running(dev);
4819 /* else: bnx2x_nic_load() will be called at end of recovery */
4820 }
4822 return 0;
4823 }
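/* Editor's note: the GRO special case above avoids a full reload when LRO
 * (classic TPA aggregation) is also enabled - as far as the flag logic here
 * goes, the GRO bit has no immediate HW effect in that mode, so only the
 * software flag is updated.
 */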
4825 void bnx2x_tx_timeout(struct net_device *dev)
4826 {
4827 struct bnx2x *bp = netdev_priv(dev);
4829 #ifdef BNX2X_STOP_ON_ERROR
4830 if (!bp->panic)
4831 bnx2x_panic();
4832 #endif
4834 /* This allows the netif to be shutdown gracefully before resetting */
4835 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4836 }
4838 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4839 {
4840 struct net_device *dev = pci_get_drvdata(pdev);
4841 struct bnx2x *bp;
4843 if (!dev) {
4844 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4845 return -ENODEV;
4846 }
4847 bp = netdev_priv(dev);
4849 rtnl_lock();
4851 pci_save_state(pdev);
4853 if (!netif_running(dev)) {
4854 rtnl_unlock();
4855 return 0;
4856 }
4858 netif_device_detach(dev);
4860 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4862 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4864 rtnl_unlock();
4866 return 0;
4867 }
4869 int bnx2x_resume(struct pci_dev *pdev)
4870 {
4871 struct net_device *dev = pci_get_drvdata(pdev);
4872 struct bnx2x *bp;
4873 int rc;
4875 if (!dev) {
4876 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4877 return -ENODEV;
4878 }
4879 bp = netdev_priv(dev);
4881 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4882 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4883 return -EAGAIN;
4884 }
4886 rtnl_lock();
4888 pci_restore_state(pdev);
4890 if (!netif_running(dev)) {
4891 rtnl_unlock();
4892 return 0;
4893 }
4895 bnx2x_set_power_state(bp, PCI_D0);
4896 netif_device_attach(dev);
4898 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4900 rtnl_unlock();
4902 return rc;
4903 }
4905 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4906 u32 cid)
4907 {
4908 if (!cxt) {
4909 BNX2X_ERR("bad context pointer %p\n", cxt);
4910 return;
4911 }
4913 /* ustorm cxt validation */
4914 cxt->ustorm_ag_context.cdu_usage =
4915 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4916 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4917 /* xcontext validation */
4918 cxt->xstorm_ag_context.cdu_reserved =
4919 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4920 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4921 }
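/* Editor's note: roughly speaking, CDU_RSRVD_VALUE_TYPE_A() packs the HW CID
 * together with the region number and connection type into the validation
 * byte that the Context Distribution Unit later checks when it fetches this
 * connection's context, catching use of a stale or mis-addressed context
 * line.
 */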
4923 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4924 u8 fw_sb_id, u8 sb_index,
4925 u8 ticks)
4926 {
4927 u32 addr = BAR_CSTRORM_INTMEM +
4928 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4929 REG_WR8(bp, addr, ticks);
4930 DP(NETIF_MSG_IFUP,
4931 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4932 port, fw_sb_id, sb_index, ticks);
4933 }
4935 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4936 u16 fw_sb_id, u8 sb_index,
4937 u8 disable)
4938 {
4939 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4940 u32 addr = BAR_CSTRORM_INTMEM +
4941 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4942 u8 flags = REG_RD8(bp, addr);
4943 /* clear and set */
4944 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4945 flags |= enable_flag;
4946 REG_WR8(bp, addr, flags);
4947 DP(NETIF_MSG_IFUP,
4948 "port %x fw_sb_id %d sb_index %d disable %d\n",
4949 port, fw_sb_id, sb_index, disable);
4950 }
4952 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4953 u8 sb_index, u8 disable, u16 usec)
4954 {
4955 int port = BP_PORT(bp);
4956 u8 ticks = usec / BNX2X_BTR;
4958 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4960 disable = disable ? 1 : (usec ? 0 : 1);
4961 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4962 }
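/* Editorial example (BNX2X_BTR is believed to be 4 in bnx2x.h; treat the
 * constant as an assumption): usec = 100 gives ticks = 25, i.e. a 100us
 * coalescing timeout for this SB index, while usec = 0 forces disable = 1,
 * turning host coalescing off for that index altogether.
 */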
4964 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4965 u32 verbose)
4966 {
4967 smp_mb__before_atomic();
4968 set_bit(flag, &bp->sp_rtnl_state);
4969 smp_mb__after_atomic();
4970 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4971 flag);
4972 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4973 }
4974 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
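/* Editor's note: the smp_mb__before/after_atomic() pair around set_bit()
 * orders the flag update against the schedule_delayed_work() wake-up; the
 * reader side in the sp_rtnl worker is expected to use a matching barrier
 * around its test_and_clear_bit() - that pairing detail is an editorial
 * assumption, the barriers above are as in the original.
 */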