1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
33 * bnx2x_move_fp - move content of the fastpath structure.
36 * @from: source FP index
37 * @to: destination FP index
39 * Makes sure the contents of the bp->fp[to].napi are kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then memcpy'ing the entire
42 * source onto the target. Update txdata pointers and related content.
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
96 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
120 * @delta: number of eth queues which were not allocated
122 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
126 /* Queue pointer cannot be re-set on an fp basis, as moving a pointer
127 * backward along the array could cause memory to be overwritten
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
141 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
143 /* free skb in the packet ring at pos idx
144 * return idx of last bd freed
146 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
157 /* prefetch skb end pointer to speed up dev_kfree_skb() */
160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
161 txdata->txq_index, idx, tx_buf, skb);
164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
168 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
169 #ifdef BNX2X_STOP_ON_ERROR
170 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
171 BNX2X_ERR("BAD nbd!\n");
175 new_cons = nbd + tx_buf->first_bd;
177 /* Get the next bd */
178 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180 /* Skip a parse bd... */
182 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184 /* ...and the TSO split header bd since they have no mapping */
185 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
193 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
194 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
195 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
204 (*bytes_compl) += skb->len;
207 dev_kfree_skb_any(skb);
208 tx_buf->first_bd = 0;
214 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
216 struct netdev_queue *txq;
217 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
218 unsigned int pkts_compl = 0, bytes_compl = 0;
220 #ifdef BNX2X_STOP_ON_ERROR
221 if (unlikely(bp->panic))
225 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
226 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
227 sw_cons = txdata->tx_pkt_cons;
229 while (sw_cons != hw_cons) {
232 pkt_cons = TX_BD(sw_cons);
234 DP(NETIF_MSG_TX_DONE,
235 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
236 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
238 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
239 &pkts_compl, &bytes_compl);
244 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246 txdata->tx_pkt_cons = sw_cons;
247 txdata->tx_bd_cons = bd_cons;
249 /* Need to make the tx_bd_cons update visible to start_xmit()
250 * before checking for netif_tx_queue_stopped(). Without the
251 * memory barrier, there is a small possibility that
252 * start_xmit() will miss it and cause the queue to be stopped forever.
254 * On the other hand we need an rmb() here to ensure the proper
255 * ordering of bit testing in the following
256 * netif_tx_queue_stopped(txq) call.
260 if (unlikely(netif_tx_queue_stopped(txq))) {
261 /* Taking tx_lock() is needed to prevent re-enabling the queue
262 * while it's empty. This could happen if rx_action() gets
263 * suspended in bnx2x_tx_int() after the condition before
264 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266 * stops the queue->sees fresh tx_bd_cons->releases the queue->
267 * sends some packets consuming the whole queue again->
271 __netif_tx_lock(txq, smp_processor_id());
273 if ((netif_tx_queue_stopped(txq)) &&
274 (bp->state == BNX2X_STATE_OPEN) &&
275 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
276 netif_tx_wake_queue(txq);
278 __netif_tx_unlock(txq);
283 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
286 u16 last_max = fp->last_max_sge;
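/* SUB_S16() is a signed 16-bit subtraction, so this comparison stays
 * correct even when the SGE index wraps around.
 */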
288 if (SUB_S16(idx, last_max) > 0)
289 fp->last_max_sge = idx;
292 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294 struct eth_end_agg_rx_cqe *cqe)
296 struct bnx2x *bp = fp->bp;
297 u16 last_max, last_elem, first_elem;
304 /* First mark all used pages */
305 for (i = 0; i < sge_len; i++)
306 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
307 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
309 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
310 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
312 /* Here we assume that the last SGE index is the biggest */
313 prefetch((void *)(fp->sge_mask));
314 bnx2x_update_last_max_sge(fp,
315 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
317 last_max = RX_SGE(fp->last_max_sge);
318 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
319 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
321 /* If ring is not full */
322 if (last_elem + 1 != first_elem)
325 /* Now update the prod */
326 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
327 if (likely(fp->sge_mask[i]))
330 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
331 delta += BIT_VEC64_ELEM_SZ;
335 fp->rx_sge_prod += delta;
336 /* clear page-end entries */
337 bnx2x_clear_sge_mask_next_elems(fp);
340 DP(NETIF_MSG_RX_STATUS,
341 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
342 fp->last_max_sge, fp->rx_sge_prod);
345 /* Get Toeplitz hash value in the skb using the value from the
346 * CQE (calculated by HW).
348 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
349 const struct eth_fast_path_rx_cqe *cqe,
352 /* Get Toeplitz hash from CQE */
353 if ((bp->dev->features & NETIF_F_RXHASH) &&
354 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
355 enum eth_rss_hash_type htype;
357 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
358 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
359 (htype == TCP_IPV6_HASH_TYPE);
360 return le32_to_cpu(cqe->rss_hash_result);
366 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
368 struct eth_fast_path_rx_cqe *cqe)
370 struct bnx2x *bp = fp->bp;
371 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
372 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
373 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
376 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
378 /* print error if current state != stop */
379 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
380 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382 /* Try to map an empty data buffer from the aggregation info */
383 mapping = dma_map_single(&bp->pdev->dev,
384 first_buf->data + NET_SKB_PAD,
385 fp->rx_buf_size, DMA_FROM_DEVICE);
387 * ...if it fails - move the skb from the consumer to the producer
388 * and set the current aggregation state as ERROR to drop it
389 * when TPA_STOP arrives.
392 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
393 /* Move the BD from the consumer to the producer */
394 bnx2x_reuse_rx_data(fp, cons, prod);
395 tpa_info->tpa_state = BNX2X_TPA_ERROR;
399 /* move empty data from pool to prod */
400 prod_rx_buf->data = first_buf->data;
401 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
402 /* point prod_bd to new data */
403 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
404 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406 /* move partial skb from cons to pool (don't unmap yet) */
407 *first_buf = *cons_rx_buf;
409 /* mark bin state as START */
410 tpa_info->parsing_flags =
411 le16_to_cpu(cqe->pars_flags.flags);
412 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
413 tpa_info->tpa_state = BNX2X_TPA_START;
414 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
415 tpa_info->placement_offset = cqe->placement_offset;
416 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
417 if (fp->mode == TPA_MODE_GRO) {
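/* full_page below rounds SGE_PAGES down to a whole multiple of the
 * aggregated segment size (gro_size), so a completely filled SGE page
 * can later be split into equal MSS-sized frags.
 */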
418 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
419 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
420 tpa_info->gro_size = gro_size;
423 #ifdef BNX2X_STOP_ON_ERROR
424 fp->tpa_queue_used |= (1 << queue);
425 #ifdef _ASM_GENERIC_INT_L64_H
426 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
434 /* Timestamp option length allowed for TPA aggregation:
436 * nop nop kind length echo val
438 #define TPA_TSTAMP_OPT_LEN 12
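/* 2 NOP bytes + kind (1) + length (1) + TS value (4) + TS echo reply (4) = 12 */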
440 * bnx2x_set_gro_params - compute GRO values
443 * @parsing_flags: parsing flags from the START CQE
444 * @len_on_bd: total length of the first packet for the aggregation
446 * @pkt_len: length of all segments
448 * The MSS for this aggregation is approximated using
449 * its first packet.
450 * Compute number of aggregated segments, and gso_type.
452 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
453 u16 len_on_bd, unsigned int pkt_len,
454 u16 num_of_coalesced_segs)
456 /* TPA aggregation won't have either IP options or TCP options
457 * other than timestamp or IPv6 extension headers.
459 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
462 PRS_FLAG_OVERETH_IPV6) {
463 hdrs_len += sizeof(struct ipv6hdr);
464 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466 hdrs_len += sizeof(struct iphdr);
467 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
470 /* Check if there was a TCP timestamp; if there is one it will
471 * always be 12 bytes long: nop nop kind length echo val.
473 * Otherwise FW would close the aggregation.
475 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
476 hdrs_len += TPA_TSTAMP_OPT_LEN;
478 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
481 * to skb_shinfo(skb)->gso_segs
483 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
486 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
487 struct bnx2x_fastpath *fp, u16 index)
489 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
490 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
491 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
494 if (unlikely(page == NULL)) {
495 BNX2X_ERR("Can't alloc sge\n");
499 mapping = dma_map_page(&bp->pdev->dev, page, 0,
500 SGE_PAGES, DMA_FROM_DEVICE);
501 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
502 __free_pages(page, PAGES_PER_SGE_SHIFT);
503 BNX2X_ERR("Can't map sge\n");
508 dma_unmap_addr_set(sw_buf, mapping, mapping);
510 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
511 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
516 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
517 struct bnx2x_agg_info *tpa_info,
520 struct eth_end_agg_rx_cqe *cqe,
523 struct sw_rx_page *rx_pg, old_rx_pg;
524 u32 i, frag_len, frag_size;
525 int err, j, frag_id = 0;
526 u16 len_on_bd = tpa_info->len_on_bd;
527 u16 full_page = 0, gro_size = 0;
529 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
531 if (fp->mode == TPA_MODE_GRO) {
532 gro_size = tpa_info->gro_size;
533 full_page = tpa_info->full_page;
536 /* This is needed in order to enable forwarding support */
538 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
539 le16_to_cpu(cqe->pkt_len),
540 le16_to_cpu(cqe->num_of_coalesced_segs));
542 #ifdef BNX2X_STOP_ON_ERROR
543 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
544 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
552 /* Run through the SGL and compose the fragmented skb */
553 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
554 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
556 /* FW gives the indices of the SGE as if the ring is an array
557 (meaning that "next" element will consume 2 indices) */
558 if (fp->mode == TPA_MODE_GRO)
559 frag_len = min_t(u32, frag_size, (u32)full_page);
561 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
563 rx_pg = &fp->rx_page_ring[sge_idx];
566 /* If we fail to allocate a substitute page, we simply stop
567 where we are and drop the whole packet */
568 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
574 /* Unmap the page as we're going to pass it to the stack */
575 dma_unmap_page(&bp->pdev->dev,
576 dma_unmap_addr(&old_rx_pg, mapping),
577 SGE_PAGES, DMA_FROM_DEVICE);
578 /* Add one frag and update the appropriate fields in the skb */
579 if (fp->mode == TPA_MODE_LRO)
580 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
584 for (rem = frag_len; rem > 0; rem -= gro_size) {
585 int len = rem > gro_size ? gro_size : rem;
586 skb_fill_page_desc(skb, frag_id++,
587 old_rx_pg.page, offset, len);
589 get_page(old_rx_pg.page);
594 skb->data_len += frag_len;
595 skb->truesize += SGE_PAGES;
596 skb->len += frag_len;
598 frag_size -= frag_len;
604 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606 if (fp->rx_frag_size)
607 put_page(virt_to_head_page(data));
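/* fp->rx_frag_size is only non-zero when the whole Rx buffer fits in a
 * single page (see bnx2x_set_rx_buf_size()); otherwise the plain kmalloc()
 * path below is used instead of the page-frag allocator.
 */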
612 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614 if (fp->rx_frag_size)
615 return netdev_alloc_frag(fp->rx_frag_size);
617 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
621 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623 const struct iphdr *iph = ip_hdr(skb);
626 skb_set_transport_header(skb, sizeof(struct iphdr));
629 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
630 iph->saddr, iph->daddr, 0);
633 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635 struct ipv6hdr *iph = ipv6_hdr(skb);
638 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
641 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
642 &iph->saddr, &iph->daddr, 0);
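/* The helpers above seed th->check with the TCP pseudo-header checksum,
 * as expected for the CHECKSUM_PARTIAL setup done by tcp_gro_complete()
 * when finalizing a FW-aggregated packet.
 */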
645 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
646 void (*gro_func)(struct bnx2x*, struct sk_buff*))
648 skb_set_network_header(skb, 0);
650 tcp_gro_complete(skb);
654 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
658 if (skb_shinfo(skb)->gso_size) {
659 switch (be16_to_cpu(skb->protocol)) {
661 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
664 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
667 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
668 be16_to_cpu(skb->protocol));
672 napi_gro_receive(&fp->napi, skb);
675 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
676 struct bnx2x_agg_info *tpa_info,
678 struct eth_end_agg_rx_cqe *cqe,
681 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
682 u8 pad = tpa_info->placement_offset;
683 u16 len = tpa_info->len_on_bd;
684 struct sk_buff *skb = NULL;
685 u8 *new_data, *data = rx_buf->data;
686 u8 old_tpa_state = tpa_info->tpa_state;
688 tpa_info->tpa_state = BNX2X_TPA_STOP;
690 /* If there was an error during the handling of the TPA_START -
691 * drop this aggregation.
693 if (old_tpa_state == BNX2X_TPA_ERROR)
696 /* Try to allocate the new data */
697 new_data = bnx2x_frag_alloc(fp);
698 /* Unmap skb in the pool anyway, as we are going to change
699 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
701 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
702 fp->rx_buf_size, DMA_FROM_DEVICE);
703 if (likely(new_data))
704 skb = build_skb(data, fp->rx_frag_size);
707 #ifdef BNX2X_STOP_ON_ERROR
708 if (pad + len > fp->rx_buf_size) {
709 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
710 pad, len, fp->rx_buf_size);
716 skb_reserve(skb, pad + NET_SKB_PAD);
718 skb->rxhash = tpa_info->rxhash;
719 skb->l4_rxhash = tpa_info->l4_rxhash;
721 skb->protocol = eth_type_trans(skb, bp->dev);
722 skb->ip_summed = CHECKSUM_UNNECESSARY;
724 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
725 skb, cqe, cqe_idx)) {
726 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
727 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
728 bnx2x_gro_receive(bp, fp, skb);
730 DP(NETIF_MSG_RX_STATUS,
731 "Failed to allocate new pages - dropping packet!\n");
732 dev_kfree_skb_any(skb);
735 /* put new data in bin */
736 rx_buf->data = new_data;
740 bnx2x_frag_free(fp, new_data);
742 /* drop the packet and keep the buffer in the bin */
743 DP(NETIF_MSG_RX_STATUS,
744 "Failed to allocate or map a new skb - dropping packet!\n");
745 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
748 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
749 struct bnx2x_fastpath *fp, u16 index)
752 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
753 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
756 data = bnx2x_frag_alloc(fp);
757 if (unlikely(data == NULL))
760 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
763 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
764 bnx2x_frag_free(fp, data);
765 BNX2X_ERR("Can't map rx data\n");
770 dma_unmap_addr_set(rx_buf, mapping, mapping);
772 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
773 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
779 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
780 struct bnx2x_fastpath *fp,
781 struct bnx2x_eth_q_stats *qstats)
783 /* Do nothing if no L4 csum validation was done.
784 * We do not check whether IP csum was validated. For IPv4 we assume
785 * that if the card got as far as validating the L4 csum, it also
786 * validated the IP csum. IPv6 has no IP csum.
788 if (cqe->fast_path_cqe.status_flags &
789 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
792 /* If L4 validation was done, check if an error was found. */
794 if (cqe->fast_path_cqe.type_error_flags &
795 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
796 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
797 qstats->hw_csum_err++;
799 skb->ip_summed = CHECKSUM_UNNECESSARY;
802 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
804 struct bnx2x *bp = fp->bp;
805 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
806 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
809 #ifdef BNX2X_STOP_ON_ERROR
810 if (unlikely(bp->panic))
814 /* CQ "next element" is of the size of the regular element,
815 that's why it's ok here */
816 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
817 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
820 bd_cons = fp->rx_bd_cons;
821 bd_prod = fp->rx_bd_prod;
822 bd_prod_fw = bd_prod;
823 sw_comp_cons = fp->rx_comp_cons;
824 sw_comp_prod = fp->rx_comp_prod;
826 /* Memory barrier necessary as speculative reads of the rx
827 * buffer can be ahead of the index in the status block
831 DP(NETIF_MSG_RX_STATUS,
832 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
833 fp->index, hw_comp_cons, sw_comp_cons);
835 while (sw_comp_cons != hw_comp_cons) {
836 struct sw_rx_bd *rx_buf = NULL;
838 union eth_rx_cqe *cqe;
839 struct eth_fast_path_rx_cqe *cqe_fp;
841 enum eth_rx_cqe_type cqe_fp_type;
846 #ifdef BNX2X_STOP_ON_ERROR
847 if (unlikely(bp->panic))
851 comp_ring_cons = RCQ_BD(sw_comp_cons);
852 bd_prod = RX_BD(bd_prod);
853 bd_cons = RX_BD(bd_cons);
855 cqe = &fp->rx_comp_ring[comp_ring_cons];
856 cqe_fp = &cqe->fast_path_cqe;
857 cqe_fp_flags = cqe_fp->type_error_flags;
858 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
860 DP(NETIF_MSG_RX_STATUS,
861 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
862 CQE_TYPE(cqe_fp_flags),
863 cqe_fp_flags, cqe_fp->status_flags,
864 le32_to_cpu(cqe_fp->rss_hash_result),
865 le16_to_cpu(cqe_fp->vlan_tag),
866 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
868 /* is this a slowpath msg? */
869 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
870 bnx2x_sp_event(fp, cqe);
874 rx_buf = &fp->rx_buf_ring[bd_cons];
877 if (!CQE_TYPE_FAST(cqe_fp_type)) {
878 struct bnx2x_agg_info *tpa_info;
879 u16 frag_size, pages;
880 #ifdef BNX2X_STOP_ON_ERROR
882 if (fp->disable_tpa &&
883 (CQE_TYPE_START(cqe_fp_type) ||
884 CQE_TYPE_STOP(cqe_fp_type)))
885 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
886 CQE_TYPE(cqe_fp_type));
889 if (CQE_TYPE_START(cqe_fp_type)) {
890 u16 queue = cqe_fp->queue_index;
891 DP(NETIF_MSG_RX_STATUS,
892 "calling tpa_start on queue %d\n",
895 bnx2x_tpa_start(fp, queue,
901 queue = cqe->end_agg_cqe.queue_index;
902 tpa_info = &fp->tpa_info[queue];
903 DP(NETIF_MSG_RX_STATUS,
904 "calling tpa_stop on queue %d\n",
907 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
910 if (fp->mode == TPA_MODE_GRO)
911 pages = (frag_size + tpa_info->full_page - 1) /
914 pages = SGE_PAGE_ALIGN(frag_size) >>
917 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
918 &cqe->end_agg_cqe, comp_ring_cons);
919 #ifdef BNX2X_STOP_ON_ERROR
924 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
928 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
929 pad = cqe_fp->placement_offset;
930 dma_sync_single_for_cpu(&bp->pdev->dev,
931 dma_unmap_addr(rx_buf, mapping),
932 pad + RX_COPY_THRESH,
935 prefetch(data + pad); /* speed up eth_type_trans() */
936 /* is this an error packet? */
937 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
938 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
939 "ERROR flags %x rx packet %u\n",
940 cqe_fp_flags, sw_comp_cons);
941 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
945 /* Since we don't have a jumbo ring
946 * copy small packets if mtu > 1500
948 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
949 (len <= RX_COPY_THRESH)) {
950 skb = netdev_alloc_skb_ip_align(bp->dev, len);
952 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
953 "ERROR packet dropped because of alloc failure\n");
954 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
957 memcpy(skb->data, data + pad, len);
958 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
960 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
961 dma_unmap_single(&bp->pdev->dev,
962 dma_unmap_addr(rx_buf, mapping),
965 skb = build_skb(data, fp->rx_frag_size);
966 if (unlikely(!skb)) {
967 bnx2x_frag_free(fp, data);
968 bnx2x_fp_qstats(bp, fp)->
969 rx_skb_alloc_failed++;
972 skb_reserve(skb, pad);
974 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
975 "ERROR packet dropped because of alloc failure\n");
976 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
978 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
984 skb->protocol = eth_type_trans(skb, bp->dev);
986 /* Set Toeplitz hash for a non-LRO skb */
987 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
988 skb->l4_rxhash = l4_rxhash;
990 skb_checksum_none_assert(skb);
992 if (bp->dev->features & NETIF_F_RXCSUM)
993 bnx2x_csum_validate(skb, cqe, fp,
994 bnx2x_fp_qstats(bp, fp));
996 skb_record_rx_queue(skb, fp->rx_queue);
998 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1000 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1001 le16_to_cpu(cqe_fp->vlan_tag));
1002 napi_gro_receive(&fp->napi, skb);
1005 rx_buf->data = NULL;
1007 bd_cons = NEXT_RX_IDX(bd_cons);
1008 bd_prod = NEXT_RX_IDX(bd_prod);
1009 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1012 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1013 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1015 if (rx_pkt == budget)
1019 fp->rx_bd_cons = bd_cons;
1020 fp->rx_bd_prod = bd_prod_fw;
1021 fp->rx_comp_cons = sw_comp_cons;
1022 fp->rx_comp_prod = sw_comp_prod;
1024 /* Update producers */
1025 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1028 fp->rx_pkt += rx_pkt;
1034 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1036 struct bnx2x_fastpath *fp = fp_cookie;
1037 struct bnx2x *bp = fp->bp;
1041 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1042 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1044 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1046 #ifdef BNX2X_STOP_ON_ERROR
1047 if (unlikely(bp->panic))
1051 /* Handle Rx and Tx according to MSI-X vector */
1052 prefetch(fp->rx_cons_sb);
1054 for_each_cos_in_tx_queue(fp, cos)
1055 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1057 prefetch(&fp->sb_running_index[SM_RX_ID]);
1058 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1063 /* HW Lock for shared dual port PHYs */
1064 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1066 mutex_lock(&bp->port.phy_mutex);
1068 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1071 void bnx2x_release_phy_lock(struct bnx2x *bp)
1073 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1075 mutex_unlock(&bp->port.phy_mutex);
1078 /* calculates MF speed according to current line speed and MF configuration */
1079 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1081 u16 line_speed = bp->link_vars.line_speed;
1083 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1084 bp->mf_config[BP_VN(bp)]);
1086 /* Calculate the current MAX line speed limit for the MF devices */
1090 line_speed = (line_speed * maxCfg) / 100;
1091 else { /* SD mode */
1092 u16 vn_max_rate = maxCfg * 100;
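/* In SD mode maxCfg is given in 100 Mbps units, so this converts it to
 * Mbps before it is used to clamp the reported line speed.
 */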
1094 if (vn_max_rate < line_speed)
1095 line_speed = vn_max_rate;
1103 * bnx2x_fill_report_data - fill link report data to report
1105 * @bp: driver handle
1106 * @data: link state to update
1108 * It uses non-atomic bit operations because it is called under the mutex.
1110 static void bnx2x_fill_report_data(struct bnx2x *bp,
1111 struct bnx2x_link_report_data *data)
1113 u16 line_speed = bnx2x_get_mf_speed(bp);
1115 memset(data, 0, sizeof(*data));
1117 /* Fill the report data: effective line speed */
1118 data->line_speed = line_speed;
1121 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1122 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1123 &data->link_report_flags);
1126 if (bp->link_vars.duplex == DUPLEX_FULL)
1127 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1129 /* Rx Flow Control is ON */
1130 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1131 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1133 /* Tx Flow Control is ON */
1134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1135 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1139 * bnx2x_link_report - report link status to OS.
1141 * @bp: driver handle
1143 * Calls the __bnx2x_link_report() under the same locking scheme
1144 * as the link/PHY state managing code to ensure consistent link reporting.
1148 void bnx2x_link_report(struct bnx2x *bp)
1150 bnx2x_acquire_phy_lock(bp);
1151 __bnx2x_link_report(bp);
1152 bnx2x_release_phy_lock(bp);
1156 * __bnx2x_link_report - report link status to OS.
1158 * @bp: driver handle
1160 * Non-atomic implementation.
1161 * Should be called under the phy_lock.
1163 void __bnx2x_link_report(struct bnx2x *bp)
1165 struct bnx2x_link_report_data cur_data;
1168 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1169 bnx2x_read_mf_cfg(bp);
1171 /* Read the current link report info */
1172 bnx2x_fill_report_data(bp, &cur_data);
1174 /* Don't report link down or exactly the same link status twice */
1175 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1176 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1177 &bp->last_reported_link.link_report_flags) &&
1178 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1179 &cur_data.link_report_flags)))
1184 /* We are going to report new link parameters now -
1185 * remember the current data for next time.
1187 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1189 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &cur_data.link_report_flags)) {
1191 netif_carrier_off(bp->dev);
1192 netdev_err(bp->dev, "NIC Link is Down\n");
1198 netif_carrier_on(bp->dev);
1200 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1201 &cur_data.link_report_flags))
1206 /* Handle the FC at the end so that only these flags would possibly
1207 * be set. This way we may easily check if there is no FC enabled.
1210 if (cur_data.link_report_flags) {
1211 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1212 &cur_data.link_report_flags)) {
1213 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1214 &cur_data.link_report_flags))
1215 flow = "ON - receive & transmit";
1217 flow = "ON - receive";
1219 flow = "ON - transmit";
1224 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1225 cur_data.line_speed, duplex, flow);
1229 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 struct eth_rx_sge *sge;
1236 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1238 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1239 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1242 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1247 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1248 struct bnx2x_fastpath *fp, int last)
1252 for (i = 0; i < last; i++) {
1253 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1254 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1255 u8 *data = first_buf->data;
1258 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1261 if (tpa_info->tpa_state == BNX2X_TPA_START)
1262 dma_unmap_single(&bp->pdev->dev,
1263 dma_unmap_addr(first_buf, mapping),
1264 fp->rx_buf_size, DMA_FROM_DEVICE);
1265 bnx2x_frag_free(fp, data);
1266 first_buf->data = NULL;
1270 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1274 for_each_rx_queue_cnic(bp, j) {
1275 struct bnx2x_fastpath *fp = &bp->fp[j];
1279 /* Activate BD ring */
1281 * this will generate an interrupt (to the TSTORM)
1282 * must only be done after chip is initialized
1284 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289 void bnx2x_init_rx_rings(struct bnx2x *bp)
1291 int func = BP_FUNC(bp);
1295 /* Allocate TPA resources */
1296 for_each_eth_queue(bp, j) {
1297 struct bnx2x_fastpath *fp = &bp->fp[j];
1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1302 if (!fp->disable_tpa) {
1303 /* Fill the per-aggregation pool */
1304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1305 struct bnx2x_agg_info *tpa_info =
1307 struct sw_rx_bd *first_buf =
1308 &tpa_info->first_buf;
1310 first_buf->data = bnx2x_frag_alloc(fp);
1311 if (!first_buf->data) {
1312 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1314 bnx2x_free_tpa_pool(bp, fp, i);
1315 fp->disable_tpa = 1;
1318 dma_unmap_addr_set(first_buf, mapping, 0);
1319 tpa_info->tpa_state = BNX2X_TPA_STOP;
1322 /* "next page" elements initialization */
1323 bnx2x_set_next_page_sgl(fp);
1325 /* set SGEs bit mask */
1326 bnx2x_init_sge_ring_bit_mask(fp);
1328 /* Allocate SGEs and initialize the ring elements */
1329 for (i = 0, ring_prod = 0;
1330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1333 BNX2X_ERR("was only able to allocate %d rx sges\n",
1335 BNX2X_ERR("disabling TPA for queue[%d]\n",
1337 /* Cleanup already allocated elements */
1338 bnx2x_free_rx_sge_range(bp, fp,
1340 bnx2x_free_tpa_pool(bp, fp,
1342 fp->disable_tpa = 1;
1346 ring_prod = NEXT_SGE_IDX(ring_prod);
1349 fp->rx_sge_prod = ring_prod;
1353 for_each_eth_queue(bp, j) {
1354 struct bnx2x_fastpath *fp = &bp->fp[j];
1358 /* Activate BD ring */
1360 * this will generate an interrupt (to the TSTORM)
1361 * must only be done after chip is initialized
1363 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1369 if (CHIP_IS_E1(bp)) {
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1372 U64_LO(fp->rx_comp_mapping));
1373 REG_WR(bp, BAR_USTRORM_INTMEM +
1374 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1375 U64_HI(fp->rx_comp_mapping));
1380 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1383 struct bnx2x *bp = fp->bp;
1385 for_each_cos_in_tx_queue(fp, cos) {
1386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1387 unsigned pkts_compl = 0, bytes_compl = 0;
1389 u16 sw_prod = txdata->tx_pkt_prod;
1390 u16 sw_cons = txdata->tx_pkt_cons;
1392 while (sw_cons != sw_prod) {
1393 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1394 &pkts_compl, &bytes_compl);
1398 netdev_tx_reset_queue(
1399 netdev_get_tx_queue(bp->dev,
1400 txdata->txq_index));
1404 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1408 for_each_tx_queue_cnic(bp, i) {
1409 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1413 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1417 for_each_eth_queue(bp, i) {
1418 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1422 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1424 struct bnx2x *bp = fp->bp;
1427 /* ring wasn't allocated */
1428 if (fp->rx_buf_ring == NULL)
1431 for (i = 0; i < NUM_RX_BD; i++) {
1432 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1433 u8 *data = rx_buf->data;
1437 dma_unmap_single(&bp->pdev->dev,
1438 dma_unmap_addr(rx_buf, mapping),
1439 fp->rx_buf_size, DMA_FROM_DEVICE);
1441 rx_buf->data = NULL;
1442 bnx2x_frag_free(fp, data);
1446 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1450 for_each_rx_queue_cnic(bp, j) {
1451 bnx2x_free_rx_bds(&bp->fp[j]);
1455 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1459 for_each_eth_queue(bp, j) {
1460 struct bnx2x_fastpath *fp = &bp->fp[j];
1462 bnx2x_free_rx_bds(fp);
1464 if (!fp->disable_tpa)
1465 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1469 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1471 bnx2x_free_tx_skbs_cnic(bp);
1472 bnx2x_free_rx_skbs_cnic(bp);
1475 void bnx2x_free_skbs(struct bnx2x *bp)
1477 bnx2x_free_tx_skbs(bp);
1478 bnx2x_free_rx_skbs(bp);
1481 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1483 /* load old values */
1484 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1486 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1487 /* leave all but MAX value */
1488 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1490 /* set new MAX value */
1491 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1492 & FUNC_MF_CFG_MAX_BW_MASK;
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1501 * @bp: driver handle
1502 * @nvecs: number of vectors to be released
1504 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1508 if (nvecs == offset)
1511 /* VFs don't have a default SB */
1513 free_irq(bp->msix_table[offset].vector, bp->dev);
1514 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1515 bp->msix_table[offset].vector);
1519 if (CNIC_SUPPORT(bp)) {
1520 if (nvecs == offset)
1525 for_each_eth_queue(bp, i) {
1526 if (nvecs == offset)
1528 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1529 i, bp->msix_table[offset].vector);
1531 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1535 void bnx2x_free_irq(struct bnx2x *bp)
1537 if (bp->flags & USING_MSIX_FLAG &&
1538 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1539 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1541 /* vfs don't have a default status block */
1545 bnx2x_free_msix_irqs(bp, nvecs);
1547 free_irq(bp->dev->irq, bp->dev);
1551 int bnx2x_enable_msix(struct bnx2x *bp)
1553 int msix_vec = 0, i, rc;
1555 /* VFs don't have a default status block */
1557 bp->msix_table[msix_vec].entry = msix_vec;
1558 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1559 bp->msix_table[0].entry);
1563 /* Cnic requires an msix vector for itself */
1564 if (CNIC_SUPPORT(bp)) {
1565 bp->msix_table[msix_vec].entry = msix_vec;
1566 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1567 msix_vec, bp->msix_table[msix_vec].entry);
1571 /* We need separate vectors for ETH queues only (not FCoE) */
1572 for_each_eth_queue(bp, i) {
1573 bp->msix_table[msix_vec].entry = msix_vec;
1574 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1575 msix_vec, msix_vec, i);
1579 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1582 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1585 * reconfigure number of tx/rx queues according to available MSI-X vectors
1588 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1589 /* how many fewer vectors will we have? */
1590 int diff = msix_vec - rc;
1592 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1594 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1597 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1601 * decrease number of queues by number of unallocated entries
1603 bp->num_ethernet_queues -= diff;
1604 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1606 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1608 } else if (rc > 0) {
1609 /* Get by with single vector */
1610 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1612 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1618 bp->flags |= USING_SINGLE_MSIX_FLAG;
1620 BNX2X_DEV_INFO("set number of queues to 1\n");
1621 bp->num_ethernet_queues = 1;
1622 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1623 } else if (rc < 0) {
1624 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1628 bp->flags |= USING_MSIX_FLAG;
1633 /* fall to INTx if not enough memory */
1635 bp->flags |= DISABLE_MSI_FLAG;
1640 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1642 int i, rc, offset = 0;
1644 /* no default status block for vf */
1646 rc = request_irq(bp->msix_table[offset++].vector,
1647 bnx2x_msix_sp_int, 0,
1648 bp->dev->name, bp->dev);
1650 BNX2X_ERR("request sp irq failed\n");
1655 if (CNIC_SUPPORT(bp))
1658 for_each_eth_queue(bp, i) {
1659 struct bnx2x_fastpath *fp = &bp->fp[i];
1660 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1663 rc = request_irq(bp->msix_table[offset].vector,
1664 bnx2x_msix_fp_int, 0, fp->name, fp);
1666 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1667 bp->msix_table[offset].vector, rc);
1668 bnx2x_free_msix_irqs(bp, offset);
1675 i = BNX2X_NUM_ETH_QUEUES(bp);
1677 offset = 1 + CNIC_SUPPORT(bp);
1678 netdev_info(bp->dev,
1679 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1680 bp->msix_table[0].vector,
1681 0, bp->msix_table[offset].vector,
1682 i - 1, bp->msix_table[offset + i - 1].vector);
1684 offset = CNIC_SUPPORT(bp);
1685 netdev_info(bp->dev,
1686 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1693 int bnx2x_enable_msi(struct bnx2x *bp)
1697 rc = pci_enable_msi(bp->pdev);
1699 BNX2X_DEV_INFO("MSI is not attainable\n");
1702 bp->flags |= USING_MSI_FLAG;
1707 static int bnx2x_req_irq(struct bnx2x *bp)
1709 unsigned long flags;
1712 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1715 flags = IRQF_SHARED;
1717 if (bp->flags & USING_MSIX_FLAG)
1718 irq = bp->msix_table[0].vector;
1720 irq = bp->pdev->irq;
1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1725 int bnx2x_setup_irqs(struct bnx2x *bp)
1728 if (bp->flags & USING_MSIX_FLAG &&
1729 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1730 rc = bnx2x_req_msix_irqs(bp);
1734 rc = bnx2x_req_irq(bp);
1736 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1739 if (bp->flags & USING_MSI_FLAG) {
1740 bp->dev->irq = bp->pdev->irq;
1741 netdev_info(bp->dev, "using MSI IRQ %d\n",
1744 if (bp->flags & USING_MSIX_FLAG) {
1745 bp->dev->irq = bp->msix_table[0].vector;
1746 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1754 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1758 for_each_rx_queue_cnic(bp, i)
1759 napi_enable(&bnx2x_fp(bp, i, napi));
1762 static void bnx2x_napi_enable(struct bnx2x *bp)
1766 for_each_eth_queue(bp, i)
1767 napi_enable(&bnx2x_fp(bp, i, napi));
1770 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1774 for_each_rx_queue_cnic(bp, i)
1775 napi_disable(&bnx2x_fp(bp, i, napi));
1778 static void bnx2x_napi_disable(struct bnx2x *bp)
1782 for_each_eth_queue(bp, i)
1783 napi_disable(&bnx2x_fp(bp, i, napi));
1786 void bnx2x_netif_start(struct bnx2x *bp)
1788 if (netif_running(bp->dev)) {
1789 bnx2x_napi_enable(bp);
1790 if (CNIC_LOADED(bp))
1791 bnx2x_napi_enable_cnic(bp);
1792 bnx2x_int_enable(bp);
1793 if (bp->state == BNX2X_STATE_OPEN)
1794 netif_tx_wake_all_queues(bp->dev);
1798 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1800 bnx2x_int_disable_sync(bp, disable_hw);
1801 bnx2x_napi_disable(bp);
1802 if (CNIC_LOADED(bp))
1803 bnx2x_napi_disable_cnic(bp);
1806 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1808 struct bnx2x *bp = netdev_priv(dev);
1810 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1811 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1812 u16 ether_type = ntohs(hdr->h_proto);
1814 /* Skip VLAN tag if present */
1815 if (ether_type == ETH_P_8021Q) {
1816 struct vlan_ethhdr *vhdr =
1817 (struct vlan_ethhdr *)skb->data;
1819 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1822 /* If ethertype is FCoE or FIP - use FCoE ring */
1823 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1824 return bnx2x_fcoe_tx(bp, txq_index);
1827 /* select a non-FCoE queue */
1828 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1831 void bnx2x_set_num_queues(struct bnx2x *bp)
1834 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1836 /* override in STORAGE SD modes */
1837 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1838 bp->num_ethernet_queues = 1;
1840 /* Add special queues */
1841 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1842 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1844 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1848 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1850 * @bp: Driver handle
1852 * We currently support at most 16 Tx queues for each CoS, thus we will
1853 * allocate a multiple of 16 for ETH L2 rings according to the value of bp->max_cos.
1856 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1857 * index after all ETH L2 indices.
1859 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1860 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1861 * 16..31,...) with indices that are not coupled with any real Tx queue.
1863 * The proper configuration of skb->queue_mapping is handled by
1864 * bnx2x_select_queue() and __skb_tx_hash().
1866 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1867 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1869 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1873 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1874 rx = BNX2X_NUM_ETH_QUEUES(bp);
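/* e.g. with 8 ETH queues and bp->max_cos == 3 this asks the stack for
 * 24 real Tx queues and 8 Rx queues.
 */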
1876 /* account for fcoe queue */
1877 if (include_cnic && !NO_FCOE(bp)) {
1882 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1884 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1887 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1889 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1893 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1899 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1903 for_each_queue(bp, i) {
1904 struct bnx2x_fastpath *fp = &bp->fp[i];
1907 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1910 * Although there are no IP frames expected to arrive to
1911 * this ring we still want to add an
1912 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun
1915 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1918 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1919 IP_HEADER_ALIGNMENT_PADDING +
1922 BNX2X_FW_RX_ALIGN_END;
1923 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1924 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1925 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1927 fp->rx_frag_size = 0;
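/* rx_frag_size == 0 makes bnx2x_frag_alloc() fall back to kmalloc() for
 * this queue's Rx buffers.
 */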
1931 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1934 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1936 /* Prepare the initial contents of the indirection table if RSS is enabled */
1939 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1940 bp->rss_conf_obj.ind_table[i] =
1942 ethtool_rxfh_indir_default(i, num_eth_queues);
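/* ethtool_rxfh_indir_default(i, n) is effectively i % n, spreading the
 * indirection table evenly across the ETH queues.
 */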
1945 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1946 * per-port, so if explicit configuration is needed, do it only for a PMF.
1949 * For 57712 and newer on the other hand it's a per-function configuration.
1952 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1955 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1958 struct bnx2x_config_rss_params params = {NULL};
1960 /* Although RSS is meaningless when there is a single HW queue we
1961 * still need it enabled in order to have HW Rx hash generated.
1963 * if (!is_eth_multi(bp))
1964 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1967 params.rss_obj = rss_obj;
1969 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1971 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1973 /* RSS configuration */
1974 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1975 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1976 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1977 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1978 if (rss_obj->udp_rss_v4)
1979 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1980 if (rss_obj->udp_rss_v6)
1981 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1984 params.rss_result_mask = MULTI_MASK;
1986 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1990 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1991 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1994 return bnx2x_config_rss(bp, &params);
1997 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1999 struct bnx2x_func_state_params func_params = {NULL};
2001 /* Prepare parameters for function state transitions */
2002 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2004 func_params.f_obj = &bp->func_obj;
2005 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2007 func_params.params.hw_init.load_phase = load_code;
2009 return bnx2x_func_state_change(bp, &func_params);
2013 * Cleans the objects that have internal lists without sending
2014 * ramrods. Should be run when interrupts are disabled.
2016 void bnx2x_squeeze_objects(struct bnx2x *bp)
2019 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2020 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2021 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2023 /***************** Cleanup MACs' object first *************************/
2025 /* Wait for completion of requested */
2026 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2027 /* Perform a dry cleanup */
2028 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2030 /* Clean ETH primary MAC */
2031 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2032 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2035 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2037 /* Cleanup UC list */
2039 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2040 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2043 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2045 /***************** Now clean mcast object *****************************/
2046 rparam.mcast_obj = &bp->mcast_obj;
2047 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2049 /* Add a DEL command... */
2050 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2052 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2055 /* ...and wait until all pending commands are cleared */
2056 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2059 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2064 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2068 #ifndef BNX2X_STOP_ON_ERROR
2069 #define LOAD_ERROR_EXIT(bp, label) \
2071 (bp)->state = BNX2X_STATE_ERROR; \
2075 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2077 bp->cnic_loaded = false; \
2080 #else /*BNX2X_STOP_ON_ERROR*/
2081 #define LOAD_ERROR_EXIT(bp, label) \
2083 (bp)->state = BNX2X_STATE_ERROR; \
2087 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2089 bp->cnic_loaded = false; \
2093 #endif /*BNX2X_STOP_ON_ERROR*/
2095 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2097 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2098 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2102 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2104 int num_groups, vf_headroom = 0;
2105 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2107 /* number of queues for statistics is number of eth queues + FCoE */
2108 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2110 /* Total number of FW statistics requests =
2111 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2112 * and fcoe l2 queue) stats + num of queues (which includes another 1
2113 * for fcoe l2 queue if applicable)
2115 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2117 /* vf stats appear in the request list, but their data is allocated by
2118 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2119 * it is used to determine where to place the vf stats queries in the request struct.
2123 vf_headroom = bnx2x_vf_headroom(bp);
2125 /* Request is built from stats_query_header and an array of
2126 * stats_query_cmd_group each of which contains
2127 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2128 * configured in the stats_query_header.
2131 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2132 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2135 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2136 bp->fw_stats_num, vf_headroom, num_groups);
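/* i.e. num_groups == DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
 * STATS_QUERY_CMD_COUNT)
 */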
2137 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2138 num_groups * sizeof(struct stats_query_cmd_group);
2140 /* Data for statistics requests + stats_counter
2141 * stats_counter holds per-STORM counters that are incremented
2142 * when STORM has finished with the current request.
2143 * memory for FCoE offloaded statistics are counted anyway,
2144 * even if they will not be sent.
2145 * VF stats are not accounted for here as the data of VF stats is stored
2146 * in memory allocated by the VF, not here.
2148 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2149 sizeof(struct per_pf_stats) +
2150 sizeof(struct fcoe_statistics_params) +
2151 sizeof(struct per_queue_stats) * num_queue_stats +
2152 sizeof(struct stats_counter);
2154 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2155 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2158 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2159 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2160 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2161 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2162 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2163 bp->fw_stats_req_sz;
2165 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2166 U64_HI(bp->fw_stats_req_mapping),
2167 U64_LO(bp->fw_stats_req_mapping));
2168 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2169 U64_HI(bp->fw_stats_data_mapping),
2170 U64_LO(bp->fw_stats_data_mapping));
2174 bnx2x_free_fw_stats_mem(bp);
2175 BNX2X_ERR("Can't allocate FW stats memory\n");
2179 /* send load request to mcp and analyze response */
2180 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2186 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2187 DRV_MSG_SEQ_NUMBER_MASK);
2188 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2190 /* Get current FW pulse sequence */
2191 bp->fw_drv_pulse_wr_seq =
2192 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2193 DRV_PULSE_SEQ_MASK);
2194 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2196 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2198 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2199 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2202 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2204 /* if mcp fails to respond we must abort */
2205 if (!(*load_code)) {
2206 BNX2X_ERR("MCP response failure, aborting\n");
2210 /* If mcp refused (e.g. other port is in diagnostic mode) we must abort */
2213 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2214 BNX2X_ERR("MCP refused load request, aborting\n");
2220 /* check whether another PF has already loaded FW to chip. In
2221 * virtualized environments a pf from another VM may have already
2222 * initialized the device including loading FW
2224 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2226 /* is another pf loaded on this engine? */
2227 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2228 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2229 /* build my FW version dword */
2230 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2231 (BCM_5710_FW_MINOR_VERSION << 8) +
2232 (BCM_5710_FW_REVISION_VERSION << 16) +
2233 (BCM_5710_FW_ENGINEERING_VERSION << 24);
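/* one byte per version component, major version in the lowest byte - the
 * same dword layout that is read back from XSEM_REG_PRAM below for the
 * comparison.
 */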
2235 /* read loaded FW from chip */
2236 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2238 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2241 /* abort nic load if version mismatch */
2242 if (my_fw != loaded_fw) {
2243 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2251 /* returns the "mcp load_code" according to global load_count array */
2252 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2254 int path = BP_PATH(bp);
2256 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2257 path, load_count[path][0], load_count[path][1],
2258 load_count[path][2]);
2259 load_count[path][0]++;
2260 load_count[path][1 + port]++;
2261 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2262 path, load_count[path][0], load_count[path][1],
2263 load_count[path][2]);
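/* The first driver on the engine/path performs the COMMON init, the first
 * driver on each port performs the PORT init, everyone else only does the
 * FUNCTION init.
 */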
2264 if (load_count[path][0] == 1)
2265 return FW_MSG_CODE_DRV_LOAD_COMMON;
2266 else if (load_count[path][1 + port] == 1)
2267 return FW_MSG_CODE_DRV_LOAD_PORT;
2269 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2272 /* mark PMF if applicable */
2273 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2275 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2276 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2277 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2279 /* We need the barrier to ensure the ordering between the
2280 * writing to bp->port.pmf here and reading it from the
2281 * bnx2x_periodic_task().
2288 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2291 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2293 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2294 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2295 (bp->common.shmem2_base)) {
2296 if (SHMEM2_HAS(bp, dcc_support))
2297 SHMEM2_WR(bp, dcc_support,
2298 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2299 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2300 if (SHMEM2_HAS(bp, afex_driver_support))
2301 SHMEM2_WR(bp, afex_driver_support,
2302 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2305 /* Set AFEX default VLAN tag to an invalid value */
2306 bp->afex_def_vlan_tag = -1;
2310 * bnx2x_bz_fp - zero content of the fastpath structure.
2312 * @bp: driver handle
2313 * @index: fastpath index to be zeroed
2315 * Makes sure the contents of bp->fp[index].napi are kept intact.
2318 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2320 struct bnx2x_fastpath *fp = &bp->fp[index];
2322 struct napi_struct orig_napi = fp->napi;
2323 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2325 /* bzero bnx2x_fastpath contents */
2327 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2328 sizeof(struct bnx2x_agg_info));
2329 memset(fp, 0, sizeof(*fp));
2331 /* Restore the NAPI object as it has been already initialized */
2332 fp->napi = orig_napi;
2333 fp->tpa_info = orig_tpa_info;
2337 fp->max_cos = bp->max_cos;
2339 /* Special queues support only one CoS */
2342 /* Init txdata pointers */
2344 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2346 for_each_cos_in_tx_queue(fp, cos)
2347 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2348 BNX2X_NUM_ETH_QUEUES(bp) + index];
2350 /* set the tpa flag for each queue. The tpa flag determines the queue
2351 * minimal size so it must be set prior to queue memory allocation
2353 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2354 (bp->flags & GRO_ENABLE_FLAG &&
2355 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2356 if (bp->flags & TPA_ENABLE_FLAG)
2357 fp->mode = TPA_MODE_LRO;
2358 else if (bp->flags & GRO_ENABLE_FLAG)
2359 fp->mode = TPA_MODE_GRO;
2361 /* We don't want TPA on an FCoE L2 ring */
2363 fp->disable_tpa = 1;
2366 int bnx2x_load_cnic(struct bnx2x *bp)
2368 int i, rc, port = BP_PORT(bp);
2370 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2372 mutex_init(&bp->cnic_mutex);
2375 rc = bnx2x_alloc_mem_cnic(bp);
2377 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2378 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2382 rc = bnx2x_alloc_fp_mem_cnic(bp);
2384 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2385 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2388 /* Update the number of queues with the cnic queues */
2389 rc = bnx2x_set_real_num_queues(bp, 1);
2391 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2392 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2395 /* Add all CNIC NAPI objects */
2396 bnx2x_add_all_napi_cnic(bp);
2397 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2398 bnx2x_napi_enable_cnic(bp);
2400 rc = bnx2x_init_hw_func_cnic(bp);
2402 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2404 bnx2x_nic_init_cnic(bp);
2407 /* Enable Timer scan */
2408 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2410 /* setup cnic queues */
2411 for_each_cnic_queue(bp, i) {
2412 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2414 BNX2X_ERR("Queue setup failed\n");
2415 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2420 /* Initialize Rx filter. */
2421 netif_addr_lock_bh(bp->dev);
2422 bnx2x_set_rx_mode(bp->dev);
2423 netif_addr_unlock_bh(bp->dev);
2425 /* re-read iscsi info */
2426 bnx2x_get_iscsi_info(bp);
2427 bnx2x_setup_cnic_irq_info(bp);
2428 bnx2x_setup_cnic_info(bp);
2429 bp->cnic_loaded = true;
2430 if (bp->state == BNX2X_STATE_OPEN)
2431 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2433 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2437 #ifndef BNX2X_STOP_ON_ERROR
2439 /* Disable Timer scan */
2440 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2443 bnx2x_napi_disable_cnic(bp);
2444 /* Update the number of queues without the cnic queues */
2445 rc = bnx2x_set_real_num_queues(bp, 0);
2447 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2449 BNX2X_ERR("CNIC-related load failed\n");
2450 bnx2x_free_fp_mem_cnic(bp);
2451 bnx2x_free_mem_cnic(bp);
2453 #endif /* ! BNX2X_STOP_ON_ERROR */
2456 /* must be called with rtnl_lock */
2457 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2459 int port = BP_PORT(bp);
2460 int i, rc = 0, load_code = 0;
2462 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2464 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2466 #ifdef BNX2X_STOP_ON_ERROR
2467 if (unlikely(bp->panic)) {
2468 BNX2X_ERR("Can't load NIC when there is panic\n");
2473 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2475 /* zero the structure w/o any lock, before SP handler is initialized */
2476 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2477 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2478 &bp->last_reported_link.link_report_flags);
2481 /* must be called before memory allocation and HW init */
2482 bnx2x_ilt_set_info(bp);
2485 * Zero fastpath structures preserving invariants like napi, which are
2486 * allocated only once, fp index, max_cos, bp pointer.
2487 * Also set fp->disable_tpa and txdata_ptr.
2489 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2490 for_each_queue(bp, i)
2492 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2493 bp->num_cnic_queues) *
2494 sizeof(struct bnx2x_fp_txdata));
2496 bp->fcoe_init = false;
2498 /* Set the receive queues buffer size */
2499 bnx2x_set_rx_buf_size(bp);
2502 rc = bnx2x_alloc_mem(bp);
2504 BNX2X_ERR("Unable to allocate bp memory\n");
2509 /* Allocate memory for FW statistics */
2510 if (bnx2x_alloc_fw_stats_mem(bp))
2511 LOAD_ERROR_EXIT(bp, load_error0);
2513 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2514 * of memory available for RSS queues
2516 rc = bnx2x_alloc_fp_mem(bp);
2518 BNX2X_ERR("Unable to allocate memory for fps\n");
2519 LOAD_ERROR_EXIT(bp, load_error0);
2522 /* request pf to initialize status blocks */
2524 rc = bnx2x_vfpf_init(bp);
2526 LOAD_ERROR_EXIT(bp, load_error0);
2529 /* Since bnx2x_alloc_mem() may update
2530 * bp->num_queues, bnx2x_set_real_num_queues() must always
2531 * come after it. At this stage CNIC queues are not counted.
2533 rc = bnx2x_set_real_num_queues(bp, 0);
2535 BNX2X_ERR("Unable to set real_num_queues\n");
2536 LOAD_ERROR_EXIT(bp, load_error0);
2539 /* configure multi cos mappings in kernel.
2540 * this configuration may be overridden by a multi class queue
2541 * discipline or by a dcbx negotiation result.
2543 bnx2x_setup_tc(bp->dev, bp->max_cos);
2545 /* Add all NAPI objects */
2546 bnx2x_add_all_napi(bp);
2547 DP(NETIF_MSG_IFUP, "napi added\n");
2548 bnx2x_napi_enable(bp);
2551 /* set pf load just before approaching the MCP */
2552 bnx2x_set_pf_load(bp);
2554 /* if mcp exists send load request and analyze response */
2555 if (!BP_NOMCP(bp)) {
2556 /* attempt to load pf */
2557 rc = bnx2x_nic_load_request(bp, &load_code);
2559 LOAD_ERROR_EXIT(bp, load_error1);
2561 /* what did mcp say? */
2562 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2564 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2565 LOAD_ERROR_EXIT(bp, load_error2);
2568 load_code = bnx2x_nic_load_no_mcp(bp, port);
2571 /* mark pmf if applicable */
2572 bnx2x_nic_load_pmf(bp, load_code);
2574 /* Init Function state controlling object */
2575 bnx2x__init_func_obj(bp);
2578 rc = bnx2x_init_hw(bp, load_code);
2580 BNX2X_ERR("HW init failed, aborting\n");
2581 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2582 LOAD_ERROR_EXIT(bp, load_error2);
2586 bnx2x_pre_irq_nic_init(bp);
2588 /* Connect to IRQs */
2589 rc = bnx2x_setup_irqs(bp);
2591 BNX2X_ERR("setup irqs failed\n");
2593 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2594 LOAD_ERROR_EXIT(bp, load_error2);
2597 /* Init per-function objects */
2599 /* Setup NIC internals and enable interrupts */
2600 bnx2x_post_irq_nic_init(bp, load_code);
2602 bnx2x_init_bp_objs(bp);
2603 bnx2x_iov_nic_init(bp);
2605 /* Set AFEX default VLAN tag to an invalid value */
2606 bp->afex_def_vlan_tag = -1;
2607 bnx2x_nic_load_afex_dcc(bp, load_code);
2608 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2609 rc = bnx2x_func_start(bp);
2611 BNX2X_ERR("Function start failed!\n");
2612 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2614 LOAD_ERROR_EXIT(bp, load_error3);
2617 /* Send LOAD_DONE command to MCP */
2618 if (!BP_NOMCP(bp)) {
2619 load_code = bnx2x_fw_command(bp,
2620 DRV_MSG_CODE_LOAD_DONE, 0);
2622 BNX2X_ERR("MCP response failure, aborting\n");
2624 LOAD_ERROR_EXIT(bp, load_error3);
2628 /* initialize FW coalescing state machines in RAM */
2629 bnx2x_update_coalesce(bp);
2631 /* setup the leading queue */
2632 rc = bnx2x_setup_leading(bp);
2634 BNX2X_ERR("Setup leading failed!\n");
2635 LOAD_ERROR_EXIT(bp, load_error3);
2638 /* set up the rest of the queues */
2639 for_each_nondefault_eth_queue(bp, i) {
2640 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2642 BNX2X_ERR("Queue setup failed\n");
2643 LOAD_ERROR_EXIT(bp, load_error3);
2648 rc = bnx2x_init_rss_pf(bp);
2650 BNX2X_ERR("PF RSS init failed\n");
2651 LOAD_ERROR_EXIT(bp, load_error3);
2655 for_each_eth_queue(bp, i) {
2656 rc = bnx2x_vfpf_setup_q(bp, i);
2658 BNX2X_ERR("Queue setup failed\n");
2659 LOAD_ERROR_EXIT(bp, load_error3);
2664 /* Now that clients are configured we are ready to work */
2665 bp->state = BNX2X_STATE_OPEN;
2667 /* Configure a ucast MAC */
2669 rc = bnx2x_set_eth_mac(bp, true);
2671 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2674 BNX2X_ERR("Setting Ethernet MAC failed\n");
2675 LOAD_ERROR_EXIT(bp, load_error3);
2678 if (IS_PF(bp) && bp->pending_max) {
2679 bnx2x_update_max_mf_config(bp, bp->pending_max);
2680 bp->pending_max = 0;
2684 rc = bnx2x_initial_phy_init(bp, load_mode);
2686 LOAD_ERROR_EXIT(bp, load_error3);
2688 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2690 /* Start fast path */
2692 /* Initialize Rx filter. */
2693 netif_addr_lock_bh(bp->dev);
2694 bnx2x_set_rx_mode(bp->dev);
2695 netif_addr_unlock_bh(bp->dev);
2698 switch (load_mode) {
2700 /* Tx queues should only be re-enabled */
2701 netif_tx_wake_all_queues(bp->dev);
2705 netif_tx_start_all_queues(bp->dev);
2706 smp_mb__after_clear_bit();
2710 case LOAD_LOOPBACK_EXT:
2711 bp->state = BNX2X_STATE_DIAG;
2719 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2721 bnx2x__link_status_update(bp);
2723 /* start the timer */
2724 mod_timer(&bp->timer, jiffies + bp->current_interval);
2726 if (CNIC_ENABLED(bp))
2727 bnx2x_load_cnic(bp);
2729 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2730 /* mark that the driver is loaded in shmem2 */
2732 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2733 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2734 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2735 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2738 /* Wait for all pending SP commands to complete */
2739 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2740 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2741 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2745 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2746 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2747 bnx2x_dcbx_init(bp, false);
2749 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2753 #ifndef BNX2X_STOP_ON_ERROR
2756 bnx2x_int_disable_sync(bp, 1);
2758 /* Clean queueable objects */
2759 bnx2x_squeeze_objects(bp);
2762 /* Free SKBs, SGEs, TPA pool and driver internals */
2763 bnx2x_free_skbs(bp);
2764 for_each_rx_queue(bp, i)
2765 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2770 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2771 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2772 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2777 bnx2x_napi_disable(bp);
2778 bnx2x_del_all_napi(bp);
2780 /* clear pf_load status, as it was already set */
2782 bnx2x_clear_pf_load(bp);
2784 bnx2x_free_fp_mem(bp);
2785 bnx2x_free_fw_stats_mem(bp);
2789 #endif /* ! BNX2X_STOP_ON_ERROR */
2792 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2796 /* Wait until tx fastpath tasks complete */
2797 for_each_tx_queue(bp, i) {
2798 struct bnx2x_fastpath *fp = &bp->fp[i];
2800 for_each_cos_in_tx_queue(fp, cos)
2801 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2808 /* must be called with rtnl_lock */
2809 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2812 bool global = false;
2814 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2816 /* mark that the driver is unloaded in shmem2 */
2817 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2819 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2820 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2821 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2824 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2825 (bp->state == BNX2X_STATE_CLOSED ||
2826 bp->state == BNX2X_STATE_ERROR)) {
2827 /* We can get here if the driver has been unloaded
2828 * during parity error recovery and is either waiting for a
2829 * leader to complete or for other functions to unload and
2830 * then ifdown has been issued. In this case we want to
2831 * unload and let other functions complete the recovery process.
2834 bp->recovery_state = BNX2X_RECOVERY_DONE;
2836 bnx2x_release_leader_lock(bp);
2839 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2840 BNX2X_ERR("Can't unload in closed or error state\n");
2844 /* Nothing to do during unload if previous bnx2x_nic_load()
2845 * did not complete successfully - all resources are released.
2847 * We can get here only after an unsuccessful ndo_* callback, during which
2848 * dev->IFF_UP flag is still on.
2850 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2853 /* It's important to set bp->state to a value different from
2854 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2855 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2857 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2860 if (CNIC_LOADED(bp))
2861 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2864 bnx2x_tx_disable(bp);
2865 netdev_reset_tc(bp->dev);
2867 bp->rx_mode = BNX2X_RX_MODE_NONE;
2869 del_timer_sync(&bp->timer);
2872 /* Set ALWAYS_ALIVE bit in shmem */
2873 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2874 bnx2x_drv_pulse(bp);
2875 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2876 bnx2x_save_statistics(bp);
2879 /* wait till consumers catch up with producers in all queues */
2880 bnx2x_drain_tx_queues(bp);
2882 /* if this is a VF, indicate to the PF that this function is going down (PF will delete sp
2883 * elements and clear initializations)
2886 bnx2x_vfpf_close_vf(bp);
2887 else if (unload_mode != UNLOAD_RECOVERY)
2888 /* if this is a normal/close unload we need to clean up the chip */
2889 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2891 /* Send the UNLOAD_REQUEST to the MCP */
2892 bnx2x_send_unload_req(bp, unload_mode);
2894 /* Prevent transactions to host from the functions on the
2895 * engine that doesn't reset global blocks in case of global
2896 * attention once global blocks are reset and gates are opened
2897 * (the engine whose leader will perform the recovery task).
2900 if (!CHIP_IS_E1x(bp))
2901 bnx2x_pf_disable(bp);
2903 /* Disable HW interrupts, NAPI */
2904 bnx2x_netif_stop(bp, 1);
2905 /* Delete all NAPI objects */
2906 bnx2x_del_all_napi(bp);
2907 if (CNIC_LOADED(bp))
2908 bnx2x_del_all_napi_cnic(bp);
2912 /* Report UNLOAD_DONE to MCP */
2913 bnx2x_send_unload_done(bp, false);
2917 * At this stage no more interrupts will arrive so we may safely clean
2918 * the queueable objects here in case they failed to get cleaned so far.
2921 bnx2x_squeeze_objects(bp);
2923 /* There should be no more pending SP commands at this stage */
2928 /* Free SKBs, SGEs, TPA pool and driver internals */
2929 bnx2x_free_skbs(bp);
2930 if (CNIC_LOADED(bp))
2931 bnx2x_free_skbs_cnic(bp);
2932 for_each_rx_queue(bp, i)
2933 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2935 bnx2x_free_fp_mem(bp);
2936 if (CNIC_LOADED(bp))
2937 bnx2x_free_fp_mem_cnic(bp);
2940 if (CNIC_LOADED(bp))
2941 bnx2x_free_mem_cnic(bp);
2944 bp->state = BNX2X_STATE_CLOSED;
2945 bp->cnic_loaded = false;
2947 /* Check if there are pending parity attentions. If there are - set
2948 * RECOVERY_IN_PROGRESS.
2950 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2951 bnx2x_set_reset_in_progress(bp);
2953 /* Set RESET_IS_GLOBAL if needed */
2955 bnx2x_set_reset_global(bp);
2958 /* The last driver must disable a "close the gate" if there is no
2959 * parity attention or "process kill" pending.
2962 !bnx2x_clear_pf_load(bp) &&
2963 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2964 bnx2x_disable_close_the_gate(bp);
2966 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2971 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2975 /* If there is no power capability, silently succeed */
2977 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2981 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2985 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2986 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2987 PCI_PM_CTRL_PME_STATUS));
2989 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2990 /* delay required during transition out of D3hot */
2995 /* If there are other clients above, don't
2996 shut down the power */
2997 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2999 /* Don't shut down the power for emulation and FPGA */
3000 if (CHIP_REV_IS_SLOW(bp))
3003 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3007 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3009 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3012 /* No more memory access after this point until
3013 * device is brought back to D0.
3018 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3025 * net_device service functions
3027 int bnx2x_poll(struct napi_struct *napi, int budget)
3031 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3033 struct bnx2x *bp = fp->bp;
3036 #ifdef BNX2X_STOP_ON_ERROR
3037 if (unlikely(bp->panic)) {
3038 napi_complete(napi);
3043 for_each_cos_in_tx_queue(fp, cos)
3044 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3045 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3047 if (bnx2x_has_rx_work(fp)) {
3048 work_done += bnx2x_rx_int(fp, budget - work_done);
3050 /* must not complete if we consumed full budget */
3051 if (work_done >= budget)
3055 /* Fall out from the NAPI loop if needed */
3056 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3058 /* No need to update SB for FCoE L2 ring as long as
3059 * it's connected to the default SB and the SB
3060 * has been updated when NAPI was scheduled.
3062 if (IS_FCOE_FP(fp)) {
3063 napi_complete(napi);
3066 bnx2x_update_fpsb_idx(fp);
3067 /* bnx2x_has_rx_work() reads the status block,
3068 * thus we need to ensure that status block indices
3069 * have been actually read (bnx2x_update_fpsb_idx)
3070 * prior to this check (bnx2x_has_rx_work) so that
3071 * we won't write the "newer" value of the status block
3072 * to IGU (if there was a DMA right after
3073 * bnx2x_has_rx_work and if there is no rmb, the memory
3074 * reading (bnx2x_update_fpsb_idx) may be postponed
3075 * to right before bnx2x_ack_sb). In this case there
3076 * will never be another interrupt until there is
3077 * another update of the status block, while there
3078 * is still unhandled work.
3082 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3083 napi_complete(napi);
3084 /* Re-enable interrupts */
3085 DP(NETIF_MSG_RX_STATUS,
3086 "Update index to %d\n", fp->fp_hc_idx);
3087 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3088 le16_to_cpu(fp->fp_hc_idx),
3098 /* we split the first BD into headers and data BDs
3099 * to ease the pain of our fellow microcode engineers
3100 * we use one mapping for both BDs
3102 static u16 bnx2x_tx_split(struct bnx2x *bp,
3103 struct bnx2x_fp_txdata *txdata,
3104 struct sw_tx_bd *tx_buf,
3105 struct eth_tx_start_bd **tx_bd, u16 hlen,
3108 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3109 struct eth_tx_bd *d_tx_bd;
3111 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3113 /* first fix first BD */
3114 h_tx_bd->nbytes = cpu_to_le16(hlen);
3116 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3117 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3119 /* now get a new data BD
3120 * (after the pbd) and fill it */
3121 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3122 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3124 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3125 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
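/* Reuse the start BD's DMA mapping for the data BD, just offset by the
 * header length - this is why only one mapping is kept for both BDs and
 * the data BD is flagged below as having no individual mapping.
 */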
3127 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3128 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3129 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3131 /* this marks the BD as one that has no individual mapping */
3132 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3134 DP(NETIF_MSG_TX_QUEUED,
3135 "TSO split data size is %d (%x:%x)\n",
3136 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3139 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3144 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3145 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
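/* A best-effort reading of the fixup below: the stack's partial checksum
 * covers a range that starts 'fix' bytes before the transport header
 * (fix > 0) or misses the first -fix bytes of it (fix < 0), so the
 * checksum of those bytes is folded out of, or into, the value before it
 * is handed to FW.
 */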
3146 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3148 __sum16 tsum = (__force __sum16) csum;
3151 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3152 csum_partial(t_header - fix, fix, 0)));
3155 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3156 csum_partial(t_header, -fix, 0)));
3158 return bswab16(tsum);
3161 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3167 if (skb->ip_summed != CHECKSUM_PARTIAL)
3170 protocol = vlan_get_protocol(skb);
3171 if (protocol == htons(ETH_P_IPV6)) {
3173 prot = ipv6_hdr(skb)->nexthdr;
3176 prot = ip_hdr(skb)->protocol;
3179 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3180 if (inner_ip_hdr(skb)->version == 6) {
3181 rc |= XMIT_CSUM_ENC_V6;
3182 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3183 rc |= XMIT_CSUM_TCP;
3185 rc |= XMIT_CSUM_ENC_V4;
3186 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3187 rc |= XMIT_CSUM_TCP;
3190 if (prot == IPPROTO_TCP)
3191 rc |= XMIT_CSUM_TCP;
3193 if (skb_is_gso_v6(skb)) {
3194 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3195 if (rc & XMIT_CSUM_ENC)
3196 rc |= XMIT_GSO_ENC_V6;
3197 } else if (skb_is_gso(skb)) {
3198 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3199 if (rc & XMIT_CSUM_ENC)
3200 rc |= XMIT_GSO_ENC_V4;
3206 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3207 /* check if packet requires linearization (packet is too fragmented)
3208 no need to check fragmentation if page size > 8K (there will be no
3209 violation of FW restrictions) */
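/* Roughly: FW cannot fetch more than MAX_FETCH_BD BDs per LSO window, so
 * every run of (MAX_FETCH_BD - 3) consecutive fragments must carry at
 * least one gso_size worth of payload; the sliding-window sums below flag
 * the packet for linearization whenever a window falls short.
 */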
3210 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3215 int first_bd_sz = 0;
3217 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3218 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3220 if (xmit_type & XMIT_GSO) {
3221 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3222 /* Check if LSO packet needs to be copied:
3223 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3224 int wnd_size = MAX_FETCH_BD - 3;
3225 /* Number of windows to check */
3226 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3231 /* Headers length */
3232 hlen = (int)(skb_transport_header(skb) - skb->data) +
3235 /* Amount of data (w/o headers) on linear part of SKB*/
3236 first_bd_sz = skb_headlen(skb) - hlen;
3238 wnd_sum = first_bd_sz;
3240 /* Calculate the first sum - it's special */
3241 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3243 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3245 /* If there was data on linear skb data - check it */
3246 if (first_bd_sz > 0) {
3247 if (unlikely(wnd_sum < lso_mss)) {
3252 wnd_sum -= first_bd_sz;
3255 /* Others are easier: run through the frag list and
3256 check all windows */
3257 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3259 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3261 if (unlikely(wnd_sum < lso_mss)) {
3266 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3269 /* in the non-LSO case a too fragmented packet should always be linearized */
3276 if (unlikely(to_copy))
3277 DP(NETIF_MSG_TX_QUEUED,
3278 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3279 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3280 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3286 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3289 struct ipv6hdr *ipv6;
3291 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3292 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3293 ETH_TX_PARSE_BD_E2_LSO_MSS;
3295 if (xmit_type & XMIT_GSO_ENC_V6)
3296 ipv6 = inner_ipv6_hdr(skb);
3297 else if (xmit_type & XMIT_GSO_V6)
3298 ipv6 = ipv6_hdr(skb);
3302 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3303 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3307 * bnx2x_set_pbd_gso - update PBD in GSO case.
3311 * @xmit_type: xmit flags
3313 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3314 struct eth_tx_parse_bd_e1x *pbd,
3315 struct eth_tx_start_bd *tx_start_bd,
3318 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3319 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3320 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3322 if (xmit_type & XMIT_GSO_V4) {
3323 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3324 pbd->tcp_pseudo_csum =
3325 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3327 0, IPPROTO_TCP, 0));
3329 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3330 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3332 pbd->tcp_pseudo_csum =
3333 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3334 &ipv6_hdr(skb)->daddr,
3335 0, IPPROTO_TCP, 0));
3339 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
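/* The pseudo-header checksum above is computed with a zero length field;
 * the PSEUDO_CS_WITHOUT_LEN flag marks it as such, presumably so the
 * per-segment length can be added later when the TCP checksum of each
 * segment is rebuilt.
 */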
3343 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3345 * @bp: driver handle
3347 * @parsing_data: data to be updated
3348 * @xmit_type: xmit flags
3350 * 57712/578xx related, when skb has encapsulation
3352 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3353 u32 *parsing_data, u32 xmit_type)
3356 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3357 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3358 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3360 if (xmit_type & XMIT_CSUM_TCP) {
3361 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3362 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3363 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3365 return skb_inner_transport_header(skb) +
3366 inner_tcp_hdrlen(skb) - skb->data;
3369 /* We support checksum offload for TCP and UDP only.
3370 * No need to pass the UDP header length - it's a constant.
3372 return skb_inner_transport_header(skb) +
3373 sizeof(struct udphdr) - skb->data;
3377 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3379 * @bp: driver handle
3381 * @parsing_data: data to be updated
3382 * @xmit_type: xmit flags
3384 * 57712/578xx related
3386 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3387 u32 *parsing_data, u32 xmit_type)
3390 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3391 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3392 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3394 if (xmit_type & XMIT_CSUM_TCP) {
3395 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3396 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3397 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3399 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3401 /* We support checksum offload for TCP and UDP only.
3402 * No need to pass the UDP header length - it's a constant.
3404 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3407 /* set FW indication according to inner or outer protocols if tunneled */
3408 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3409 struct eth_tx_start_bd *tx_start_bd,
3412 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3414 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3415 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3417 if (!(xmit_type & XMIT_CSUM_TCP))
3418 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3422 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3424 * @bp: driver handle
3426 * @pbd: parse BD to be updated
3427 * @xmit_type: xmit flags
3429 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3430 struct eth_tx_parse_bd_e1x *pbd,
3433 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
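/* Header offsets and lengths handed to FW in the parse BD are in 16-bit
 * words, hence the '>> 1' and '/ 2' conversions throughout this function.
 */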
3435 /* for now NS flag is not used in Linux */
3438 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3439 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3441 pbd->ip_hlen_w = (skb_transport_header(skb) -
3442 skb_network_header(skb)) >> 1;
3444 hlen += pbd->ip_hlen_w;
3446 /* We support checksum offload for TCP and UDP only */
3447 if (xmit_type & XMIT_CSUM_TCP)
3448 hlen += tcp_hdrlen(skb) / 2;
3450 hlen += sizeof(struct udphdr) / 2;
3452 pbd->total_hlen_w = cpu_to_le16(hlen);
3455 if (xmit_type & XMIT_CSUM_TCP) {
3456 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3459 s8 fix = SKB_CS_OFF(skb); /* signed! */
3461 DP(NETIF_MSG_TX_QUEUED,
3462 "hlen %d fix %d csum before fix %x\n",
3463 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3465 /* HW bug: fixup the CSUM */
3466 pbd->tcp_pseudo_csum =
3467 bnx2x_csum_fix(skb_transport_header(skb),
3470 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3471 pbd->tcp_pseudo_csum);
3477 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3478 struct eth_tx_parse_bd_e2 *pbd_e2,
3479 struct eth_tx_parse_2nd_bd *pbd2,
3484 u8 outerip_off, outerip_len = 0;
3486 /* from outer IP to transport */
3487 hlen_w = (skb_inner_transport_header(skb) -
3488 skb_network_header(skb)) >> 1;
3491 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3493 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3495 /* outer IP header info */
3496 if (xmit_type & XMIT_CSUM_V4) {
3497 struct iphdr *iph = ip_hdr(skb);
3498 pbd2->fw_ip_csum_wo_len_flags_frag =
3499 bswab16(csum_fold((~iph->check) -
3500 iph->tot_len - iph->frag_off));
3502 pbd2->fw_ip_hdr_to_payload_w =
3503 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3506 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3508 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3510 if (xmit_type & XMIT_GSO_V4) {
3511 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3513 pbd_e2->data.tunnel_data.pseudo_csum =
3514 bswab16(~csum_tcpudp_magic(
3515 inner_ip_hdr(skb)->saddr,
3516 inner_ip_hdr(skb)->daddr,
3517 0, IPPROTO_TCP, 0));
3519 outerip_len = ip_hdr(skb)->ihl << 1;
3521 pbd_e2->data.tunnel_data.pseudo_csum =
3522 bswab16(~csum_ipv6_magic(
3523 &inner_ipv6_hdr(skb)->saddr,
3524 &inner_ipv6_hdr(skb)->daddr,
3525 0, IPPROTO_TCP, 0));
3528 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3532 (!!(xmit_type & XMIT_CSUM_V6) <<
3533 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3535 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3536 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3537 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3539 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3540 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3541 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3545 /* called with netif_tx_lock
3546 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3547 * netif_wake_queue()
3549 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3551 struct bnx2x *bp = netdev_priv(dev);
3553 struct netdev_queue *txq;
3554 struct bnx2x_fp_txdata *txdata;
3555 struct sw_tx_bd *tx_buf;
3556 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3557 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3558 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3559 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3560 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3561 u32 pbd_e2_parsing_data = 0;
3562 u16 pkt_prod, bd_prod;
3565 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3568 __le16 pkt_size = 0;
3570 u8 mac_type = UNICAST_ADDRESS;
3572 #ifdef BNX2X_STOP_ON_ERROR
3573 if (unlikely(bp->panic))
3574 return NETDEV_TX_BUSY;
3577 txq_index = skb_get_queue_mapping(skb);
3578 txq = netdev_get_tx_queue(dev, txq_index);
3580 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3582 txdata = &bp->bnx2x_txq[txq_index];
3584 /* enable this debug print to view the transmission queue being used
3585 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3586 txq_index, fp_index, txdata_index); */
3588 /* enable this debug print to view the transmission details
3589 DP(NETIF_MSG_TX_QUEUED,
3590 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3591 txdata->cid, fp_index, txdata_index, txdata, fp); */
3593 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3594 skb_shinfo(skb)->nr_frags +
3596 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3597 /* Handle special storage cases separately */
3598 if (txdata->tx_ring_size == 0) {
3599 struct bnx2x_eth_q_stats *q_stats =
3600 bnx2x_fp_qstats(bp, txdata->parent_fp);
3601 q_stats->driver_filtered_tx_pkt++;
3603 return NETDEV_TX_OK;
3605 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3606 netif_tx_stop_queue(txq);
3607 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3609 return NETDEV_TX_BUSY;
3612 DP(NETIF_MSG_TX_QUEUED,
3613 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3614 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3615 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3618 eth = (struct ethhdr *)skb->data;
3620 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3621 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3622 if (is_broadcast_ether_addr(eth->h_dest))
3623 mac_type = BROADCAST_ADDRESS;
3625 mac_type = MULTICAST_ADDRESS;
3628 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3629 /* First, check if we need to linearize the skb (due to FW
3630 restrictions). No need to check fragmentation if page size > 8K
3631 (there will be no violation of FW restrictions) */
3632 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3633 /* Statistics of linearization */
3635 if (skb_linearize(skb) != 0) {
3636 DP(NETIF_MSG_TX_QUEUED,
3637 "SKB linearization failed - silently dropping this SKB\n");
3638 dev_kfree_skb_any(skb);
3639 return NETDEV_TX_OK;
3643 /* Map skb linear data for DMA */
3644 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3645 skb_headlen(skb), DMA_TO_DEVICE);
3646 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3647 DP(NETIF_MSG_TX_QUEUED,
3648 "SKB mapping failed - silently dropping this SKB\n");
3649 dev_kfree_skb_any(skb);
3650 return NETDEV_TX_OK;
3653 Please read carefully. First we use one BD which we mark as start,
3654 then we have a parsing info BD (used for TSO or xsum),
3655 and only then we have the rest of the TSO BDs.
3656 (don't forget to mark the last one as last,
3657 and to unmap only AFTER you write to the BD ...)
3658 And above all, all pbd sizes are in words - NOT DWORDS!
3661 /* get current pkt produced now - advance it just before sending packet
3662 * since mapping of pages may fail and cause packet to be dropped
3664 pkt_prod = txdata->tx_pkt_prod;
3665 bd_prod = TX_BD(txdata->tx_bd_prod);
3667 /* get a tx_buf and first BD
3668 * tx_start_bd may be changed during SPLIT,
3669 * but first_bd will always stay first
3671 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3672 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3673 first_bd = tx_start_bd;
3675 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3677 /* header nbd: indirectly zero other flags! */
3678 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3680 /* remember the first BD of the packet */
3681 tx_buf->first_bd = txdata->tx_bd_prod;
3685 DP(NETIF_MSG_TX_QUEUED,
3686 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3687 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3689 if (vlan_tx_tag_present(skb)) {
3690 tx_start_bd->vlan_or_ethertype =
3691 cpu_to_le16(vlan_tx_tag_get(skb));
3692 tx_start_bd->bd_flags.as_bitfield |=
3693 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3695 /* when transmitting in a vf, start bd must hold the ethertype
3696 * for fw to enforce it
3699 tx_start_bd->vlan_or_ethertype =
3700 cpu_to_le16(ntohs(eth->h_proto));
3702 /* used by FW for packet accounting */
3703 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3706 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3708 /* turn on parsing and get a BD */
3709 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
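/* NEXT_TX_IDX()/TX_BD() (defined in bnx2x_cmn.h) advance the producer
 * while skipping the per-page next-pointer BD at page boundaries and
 * wrapping around the ring, so bd_prod always lands on a usable descriptor.
 */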
3711 if (xmit_type & XMIT_CSUM)
3712 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3714 if (!CHIP_IS_E1x(bp)) {
3715 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3716 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3718 if (xmit_type & XMIT_CSUM_ENC) {
3719 u16 global_data = 0;
3721 /* Set PBD in enc checksum offload case */
3722 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3723 &pbd_e2_parsing_data,
3726 /* turn on 2nd parsing and get a BD */
3727 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3729 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3731 memset(pbd2, 0, sizeof(*pbd2));
3733 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3734 (skb_inner_network_header(skb) -
3737 if (xmit_type & XMIT_GSO_ENC)
3738 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3742 pbd2->global_data = cpu_to_le16(global_data);
3744 /* add an additional parsing BD indication to the start BD */
3745 SET_FLAG(tx_start_bd->general_data,
3746 ETH_TX_START_BD_PARSE_NBDS, 1);
3747 /* set encapsulation flag in start BD */
3748 SET_FLAG(tx_start_bd->general_data,
3749 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3751 } else if (xmit_type & XMIT_CSUM) {
3752 /* Set PBD in checksum offload case w/o encapsulation */
3753 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3754 &pbd_e2_parsing_data,
3758 /* Add the MACs to the parsing BD if this is a VF */
3760 /* override GRE parameters in BD */
3761 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3762 &pbd_e2->data.mac_addr.src_mid,
3763 &pbd_e2->data.mac_addr.src_lo,
3766 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3767 &pbd_e2->data.mac_addr.dst_mid,
3768 &pbd_e2->data.mac_addr.dst_lo,
3772 SET_FLAG(pbd_e2_parsing_data,
3773 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3775 u16 global_data = 0;
3776 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3777 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3778 /* Set PBD in checksum offload case */
3779 if (xmit_type & XMIT_CSUM)
3780 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3782 SET_FLAG(global_data,
3783 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3784 pbd_e1x->global_data |= cpu_to_le16(global_data);
3787 /* Setup the data pointer of the first BD of the packet */
3788 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3789 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3790 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3791 pkt_size = tx_start_bd->nbytes;
3793 DP(NETIF_MSG_TX_QUEUED,
3794 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3795 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3796 le16_to_cpu(tx_start_bd->nbytes),
3797 tx_start_bd->bd_flags.as_bitfield,
3798 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3800 if (xmit_type & XMIT_GSO) {
3802 DP(NETIF_MSG_TX_QUEUED,
3803 "TSO packet len %d hlen %d total len %d tso size %d\n",
3804 skb->len, hlen, skb_headlen(skb),
3805 skb_shinfo(skb)->gso_size);
3807 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3809 if (unlikely(skb_headlen(skb) > hlen)) {
3811 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3815 if (!CHIP_IS_E1x(bp))
3816 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3819 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3822 /* Set the PBD's parsing_data field if not zero
3823 * (for the chips newer than 57711).
3825 if (pbd_e2_parsing_data)
3826 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3828 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3830 /* Handle fragmented skb */
3831 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3832 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3834 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3835 skb_frag_size(frag), DMA_TO_DEVICE);
3836 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3837 unsigned int pkts_compl = 0, bytes_compl = 0;
3839 DP(NETIF_MSG_TX_QUEUED,
3840 "Unable to map page - dropping packet...\n");
3842 /* we need to unmap all buffers already mapped
3844 * first_bd->nbd needs to be properly updated
3845 * before the call to bnx2x_free_tx_pkt
3847 first_bd->nbd = cpu_to_le16(nbd);
3848 bnx2x_free_tx_pkt(bp, txdata,
3849 TX_BD(txdata->tx_pkt_prod),
3850 &pkts_compl, &bytes_compl);
3851 return NETDEV_TX_OK;
3854 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3855 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3856 if (total_pkt_bd == NULL)
3857 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3859 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3860 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3861 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3862 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3865 DP(NETIF_MSG_TX_QUEUED,
3866 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3867 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3868 le16_to_cpu(tx_data_bd->nbytes));
3871 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3873 /* update with actual num BDs */
3874 first_bd->nbd = cpu_to_le16(nbd);
3876 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3878 /* now send a tx doorbell, counting the next BD
3879 * if the packet contains or ends with it
3881 if (TX_BD_POFF(bd_prod) < nbd)
3884 /* total_pkt_bytes should be set on the first data BD if
3885 * it's not an LSO packet and there is more than one
3886 * data BD. In this case pkt_size is limited by an MTU value.
3887 * However we prefer to set it for an LSO packet (while we don't
3888 * have to) in order to save some CPU cycles in the non-LSO
3889 * case, which we care much more about.
3891 if (total_pkt_bd != NULL)
3892 total_pkt_bd->total_pkt_bytes = pkt_size;
3895 DP(NETIF_MSG_TX_QUEUED,
3896 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3897 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3898 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3899 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3900 le16_to_cpu(pbd_e1x->total_hlen_w));
3902 DP(NETIF_MSG_TX_QUEUED,
3903 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3905 pbd_e2->data.mac_addr.dst_hi,
3906 pbd_e2->data.mac_addr.dst_mid,
3907 pbd_e2->data.mac_addr.dst_lo,
3908 pbd_e2->data.mac_addr.src_hi,
3909 pbd_e2->data.mac_addr.src_mid,
3910 pbd_e2->data.mac_addr.src_lo,
3911 pbd_e2->parsing_data);
3912 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3914 netdev_tx_sent_queue(txq, skb->len);
3916 skb_tx_timestamp(skb);
3918 txdata->tx_pkt_prod++;
3920 * Make sure that the BD data is updated before updating the producer
3921 * since FW might read the BD right after the producer is updated.
3922 * This is only applicable for weak-ordered memory model archs such
3923 * as IA-64. The following barrier is also mandatory since FW will
3924 * assume packets must have BDs.
3928 txdata->tx_db.data.prod += nbd;
3931 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3935 txdata->tx_bd_prod += nbd;
3937 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3938 netif_tx_stop_queue(txq);
3940 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3941 * ordering of set_bit() in netif_tx_stop_queue() and read of
3945 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3946 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3947 netif_tx_wake_queue(txq);
3951 return NETDEV_TX_OK;
3955 * bnx2x_setup_tc - routine to configure net_device for multi tc
3957 * @dev: net device to configure
3958 * @num_tc: number of traffic classes to enable
3960 * callback connected to the ndo_setup_tc function pointer
3962 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3964 int cos, prio, count, offset;
3965 struct bnx2x *bp = netdev_priv(dev);
3967 /* setup tc must be called under rtnl lock */
3970 /* no traffic classes requested. Aborting */
3972 netdev_reset_tc(dev);
3976 /* requested to support too many traffic classes */
3977 if (num_tc > bp->max_cos) {
3978 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
3979 num_tc, bp->max_cos);
3983 /* declare amount of supported traffic classes */
3984 if (netdev_set_num_tc(dev, num_tc)) {
3985 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3989 /* configure priority to traffic class mapping */
3990 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3991 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3992 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3993 "mapping priority %d to tc %d\n",
3994 prio, bp->prio_to_cos[prio]);
3997 /* Use this configuration to differentiate tc0 from other COSes
3998 This can be used for ets or pfc, and save the effort of setting
3999 up a multi class queue disc or negotiating DCBX with a switch
4000 netdev_set_prio_tc_map(dev, 0, 0);
4001 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4002 for (prio = 1; prio < 16; prio++) {
4003 netdev_set_prio_tc_map(dev, prio, 1);
4004 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4007 /* configure traffic class to transmission queue mapping */
4008 for (cos = 0; cos < bp->max_cos; cos++) {
4009 count = BNX2X_NUM_ETH_QUEUES(bp);
4010 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4011 netdev_set_tc_queue(dev, cos, count, offset);
4012 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4013 "mapping tc %d to offset %d count %d\n",
4014 cos, offset, count);
4020 /* called with rtnl_lock */
4021 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4023 struct sockaddr *addr = p;
4024 struct bnx2x *bp = netdev_priv(dev);
4027 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4028 BNX2X_ERR("Requested MAC address is not valid\n");
4032 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4033 !is_zero_ether_addr(addr->sa_data)) {
4034 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4038 if (netif_running(dev)) {
4039 rc = bnx2x_set_eth_mac(bp, false);
4044 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4046 if (netif_running(dev))
4047 rc = bnx2x_set_eth_mac(bp, true);
4052 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4054 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4055 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4060 if (IS_FCOE_IDX(fp_index)) {
4061 memset(sb, 0, sizeof(union host_hc_status_block));
4062 fp->status_blk_mapping = 0;
4065 if (!CHIP_IS_E1x(bp))
4066 BNX2X_PCI_FREE(sb->e2_sb,
4067 bnx2x_fp(bp, fp_index,
4068 status_blk_mapping),
4069 sizeof(struct host_hc_status_block_e2));
4071 BNX2X_PCI_FREE(sb->e1x_sb,
4072 bnx2x_fp(bp, fp_index,
4073 status_blk_mapping),
4074 sizeof(struct host_hc_status_block_e1x));
4078 if (!skip_rx_queue(bp, fp_index)) {
4079 bnx2x_free_rx_bds(fp);
4081 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4082 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4083 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4084 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4085 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4087 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4088 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4089 sizeof(struct eth_fast_path_rx_cqe) *
4093 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4094 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4095 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4096 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4100 if (!skip_tx_queue(bp, fp_index)) {
4101 /* fastpath tx rings: tx_buf tx_desc */
4102 for_each_cos_in_tx_queue(fp, cos) {
4103 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4105 DP(NETIF_MSG_IFDOWN,
4106 "freeing tx memory of fp %d cos %d cid %d\n",
4107 fp_index, cos, txdata->cid);
4109 BNX2X_FREE(txdata->tx_buf_ring);
4110 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4111 txdata->tx_desc_mapping,
4112 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4115 /* end of fastpath */
4118 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4121 for_each_cnic_queue(bp, i)
4122 bnx2x_free_fp_mem_at(bp, i);
4125 void bnx2x_free_fp_mem(struct bnx2x *bp)
4128 for_each_eth_queue(bp, i)
4129 bnx2x_free_fp_mem_at(bp, i);
4132 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4134 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4135 if (!CHIP_IS_E1x(bp)) {
4136 bnx2x_fp(bp, index, sb_index_values) =
4137 (__le16 *)status_blk.e2_sb->sb.index_values;
4138 bnx2x_fp(bp, index, sb_running_index) =
4139 (__le16 *)status_blk.e2_sb->sb.running_index;
4141 bnx2x_fp(bp, index, sb_index_values) =
4142 (__le16 *)status_blk.e1x_sb->sb.index_values;
4143 bnx2x_fp(bp, index, sb_running_index) =
4144 (__le16 *)status_blk.e1x_sb->sb.running_index;
4148 /* Returns the number of actually allocated BDs */
4149 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4152 struct bnx2x *bp = fp->bp;
4153 u16 ring_prod, cqe_ring_prod;
4154 int i, failure_cnt = 0;
4156 fp->rx_comp_cons = 0;
4157 cqe_ring_prod = ring_prod = 0;
4159 /* This routine is called only during fp init so
4160 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4162 for (i = 0; i < rx_ring_size; i++) {
4163 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4167 ring_prod = NEXT_RX_IDX(ring_prod);
4168 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4169 WARN_ON(ring_prod <= (i - failure_cnt));
4173 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4174 i - failure_cnt, fp->index);
4176 fp->rx_bd_prod = ring_prod;
4177 /* Limit the CQE producer by the CQE ring size */
4178 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4180 fp->rx_pkt = fp->rx_calls = 0;
4182 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4184 return i - failure_cnt;
4187 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4191 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4192 struct eth_rx_cqe_next_page *nextpg;
4194 nextpg = (struct eth_rx_cqe_next_page *)
4195 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4197 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4198 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4200 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4201 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4205 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4207 union host_hc_status_block *sb;
4208 struct bnx2x_fastpath *fp = &bp->fp[index];
4211 int rx_ring_size = 0;
4213 if (!bp->rx_ring_size &&
4214 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4215 rx_ring_size = MIN_RX_SIZE_NONTPA;
4216 bp->rx_ring_size = rx_ring_size;
4217 } else if (!bp->rx_ring_size) {
4218 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4220 if (CHIP_IS_E3(bp)) {
4221 u32 cfg = SHMEM_RD(bp,
4222 dev_info.port_hw_config[BP_PORT(bp)].
4225 /* Decrease ring size for 1G functions */
4226 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4227 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4231 /* allocate at least the number of buffers required by FW */
4232 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4233 MIN_RX_SIZE_TPA, rx_ring_size);
4235 bp->rx_ring_size = rx_ring_size;
4236 } else /* if rx_ring_size specified - use it */
4237 rx_ring_size = bp->rx_ring_size;
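/* To summarize the sizing above: an explicit bp->rx_ring_size wins;
 * storage-only functions get the non-TPA minimum; otherwise MAX_RX_AVAIL
 * is split across the RSS queues (smaller still on 1G SGMII ports) and
 * clamped from below to the FW minimum for TPA/non-TPA queues.
 */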
4239 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4242 sb = &bnx2x_fp(bp, index, status_blk);
4244 if (!IS_FCOE_IDX(index)) {
4246 if (!CHIP_IS_E1x(bp))
4247 BNX2X_PCI_ALLOC(sb->e2_sb,
4248 &bnx2x_fp(bp, index, status_blk_mapping),
4249 sizeof(struct host_hc_status_block_e2));
4251 BNX2X_PCI_ALLOC(sb->e1x_sb,
4252 &bnx2x_fp(bp, index, status_blk_mapping),
4253 sizeof(struct host_hc_status_block_e1x));
4256 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4257 * set shortcuts for it.
4259 if (!IS_FCOE_IDX(index))
4260 set_sb_shortcuts(bp, index);
4263 if (!skip_tx_queue(bp, index)) {
4264 /* fastpath tx rings: tx_buf tx_desc */
4265 for_each_cos_in_tx_queue(fp, cos) {
4266 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4269 "allocating tx memory of fp %d cos %d\n",
4272 BNX2X_ALLOC(txdata->tx_buf_ring,
4273 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4274 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4275 &txdata->tx_desc_mapping,
4276 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4281 if (!skip_rx_queue(bp, index)) {
4282 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4283 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4284 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4285 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4286 &bnx2x_fp(bp, index, rx_desc_mapping),
4287 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4289 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4290 &bnx2x_fp(bp, index, rx_comp_mapping),
4291 sizeof(struct eth_fast_path_rx_cqe) *
4295 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4296 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4297 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4298 &bnx2x_fp(bp, index, rx_sge_mapping),
4299 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4301 bnx2x_set_next_page_rx_bd(fp);
4304 bnx2x_set_next_page_rx_cq(fp);
4307 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4308 if (ring_size < rx_ring_size)
4314 /* handles low memory cases */
4316 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4318 /* FW will drop all packets if the queue is not big enough.
4319 * In these cases we disable the queue.
4320 * Min size is different for OOO, TPA and non-TPA queues.
4322 if (ring_size < (fp->disable_tpa ?
4323 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4324 /* release memory allocated for this queue */
4325 bnx2x_free_fp_mem_at(bp, index);
4331 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4335 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4336 /* we will fail load process instead of mark
4344 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4348 /* 1. Allocate FP for leading - fatal if error
4349 * 2. Allocate RSS - fix number of queues if error
4353 if (bnx2x_alloc_fp_mem_at(bp, 0))
4357 for_each_nondefault_eth_queue(bp, i)
4358 if (bnx2x_alloc_fp_mem_at(bp, i))
4361 /* handle memory failures */
4362 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4363 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4366 bnx2x_shrink_eth_fp(bp, delta);
4367 if (CNIC_SUPPORT(bp))
4368 /* move non eth FPs next to last eth FP
4369 * must be done in that order
4370 * FCOE_IDX < FWD_IDX < OOO_IDX
4373 /* move FCoE fp even if NO_FCOE_FLAG is on */
4374 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4375 bp->num_ethernet_queues -= delta;
4376 bp->num_queues = bp->num_ethernet_queues +
4377 bp->num_cnic_queues;
4378 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4379 bp->num_queues + delta, bp->num_queues);
4385 void bnx2x_free_mem_bp(struct bnx2x *bp)
4389 for (i = 0; i < bp->fp_array_size; i++)
4390 kfree(bp->fp[i].tpa_info);
4393 kfree(bp->fp_stats);
4394 kfree(bp->bnx2x_txq);
4395 kfree(bp->msix_table);
4399 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4401 struct bnx2x_fastpath *fp;
4402 struct msix_entry *tbl;
4403 struct bnx2x_ilt *ilt;
4404 int msix_table_size = 0;
4405 int fp_array_size, txq_array_size;
4409 * The biggest MSI-X table we might need is the maximum number of fast
4410 * path IGU SBs plus the default SB (for PF only).
4412 msix_table_size = bp->igu_sb_cnt;
4415 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4417 /* fp array: RSS plus CNIC related L2 queues */
4418 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4419 bp->fp_array_size = fp_array_size;
4420 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4422 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4425 for (i = 0; i < bp->fp_array_size; i++) {
4427 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4428 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4429 if (!(fp[i].tpa_info))
4435 /* allocate sp objs */
4436 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4441 /* allocate fp_stats */
4442 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4447 /* Allocate memory for the transmission queues array */
4449 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4450 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4452 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4458 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4461 bp->msix_table = tbl;
4464 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4471 bnx2x_free_mem_bp(bp);
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

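/* Illustrative example (not driver code): with dual external PHYs and
 * PORT_HW_CFG_PHY_SWAPPED_ENABLED set, an active EXT_PHY1 reported by
 * bnx2x_get_cur_phy_idx() is mapped back onto the other PHY before the
 * per-PHY link configuration index is looked up:
 *
 *	sel_phy_idx = EXT_PHY1;			// activated PHY, post-swap
 *	sel_phy_idx = EXT_PHY2;			// reversed because swap is enabled
 *	return LINK_CONFIG_IDX(sel_phy_idx);	// index of that PHY's link config
 */
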
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

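/* Illustrative example (not driver code): HILO_U64() above combines the two
 * 32-bit halves exposed by the CNIC module into a single 64-bit WWN, roughly:
 *
 *	u64 wwnn = ((u64)cp->fcoe_wwn_node_name_hi << 32) |
 *		   cp->fcoe_wwn_node_name_lo;
 */
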
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

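/* Illustrative example (not driver code): the bounds check above accepts an
 * MTU only if the resulting frame fits between the minimum Ethernet packet
 * and the jumbo limit. Assuming the usual constants (ETH_HLEN == 14,
 * ETH_MIN_PACKET_SIZE == 60, ETH_MAX_JUMBO_PACKET_SIZE == 9600):
 *
 *	new_mtu = 46;	// 46 + 14 == 60 -> smallest accepted value
 *	new_mtu = 9000;	// common jumbo setting, below 9600 -> accepted
 *	new_mtu = 9601;	// above the jumbo limit -> -EINVAL
 */
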
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

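/* Illustrative example (not driver code): why the GRO bit is masked out of
 * 'changes' above. While TPA (LRO) stays enabled a GRO toggle does not need
 * a hardware reconfiguration, so the unload/load cycle is skipped:
 *
 *	flags   = TPA_ENABLE_FLAG | GRO_ENABLE_FLAG;	// user just enabled GRO
 *	changes = flags ^ bp->flags;			// only the GRO bit differs
 *	changes &= ~GRO_ENABLE_FLAG;			// TPA still on -> drop it
 *	// changes == 0, bnx2x_reload stays false, no reload is triggered
 */
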
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

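/* Illustrative note (not driver code): the timeout handler above only flags
 * the event; the actual recovery is assumed to happen in the sp_rtnl worker
 * (bnx2x_main.c), which would test the bit under rtnl roughly like:
 *
 *	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state))
 *		;	// unload and reload the NIC outside interrupt context
 */
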
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear the HC-enabled bit and set it per the request */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
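
/* Illustrative example (not driver code): converting a coalescing value into
 * HC timer ticks, assuming BNX2X_BTR is the 4-usec timer resolution defined
 * in bnx2x_cmn.h:
 *
 *	usec  = 48;
 *	ticks = usec / BNX2X_BTR;		// 48 / 4 = 12 timer ticks
 *	disable = disable ? 1 : (usec ? 0 : 1);	// usec == 0 also disables the index
 */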