/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the MAC address of the guest OS interface in
 *	a virtualization environment.
 *		0 - DISABLE
 *		1 - ENABLE
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *		MIN - 1 and MAX - 2
 *
 * max_config_vpath:
 *	Maximum number of virtual paths (VPATHs) to configure for each
 *	device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	Maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 *
 ******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "vxge-main.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static const struct pci_device_id vxge_id_table[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
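
/*
 * vpath_selector below seeds the per-device mask table consulted by
 * vxge_get_vpath_no(): for n configured vpaths, vpath_selector[n - 1]
 * is the largest (2^k - 1) mask not exceeding n - 1, so a TCP port sum
 * can be reduced to a vpath index with a single AND instead of a
 * modulo.
 */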
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
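
/*
 * Per-vpath bandwidth share, as a percentage. The range-designated
 * initializer sets every entry to 0xFF, which the driver treats as
 * "no limit". Being a module parameter array, it can be overridden at
 * load time, e.g. (illustrative values only):
 *	modprobe vxge bw_percentage=25,25,50
 */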
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
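
/*
 * Drain completed Tx descriptors for one fifo. The hardware poll runs
 * under the Tx queue lock (a trylock, so a concurrent xmit simply
 * skips this pass), while the completed skbs are freed after the lock
 * is dropped to keep the locked section short.
 */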
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);
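
	/*
	 * Reserving the alignment pad here shifts the packet start so
	 * that the IP header lands naturally aligned; the pad size
	 * comes from VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN (typically
	 * 2 bytes for Ethernet-II, the usual NET_IP_ALIGN trick).
	 */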
	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ext_info->vlan &&
	    ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
	napi_gro_receive(ring->napi_p, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
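
/*
 * Posting is batched: every VXGE_HW_RXSYNC_FREQ_CNT descriptors the
 * previously accumulated batch head is posted with a write memory
 * barrier and the current descriptor becomes the new head. The caller
 * flushes the final head (again with a wmb) once its completion loop
 * ends, so the NIC never observes a partially built chain.
 */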

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}
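
		/*
		 * Two delivery strategies, keyed on packet size: frames
		 * larger than VXGE_LL_RX_COPY_THRESHOLD hand the filled
		 * buffer up the stack and replace it with a freshly
		 * allocated one; smaller frames are copied into a
		 * right-sized skb so the original (large) buffer can be
		 * recycled straight back into the ring.
		 */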
		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					continue;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if (ring->rx_hwts) {
			struct skb_shared_hwtstamps *skb_hwts;
			u32 ns = *(u32 *)(skb->head + pkt_length);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);
			skb_hwts->syststamp.tv64 = 0;
		}

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled. Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb_set_hash(skb, ext_info.rth_value,
				     PKT_HASH_TYPE_L3);

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s:%d Exiting...",
				__func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NIC's
 * internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d fifo_hw = %p dtr = %p "
				"tcode = 0x%x", fifo->ndev->name, __func__,
				__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;
		u64_stats_update_end(&fifo->stats.syncp);

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				fifo->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if (!ip_is_fragment(ip)) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					(ip->ihl & 0x0f) * 4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}
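
/*
 * Worked example: with 6 vpaths, vpath_selector[5] is 7, so the port
 * sum is masked down to 0..7; sums of 6 or 7 are then clamped to
 * vpath 5 by the bounds check. Non-IP and fragmented traffic falls
 * through with counter == 0, i.e. it always lands on the first vpath.
 */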

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	if (is_multicast_ether_addr(mac->macaddr))
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (is_multicast_ether_addr(mac->macaddr))
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
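
/*
 * "Catch-basin" mode above is the overflow strategy for MAC learning:
 * once every vpath's DA table is full, vpath 0 is programmed (via the
 * mrpcim rts_mgr_cbasin_cfg register) to receive frames whose
 * destination no DA table matched, and further learned addresses are
 * tracked only in the driver's software list.
 */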

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;
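
	/*
	 * From here the descriptor lifecycle is: reserve a TxDL, DMA-map
	 * the linear head plus each nonzero fragment into
	 * txdl_priv->dma_buffers[], set MSS/checksum bits, then post.
	 * Any mapping failure unwinds the mappings made so far
	 * (_exit2/_exit1) and frees the descriptor (_exit0).
	 */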
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!skb_frag_size(frag))
			goto _continue;

		dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
						    0, skb_frag_size(frag),
						    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					skb_frag_size(frag));
_continue:
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *)(&del_mac);
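
	/*
	 * The 6-byte address is packed into the low bytes of a zeroed
	 * u64 so each list entry can be matched with a single integer
	 * comparison instead of a memcmp.
	 */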
	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			if (is_multicast_ether_addr(mac->macaddr))
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags & IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr)) {
					for (vpath_idx = 0; vpath_idx <
							vdev->no_of_vpath;
							vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;

_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr))
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	struct __vxge_hw_device *hldev;
	int msix_id;

	hldev = pci_get_drvdata(vdev->pdev);

	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}

/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (!ether_addr_equal(mac->macaddr, macaddr)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}

/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {
		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address = (u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}

/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (!vpath->is_open)
		return status;

	for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
		status = vxge_hw_vpath_vid_add(vpath->handle, vid);

	return status;
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
			    vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}

static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
{
	int i = 0;

	/* Enable CI for RTI */
	if (vdev->config.intr_type == MSI_X) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct __vxge_hw_ring *hw_ring;

			hw_ring = vdev->vpaths[i].ring.handle;
			vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
		}
	}

	/* Enable CI for TTI */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
		vxge_hw_vpath_tti_ci_set(hw_fifo);
		/*
		 * For INTA (with or without napi), set CI ON for only one
		 * vpath. (Have only one free running timer.)
		 */
		if ((vdev->config.intr_type == INTA) && (i == 0))
			break;
	}
}

static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		netif_carrier_off(vdev->ndev);

		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		netif_carrier_on(vdev->ndev);

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			netif_tx_stop_all_queues(vdev->ndev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_wait_receive_idle(vdev->devh);
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error", vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error", vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error", vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze", vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		netif_tx_stop_all_queues(vdev->ndev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		netif_tx_wake_all_queues(vdev->ndev);
	}

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
static void vxge_reset(struct work_struct *work)
{
	struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);

	if (!netif_running(vdev->ndev))
		return;

	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}

/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
	int pkts_processed;
	int budget_org = budget;

	ring->budget = budget;
	ring->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(ring->handle);
	pkts_processed = ring->pkts_processed;
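
	/*
	 * NAPI contract: completing with fewer packets than the budget
	 * tells the core this ring is idle, and only then is it safe to
	 * re-arm (unmask) the ring's MSI-X vector.
	 */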
	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);

		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	/* We are copying and returning the local variable, in case if after
	 * clearing the msix interrupt above, if the interrupt fires right
	 * away which can preempt this NAPI thread */
	return pkts_processed;
}

static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		ring->pkts_processed = 0;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 *
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct pci_dev *pdev = vdev->pdev;
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	const int irq = pdev->irq;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(pdev))
		return;

	disable_irq(irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}
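
	/*
	 * Worked example: with rth_bkt_sz = 2 there are 1 << 2 = 4 hash
	 * buckets, and with 3 vpaths the round-robin fill above yields
	 * mtable = {0, 1, 2, 0}, i.e. bucket 3 wraps back to vpath 0.
	 */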

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}

/* reset vpaths */
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		if (vpath->handle) {
			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
				    vxge_hw_vpath_recover_from_reset(
						vpath->handle) != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d", i);
				return VXGE_HW_FAIL;
			}
		}
	}

	return status;
}

/* close vpaths */
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	struct vxge_vpath *vpath;
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		if (vpath->handle && vpath->is_open) {
			vxge_hw_vpath_close(vpath->handle);
			vdev->stats.vpaths_open--;
		}
		vpath->is_open = 0;
		vpath->handle = NULL;
	}
}

/* open vpaths */
static int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		vxge_assert(vpath->is_configured);

		if (!vdev->titan1) {
			struct vxge_hw_vp_config *vcfg;
			vcfg = &vdev->devh->config.vp_config[vpath->device_id];

			vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
			vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
			vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
			vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
			vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
			vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
			vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
			vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
			vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
		}

		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;

		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;

			u64_stats_init(&vpath->fifo.stats.syncp);
			u64_stats_init(&vpath->ring.stats.syncp);

			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->fifo.tx_vector_no = 0;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_hwts = vdev->rx_hwts;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
					"open with status: %d",
					vdev->ndev->name, vpath->device_id,
					status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}

	return VXGE_HW_OK;
}

/**
 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
 * if the interrupts are not within a range
 * @fifo: pointer to transmit fifo structure
 * Description: The function changes the boundary timer and restriction
 * timer values depending on the traffic.
 * Return Value: None
 */
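/*
 * In effect this is a simple hysteresis: if more than
 * VXGE_T1A_MAX_TX_INTERRUPT_COUNT Tx interrupts arrive within one
 * HZ/100 window (10 ms), the restriction timer is raised to
 * VXGE_TTI_RTIMER_ADAPT_VAL to spread completions out; once the rate
 * drops back, the timer is returned to 0.
 */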
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
	fifo->interrupt_count++;
	if (time_after(jiffies, fifo->jiffies + HZ / 100)) {
		struct __vxge_hw_fifo *hw_fifo = fifo->handle;

		fifo->jiffies = jiffies;
		if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
		    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
			hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
		} else if (hw_fifo->rtimer != 0) {
			hw_fifo->rtimer = 0;
			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
		}
		fifo->interrupt_count = 0;
	}
}

/**
 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
 * if the interrupts are not within a range
 * @ring: pointer to receive ring structure
 * Description: The function increases or decreases the packet counts within
 * the ranges of traffic utilization, if the interrupts due to this ring are
 * not within a fixed range.
 * Return Value: Nothing
 */
static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
{
	ring->interrupt_count++;
	if (time_after(jiffies, ring->jiffies + HZ / 100)) {
		struct __vxge_hw_ring *hw_ring = ring->handle;

		ring->jiffies = jiffies;
		if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
		    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
			hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
		} else if (hw_ring->rtimer != 0) {
			hw_ring->rtimer = 0;
			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
		}
		ring->interrupt_count = 0;
	}
}

/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_HANDLED;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}

#ifdef CONFIG_PCI_MSI

static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	adaptive_coalesce_tx_interrupts(fifo);

	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
				  fifo->tx_vector_no);

	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
				   fifo->tx_vector_no);

	VXGE_COMPLETE_VPATH_TX(fifo);

	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
				    fifo->tx_vector_no);

	return IRQ_HANDLED;
}
2250 static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2252 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2254 adaptive_coalesce_rx_interrupts(ring);
2256 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2257 ring->rx_vector_no);
2259 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2260 ring->rx_vector_no);
2262 napi_schedule(&ring->napi);
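/*
 * Note the asymmetry between the two MSI-X handlers above: the Tx handler
 * masks, clears, drains completions and unmasks entirely in hard-irq
 * context, while the Rx handler only masks/clears and schedules NAPI --
 * its vector stays masked until the poll routine finishes and re-enables
 * it. A skeleton of the Rx side (mask_vector/clear_vector are hypothetical
 * stand-ins for the vxge_hw_channel_msix_* calls):
 */
#if 0	/* Illustrative sketch only. */
static irqreturn_t rx_msix_handle(int irq, void *dev_id)
{
	struct ring *r = dev_id;

	mask_vector(r);		 /* no further irqs from this vector */
	clear_vector(r);	 /* ack the pending bit */
	napi_schedule(&r->napi); /* unmask happens at the end of the poll */
	return IRQ_HANDLED;
}
#endif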
2267 vxge_alarm_msix_handle(int irq, void *dev_id)
2270 enum vxge_hw_status status;
2271 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2272 struct vxgedev *vdev = vpath->vdev;
2273 int msix_id = (vpath->handle->vpath->vp_id *
2274 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2276 for (i = 0; i < vdev->no_of_vpath; i++) {
2277 /* Reduce the chance of losing alarm interrupts by masking
2278 * the vector. A pending bit will be set if an alarm is
2279 * generated, and the interrupt will fire on unmask.
2281 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2282 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2285 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2287 if (status == VXGE_HW_OK) {
2288 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2293 vxge_debug_intr(VXGE_ERR,
2294 "%s: vxge_hw_vpath_alarm_process failed %x ",
2295 VXGE_DRIVER_NAME, status);
2300 static int vxge_alloc_msix(struct vxgedev *vdev)
2303 int msix_intr_vect = 0, temp;
2307 /* Tx/Rx MSIX Vectors count */
2308 vdev->intr_cnt = vdev->no_of_vpath * 2;
2310 /* Alarm MSIX Vectors count */
2313 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2315 if (!vdev->entries) {
2316 vxge_debug_init(VXGE_ERR,
2317 "%s: memory allocation failed",
2320 goto alloc_entries_failed;
2323 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2324 sizeof(struct vxge_msix_entry),
2326 if (!vdev->vxge_entries) {
2327 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2330 goto alloc_vxge_entries_failed;
2333 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2335 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2337 /* Initialize the fifo vector */
2338 vdev->entries[j].entry = msix_intr_vect;
2339 vdev->vxge_entries[j].entry = msix_intr_vect;
2340 vdev->vxge_entries[j].in_use = 0;
2343 /* Initialize the ring vector */
2344 vdev->entries[j].entry = msix_intr_vect + 1;
2345 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2346 vdev->vxge_entries[j].in_use = 0;
2350 /* Initialize the alarm vector */
2351 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2352 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2353 vdev->vxge_entries[j].in_use = 0;
2355 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
2357 vxge_debug_init(VXGE_ERR,
2358 "%s: MSI-X enable failed for %d vectors, ret: %d",
2359 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2360 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2362 goto enable_msix_failed;
2365 kfree(vdev->entries);
2366 kfree(vdev->vxge_entries);
2367 vdev->entries = NULL;
2368 vdev->vxge_entries = NULL;
2369 /* Try with fewer vectors by reducing the number of vpaths */
2371 vxge_close_vpaths(vdev, temp);
2372 vdev->no_of_vpath = temp;
2374 } else if (ret < 0) {
2376 goto enable_msix_failed;
2381 kfree(vdev->vxge_entries);
2382 alloc_vxge_entries_failed:
2383 kfree(vdev->entries);
2384 alloc_entries_failed:
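/*
 * For context on the retry above: on this kernel generation,
 * pci_enable_msix() returns 0 on success, a negative errno on hard
 * failure, and a positive count of the vectors that *could* be
 * allocated. The driver exploits the positive return by closing vpaths
 * until the request fits. The generic contract, sketched:
 */
#if 0	/* Illustrative sketch only. */
ret = pci_enable_msix(pdev, entries, nvec);
while (ret > 0) {	/* only 'ret' vectors are available */
	nvec = ret;	/* shrink the request and retry */
	ret = pci_enable_msix(pdev, entries, nvec);
}
if (ret < 0)
	goto fall_back_to_inta;
#endif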
2388 static int vxge_enable_msix(struct vxgedev *vdev)
2392 /* 0 - Tx, 1 - Rx */
2393 int tim_msix_id[4] = {0, 1, 0, 0};
2397 /* allocate msix vectors */
2398 ret = vxge_alloc_msix(vdev);
2400 for (i = 0; i < vdev->no_of_vpath; i++) {
2401 struct vxge_vpath *vpath = &vdev->vpaths[i];
2403 /* If the fifo or ring is not enabled, the MSI-X vector for
2404 * it should be set to 0.
2406 vpath->ring.rx_vector_no = (vpath->device_id *
2407 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2409 vpath->fifo.tx_vector_no = (vpath->device_id *
2410 VXGE_HW_VPATH_MSIX_ACTIVE);
2412 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2413 VXGE_ALARM_MSIX_ID);
2420 static void vxge_rem_msix_isr(struct vxgedev *vdev)
2424 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2426 if (vdev->vxge_entries[intr_cnt].in_use) {
2427 synchronize_irq(vdev->entries[intr_cnt].vector);
2428 free_irq(vdev->entries[intr_cnt].vector,
2429 vdev->vxge_entries[intr_cnt].arg);
2430 vdev->vxge_entries[intr_cnt].in_use = 0;
2434 kfree(vdev->entries);
2435 kfree(vdev->vxge_entries);
2436 vdev->entries = NULL;
2437 vdev->vxge_entries = NULL;
2439 if (vdev->config.intr_type == MSI_X)
2440 pci_disable_msix(vdev->pdev);
2444 static void vxge_rem_isr(struct vxgedev *vdev)
2446 struct __vxge_hw_device *hldev;
2447 hldev = pci_get_drvdata(vdev->pdev);
2449 #ifdef CONFIG_PCI_MSI
2450 if (vdev->config.intr_type == MSI_X) {
2451 vxge_rem_msix_isr(vdev);
2454 if (vdev->config.intr_type == INTA) {
2455 synchronize_irq(vdev->pdev->irq);
2456 free_irq(vdev->pdev->irq, vdev);
2460 static int vxge_add_isr(struct vxgedev *vdev)
2463 #ifdef CONFIG_PCI_MSI
2464 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2465 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2467 if (vdev->config.intr_type == MSI_X)
2468 ret = vxge_enable_msix(vdev);
2471 vxge_debug_init(VXGE_ERR,
2472 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2473 vxge_debug_init(VXGE_ERR,
2474 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2475 vdev->config.intr_type = INTA;
2478 if (vdev->config.intr_type == MSI_X) {
2480 intr_idx < (vdev->no_of_vpath *
2481 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2483 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2488 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2489 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2491 vdev->entries[intr_cnt].entry,
2494 vdev->entries[intr_cnt].vector,
2495 vxge_tx_msix_handle, 0,
2496 vdev->desc[intr_cnt],
2497 &vdev->vpaths[vp_idx].fifo);
2498 vdev->vxge_entries[intr_cnt].arg =
2499 &vdev->vpaths[vp_idx].fifo;
2503 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2504 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2506 vdev->entries[intr_cnt].entry,
2509 vdev->entries[intr_cnt].vector,
2510 vxge_rx_msix_napi_handle,
2512 vdev->desc[intr_cnt],
2513 &vdev->vpaths[vp_idx].ring);
2514 vdev->vxge_entries[intr_cnt].arg =
2515 &vdev->vpaths[vp_idx].ring;
2521 vxge_debug_init(VXGE_ERR,
2522 "%s: MSIX - %d Registration failed",
2523 vdev->ndev->name, intr_cnt);
2524 vxge_rem_msix_isr(vdev);
2525 vdev->config.intr_type = INTA;
2526 vxge_debug_init(VXGE_ERR,
2527 "%s: Defaulting to INTA"
2528 , vdev->ndev->name);
2533 /* We requested this MSI-X interrupt */
2534 vdev->vxge_entries[intr_cnt].in_use = 1;
2535 msix_idx += vdev->vpaths[vp_idx].device_id *
2536 VXGE_HW_VPATH_MSIX_ACTIVE;
2537 vxge_hw_vpath_msix_unmask(
2538 vdev->vpaths[vp_idx].handle,
2543 /* Point to next vpath handler */
2544 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2545 (vp_idx < (vdev->no_of_vpath - 1)))
2549 intr_cnt = vdev->no_of_vpath * 2;
2550 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2551 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2553 vdev->entries[intr_cnt].entry,
2555 /* For Alarm interrupts */
2556 ret = request_irq(vdev->entries[intr_cnt].vector,
2557 vxge_alarm_msix_handle, 0,
2558 vdev->desc[intr_cnt],
2561 vxge_debug_init(VXGE_ERR,
2562 "%s: MSIX - %d Registration failed",
2563 vdev->ndev->name, intr_cnt);
2564 vxge_rem_msix_isr(vdev);
2565 vdev->config.intr_type = INTA;
2566 vxge_debug_init(VXGE_ERR,
2567 "%s: Defaulting to INTA",
2572 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2573 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2574 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2576 vdev->vxge_entries[intr_cnt].in_use = 1;
2577 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2582 if (vdev->config.intr_type == INTA) {
2583 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2584 "%s:vxge:INTA", vdev->ndev->name);
2585 vxge_hw_device_set_intr_type(vdev->devh,
2586 VXGE_HW_INTR_MODE_IRQLINE);
2588 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2590 ret = request_irq((int) vdev->pdev->irq,
2592 IRQF_SHARED, vdev->desc[0], vdev);
2594 vxge_debug_init(VXGE_ERR,
2595 "%s %s-%d: ISR registration failed",
2596 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2599 vxge_debug_init(VXGE_TRACE,
2600 "new %s-%d line allocated",
2601 "IRQ", vdev->pdev->irq);
2607 static void vxge_poll_vp_reset(unsigned long data)
2609 struct vxgedev *vdev = (struct vxgedev *)data;
2612 for (i = 0; i < vdev->no_of_vpath; i++) {
2613 if (test_bit(i, &vdev->vp_reset)) {
2614 vxge_reset_vpath(vdev, i);
2618 if (j && (vdev->config.intr_type != MSI_X)) {
2619 vxge_hw_device_unmask_all(vdev->devh);
2620 vxge_hw_device_flush_io(vdev->devh);
2623 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
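/*
 * vxge_poll_vp_reset() (and vxge_poll_vp_lockup() below) use the classic
 * self-rearming timer pattern: the callback does its work and re-arms
 * itself with mod_timer() and a fresh expiry. A minimal sketch with a
 * hypothetical my_dev/my_timer:
 */
#if 0	/* Illustrative sketch only. */
static void my_poll(unsigned long data)
{
	struct my_dev *d = (struct my_dev *)data;

	/* ... periodic work ... */

	mod_timer(&d->my_timer, jiffies + HZ / 2);	/* run again in 500 ms */
}
#endif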
2626 static void vxge_poll_vp_lockup(unsigned long data)
2628 struct vxgedev *vdev = (struct vxgedev *)data;
2629 enum vxge_hw_status status = VXGE_HW_OK;
2630 struct vxge_vpath *vpath;
2631 struct vxge_ring *ring;
2633 unsigned long rx_frms;
2635 for (i = 0; i < vdev->no_of_vpath; i++) {
2636 ring = &vdev->vpaths[i].ring;
2638 /* Number of rx frames, truncated to machine word size */
2639 rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
2641 /* Did this vpath receive any packets? */
2642 if (ring->stats.prev_rx_frms == rx_frms) {
2643 status = vxge_hw_vpath_check_leak(ring->handle);
2645 /* Did it receive any packets last time? */
2646 if ((VXGE_HW_FAIL == status) &&
2647 (VXGE_HW_FAIL == ring->last_status)) {
2649 /* schedule vpath reset */
2650 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2651 vpath = &vdev->vpaths[i];
2653 /* disable interrupts for this vpath */
2654 vxge_vpath_intr_disable(vdev, i);
2656 /* stop the queue for this vpath */
2657 netif_tx_stop_queue(vpath->fifo.txq);
2662 ring->stats.prev_rx_frms = rx_frms;
2663 ring->last_status = status;
2666 /* Check every millisecond */
2667 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
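/*
 * The lockup detector above works by snapshot comparison: remember the
 * rx frame count from the previous poll and, if it has not moved while
 * the hardware leak check fails twice in a row, schedule a vpath reset.
 * Condensed (hw_leak_check_failed_twice/schedule_vpath_reset are
 * hypothetical stand-ins for the logic above):
 */
#if 0	/* Illustrative sketch only. */
unsigned long now = ACCESS_ONCE(ring->stats.rx_frms);

if (now == ring->stats.prev_rx_frms &&
    hw_leak_check_failed_twice(ring))
	schedule_vpath_reset(ring);	/* disable irqs, stop txq, set bit */
ring->stats.prev_rx_frms = now;
#endif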
2670 static netdev_features_t vxge_fix_features(struct net_device *dev,
2671 netdev_features_t features)
2673 netdev_features_t changed = dev->features ^ features;
2675 /* Enabling RTH requires some of the logic in vxge_device_register and a
2676 * vpath reset. Due to these restrictions, only allow modification
2677 * while the interface is down.
2679 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2680 features ^= NETIF_F_RXHASH;
2685 static int vxge_set_features(struct net_device *dev, netdev_features_t features)
2687 struct vxgedev *vdev = netdev_priv(dev);
2688 netdev_features_t changed = dev->features ^ features;
2690 if (!(changed & NETIF_F_RXHASH))
2693 /* !netif_running() ensured by vxge_fix_features() */
2695 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2696 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2697 dev->features = features ^ NETIF_F_RXHASH;
2698 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
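/*
 * The two hooks above follow the standard netdev features contract:
 * ndo_fix_features() may only strip bits it cannot honour right now
 * (here NETIF_F_RXHASH while the interface is up), and ndo_set_features()
 * applies the remaining delta, reverting dev->features itself when the
 * hardware update fails. Compressed sketch (hw_toggle_rxhash() is a
 * hypothetical stand-in for the RTH reconfiguration above):
 */
#if 0	/* Illustrative sketch only. */
static netdev_features_t fix_features(struct net_device *dev,
				      netdev_features_t features)
{
	if (((dev->features ^ features) & NETIF_F_RXHASH) &&
	    netif_running(dev))
		features ^= NETIF_F_RXHASH;	/* refuse while up */
	return features;
}

static int set_features(struct net_device *dev, netdev_features_t features)
{
	if (!((dev->features ^ features) & NETIF_F_RXHASH))
		return 0;
	if (hw_toggle_rxhash(dev, !!(features & NETIF_F_RXHASH)))
		dev->features = features ^ NETIF_F_RXHASH;	/* revert */
	return 0;
}
#endif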
2707 * @dev: pointer to the device structure.
2709 * This function is the open entry point of the driver. It mainly calls a
2710 * function to allocate Rx buffers and inserts them into the buffer
2711 * descriptors and then enables the Rx part of the NIC.
2712 * Return value: '0' on success and an appropriate (-)ve integer as
2713 * defined in errno.h file on failure.
2715 static int vxge_open(struct net_device *dev)
2717 enum vxge_hw_status status;
2718 struct vxgedev *vdev;
2719 struct __vxge_hw_device *hldev;
2720 struct vxge_vpath *vpath;
2723 u64 val64, function_mode;
2725 vxge_debug_entryexit(VXGE_TRACE,
2726 "%s: %s:%d", dev->name, __func__, __LINE__);
2728 vdev = netdev_priv(dev);
2729 hldev = pci_get_drvdata(vdev->pdev);
2730 function_mode = vdev->config.device_hw_info.function_mode;
2732 /* make sure the link is off by default every time the NIC is brought up */
2734 netif_carrier_off(dev);
2737 status = vxge_open_vpaths(vdev);
2738 if (status != VXGE_HW_OK) {
2739 vxge_debug_init(VXGE_ERR,
2740 "%s: fatal: Vpath open failed", vdev->ndev->name);
2745 vdev->mtu = dev->mtu;
2747 status = vxge_add_isr(vdev);
2748 if (status != VXGE_HW_OK) {
2749 vxge_debug_init(VXGE_ERR,
2750 "%s: fatal: ISR add failed", dev->name);
2755 if (vdev->config.intr_type != MSI_X) {
2756 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2757 vdev->config.napi_weight);
2758 napi_enable(&vdev->napi);
2759 for (i = 0; i < vdev->no_of_vpath; i++) {
2760 vpath = &vdev->vpaths[i];
2761 vpath->ring.napi_p = &vdev->napi;
2764 for (i = 0; i < vdev->no_of_vpath; i++) {
2765 vpath = &vdev->vpaths[i];
2766 netif_napi_add(dev, &vpath->ring.napi,
2767 vxge_poll_msix, vdev->config.napi_weight);
2768 napi_enable(&vpath->ring.napi);
2769 vpath->ring.napi_p = &vpath->ring.napi;
2774 if (vdev->config.rth_steering) {
2775 status = vxge_rth_configure(vdev);
2776 if (status != VXGE_HW_OK) {
2777 vxge_debug_init(VXGE_ERR,
2778 "%s: fatal: RTH configuration failed",
2784 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2785 hldev->config.rth_en ? "enabled" : "disabled");
2787 for (i = 0; i < vdev->no_of_vpath; i++) {
2788 vpath = &vdev->vpaths[i];
2790 /* set initial mtu before enabling the device */
2791 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2792 if (status != VXGE_HW_OK) {
2793 vxge_debug_init(VXGE_ERR,
2794 "%s: fatal: can not set new MTU", dev->name);
2800 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2801 vxge_debug_init(vdev->level_trace,
2802 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2803 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2805 /* Restore the DA, VID table and also multicast and promiscuous mode
2808 if (vdev->all_multi_flg) {
2809 for (i = 0; i < vdev->no_of_vpath; i++) {
2810 vpath = &vdev->vpaths[i];
2811 vxge_restore_vpath_mac_addr(vpath);
2812 vxge_restore_vpath_vid_table(vpath);
2814 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2815 if (status != VXGE_HW_OK)
2816 vxge_debug_init(VXGE_ERR,
2817 "%s:%d Enabling multicast failed",
2818 __func__, __LINE__);
2822 /* Enable vpaths to sniff all unicast/multicast traffic that is not
2823 * addressed to them. We allow promiscuous mode for the PF only
2827 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2828 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2830 vxge_hw_mgmt_reg_write(vdev->devh,
2831 vxge_hw_mgmt_reg_type_mrpcim,
2833 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2834 rxmac_authorize_all_addr),
2837 vxge_hw_mgmt_reg_write(vdev->devh,
2838 vxge_hw_mgmt_reg_type_mrpcim,
2840 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2841 rxmac_authorize_all_vid),
2844 vxge_set_multicast(dev);
2846 /* Enable broadcast and multicast for all vpaths */
2847 for (i = 0; i < vdev->no_of_vpath; i++) {
2848 vpath = &vdev->vpaths[i];
2849 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2850 if (status != VXGE_HW_OK)
2851 vxge_debug_init(VXGE_ERR,
2852 "%s : Can not enable bcast for vpath "
2853 "id %d", dev->name, i);
2854 if (vdev->config.addr_learn_en) {
2855 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2856 if (status != VXGE_HW_OK)
2857 vxge_debug_init(VXGE_ERR,
2858 "%s : Can not enable mcast for vpath "
2859 "id %d", dev->name, i);
2863 vxge_hw_device_setpause_data(vdev->devh, 0,
2864 vdev->config.tx_pause_enable,
2865 vdev->config.rx_pause_enable);
2867 if (vdev->vp_reset_timer.function == NULL)
2868 vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
2871 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2872 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2873 vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2876 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2880 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2881 netif_carrier_on(vdev->ndev);
2882 netdev_notice(vdev->ndev, "Link Up\n");
2883 vdev->stats.link_up++;
2886 vxge_hw_device_intr_enable(vdev->devh);
2890 for (i = 0; i < vdev->no_of_vpath; i++) {
2891 vpath = &vdev->vpaths[i];
2893 vxge_hw_vpath_enable(vpath->handle);
2895 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2898 netif_tx_start_all_queues(vdev->ndev);
2901 vxge_config_ci_for_tti_rti(vdev);
2909 if (vdev->config.intr_type != MSI_X)
2910 napi_disable(&vdev->napi);
2912 for (i = 0; i < vdev->no_of_vpath; i++)
2913 napi_disable(&vdev->vpaths[i].ring.napi);
2917 vxge_close_vpaths(vdev, 0);
2919 vxge_debug_entryexit(VXGE_TRACE,
2920 "%s: %s:%d Exiting...",
2921 dev->name, __func__, __LINE__);
2925 /* Loop through the mac address list and delete all the entries */
2926 static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2929 struct list_head *entry, *next;
2930 if (list_empty(&vpath->mac_addr_list))
2933 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2935 kfree((struct vxge_mac_addrs *)entry);
2939 static void vxge_napi_del_all(struct vxgedev *vdev)
2942 if (vdev->config.intr_type != MSI_X)
2943 netif_napi_del(&vdev->napi);
2945 for (i = 0; i < vdev->no_of_vpath; i++)
2946 netif_napi_del(&vdev->vpaths[i].ring.napi);
2950 static int do_vxge_close(struct net_device *dev, int do_io)
2952 enum vxge_hw_status status;
2953 struct vxgedev *vdev;
2954 struct __vxge_hw_device *hldev;
2956 u64 val64, vpath_vector;
2957 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2958 dev->name, __func__, __LINE__);
2960 vdev = netdev_priv(dev);
2961 hldev = pci_get_drvdata(vdev->pdev);
2963 if (unlikely(!is_vxge_card_up(vdev)))
2966 /* If vxge_handle_crit_err task is executing,
2967 * wait till it completes. */
2968 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2972 /* Put the vpath back in normal mode */
2973 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2974 status = vxge_hw_mgmt_reg_read(vdev->devh,
2975 vxge_hw_mgmt_reg_type_mrpcim,
2978 struct vxge_hw_mrpcim_reg,
2979 rts_mgr_cbasin_cfg),
2981 if (status == VXGE_HW_OK) {
2982 val64 &= ~vpath_vector;
2983 status = vxge_hw_mgmt_reg_write(vdev->devh,
2984 vxge_hw_mgmt_reg_type_mrpcim,
2987 struct vxge_hw_mrpcim_reg,
2988 rts_mgr_cbasin_cfg),
2992 /* Remove the function 0 from promiscuous mode */
2993 vxge_hw_mgmt_reg_write(vdev->devh,
2994 vxge_hw_mgmt_reg_type_mrpcim,
2996 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2997 rxmac_authorize_all_addr),
3000 vxge_hw_mgmt_reg_write(vdev->devh,
3001 vxge_hw_mgmt_reg_type_mrpcim,
3003 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
3004 rxmac_authorize_all_vid),
3011 del_timer_sync(&vdev->vp_lockup_timer);
3013 del_timer_sync(&vdev->vp_reset_timer);
3016 vxge_hw_device_wait_receive_idle(hldev);
3018 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3021 if (vdev->config.intr_type != MSI_X)
3022 napi_disable(&vdev->napi);
3024 for (i = 0; i < vdev->no_of_vpath; i++)
3025 napi_disable(&vdev->vpaths[i].ring.napi);
3028 netif_carrier_off(vdev->ndev);
3029 netdev_notice(vdev->ndev, "Link Down\n");
3030 netif_tx_stop_all_queues(vdev->ndev);
3032 /* Note that at this point xmit() is stopped by upper layer */
3034 vxge_hw_device_intr_disable(vdev->devh);
3038 vxge_napi_del_all(vdev);
3041 vxge_reset_all_vpaths(vdev);
3043 vxge_close_vpaths(vdev, 0);
3045 vxge_debug_entryexit(VXGE_TRACE,
3046 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
3048 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3055 * @dev: device pointer.
3057 * This is the stop entry point of the driver. It needs to undo exactly
3058 * whatever was done by the open entry point, thus it's usually referred to
3059 * as the close function. Among other things, this function mainly stops the
3060 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3061 * Return value: '0' on success and an appropriate (-)ve integer as
3062 * defined in errno.h file on failure.
3064 static int vxge_close(struct net_device *dev)
3066 do_vxge_close(dev, 1);
3072 * @dev: net device pointer.
3073 * @new_mtu :the new MTU size for the device.
3075 * A driver entry point to change MTU size for the device. Before changing
3076 * the MTU the device must be stopped.
3078 static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3080 struct vxgedev *vdev = netdev_priv(dev);
3082 vxge_debug_entryexit(vdev->level_trace,
3083 "%s:%d", __func__, __LINE__);
3084 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3085 vxge_debug_init(vdev->level_err,
3086 "%s: mtu size is invalid", dev->name);
3090 /* check if device is down already */
3091 if (unlikely(!is_vxge_card_up(vdev))) {
3092 /* just store new value, will use later on open() */
3094 vxge_debug_init(vdev->level_err,
3095 "%s", "device is down on MTU change");
3099 vxge_debug_init(vdev->level_trace,
3100 "trying to apply new MTU %d", new_mtu);
3102 if (vxge_close(dev))
3106 vdev->mtu = new_mtu;
3111 vxge_debug_init(vdev->level_trace,
3112 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3114 vxge_debug_entryexit(vdev->level_trace,
3115 "%s:%d Exiting...", __func__, __LINE__);
3122 * @dev: pointer to the device structure
3123 * @stats: pointer to struct rtnl_link_stats64
3126 static struct rtnl_link_stats64 *
3127 vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3129 struct vxgedev *vdev = netdev_priv(dev);
3132 /* net_stats already zeroed by caller */
3133 for (k = 0; k < vdev->no_of_vpath; k++) {
3134 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3135 struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3137 u64 packets, bytes, multicast;
3140 start = u64_stats_fetch_begin_bh(&rxstats->syncp);
3142 packets = rxstats->rx_frms;
3143 multicast = rxstats->rx_mcast;
3144 bytes = rxstats->rx_bytes;
3145 } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
3147 net_stats->rx_packets += packets;
3148 net_stats->rx_bytes += bytes;
3149 net_stats->multicast += multicast;
3151 net_stats->rx_errors += rxstats->rx_errors;
3152 net_stats->rx_dropped += rxstats->rx_dropped;
3155 start = u64_stats_fetch_begin_bh(&txstats->syncp);
3157 packets = txstats->tx_frms;
3158 bytes = txstats->tx_bytes;
3159 } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
3161 net_stats->tx_packets += packets;
3162 net_stats->tx_bytes += bytes;
3163 net_stats->tx_errors += txstats->tx_errors;
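/*
 * The reader loop above is one half of the u64_stats seqcount protocol,
 * which keeps 64-bit counters tear-free on 32-bit machines without a
 * lock: a reader retries if a writer was active between begin and retry.
 * The complementary writer side, which lives in the hot rx/tx paths and
 * is not shown in this section, looks roughly like:
 */
#if 0	/* Illustrative sketch only. */
u64_stats_update_begin(&stats->syncp);
stats->rx_frms++;
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
#endif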
3169 static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3171 enum vxge_hw_status status;
3174 /* Timestamp is passed to the driver via the FCS, therefore we
3175 * must disable the FCS stripping by the adapter. Since this is
3176 * required for the driver to load (due to a hardware bug),
3177 * there is no need to do anything special here.
3179 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3180 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3181 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3183 status = vxge_hw_mgmt_reg_write(devh,
3184 vxge_hw_mgmt_reg_type_mrpcim,
3186 offsetof(struct vxge_hw_mrpcim_reg,
3189 vxge_hw_device_flush_io(devh);
3190 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3194 static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
3196 struct hwtstamp_config config;
3199 if (copy_from_user(&config, data, sizeof(config)))
3202 /* reserved for future extensions */
3206 /* Transmit HW Timestamp not supported */
3207 switch (config.tx_type) {
3208 case HWTSTAMP_TX_OFF:
3210 case HWTSTAMP_TX_ON:
3215 switch (config.rx_filter) {
3216 case HWTSTAMP_FILTER_NONE:
3218 config.rx_filter = HWTSTAMP_FILTER_NONE;
3221 case HWTSTAMP_FILTER_ALL:
3222 case HWTSTAMP_FILTER_SOME:
3223 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3224 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3225 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3226 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3227 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3228 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3229 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3230 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3231 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3232 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3233 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3234 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3235 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3239 config.rx_filter = HWTSTAMP_FILTER_ALL;
3246 for (i = 0; i < vdev->no_of_vpath; i++)
3247 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3249 if (copy_to_user(data, &config, sizeof(config)))
3255 static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
3257 struct hwtstamp_config config;
3260 config.tx_type = HWTSTAMP_TX_OFF;
3261 config.rx_filter = (vdev->rx_hwts ?
3262 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
3264 if (copy_to_user(data, &config, sizeof(config)))
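/*
 * vxge_hwtstamp_set()/_get() implement the standard SIOCSHWTSTAMP flow:
 * copy the user's struct hwtstamp_config in, clamp tx_type/rx_filter to
 * what the hardware supports (no Tx stamping; Rx is all-or-nothing), and
 * copy the possibly-upgraded config back so userspace learns what it
 * actually got. A userspace sketch (assumes <linux/net_tstamp.h>,
 * <net/if.h>, <sys/ioctl.h>; "eth0" and fd are placeholders):
 */
#if 0	/* Illustrative sketch only. */
struct hwtstamp_config cfg = {
	.tx_type   = HWTSTAMP_TX_OFF,
	.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
};
struct ifreq ifr = { .ifr_data = (void *)&cfg };

strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
	/* cfg.rx_filter now reports the granted filter; on this
	 * adapter any PTP request comes back as HWTSTAMP_FILTER_ALL. */;
#endif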
3272 * @dev: Device pointer.
3273 * @ifr: An IOCTL specific structure, that can contain a pointer to
3274 * a proprietary structure used to pass information to the driver.
3275 * @cmd: This is used to distinguish between the different commands that
3276 * can be passed to the IOCTL functions.
3278 * Entry point for the Ioctl.
3280 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3282 struct vxgedev *vdev = netdev_priv(dev);
3286 return vxge_hwtstamp_set(vdev, rq->ifr_data);
3288 return vxge_hwtstamp_get(vdev, rq->ifr_data);
3296 * @dev: pointer to net device structure
3298 * Watchdog for transmit side.
3299 * This function is triggered if the Tx Queue is stopped
3300 * for a pre-defined amount of time when the Interface is still up.
3302 static void vxge_tx_watchdog(struct net_device *dev)
3304 struct vxgedev *vdev;
3306 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3308 vdev = netdev_priv(dev);
3310 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3312 schedule_work(&vdev->reset_task);
3313 vxge_debug_entryexit(VXGE_TRACE,
3314 "%s:%d Exiting...", __func__, __LINE__);
3318 * vxge_vlan_rx_add_vid
3319 * @dev: net device pointer.
3320 * @proto: vlan protocol
3323 * Add the vlan id to the device's vlan id table
3326 vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3328 struct vxgedev *vdev = netdev_priv(dev);
3329 struct vxge_vpath *vpath;
3332 /* Add these vlan to the vid table */
3333 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3334 vpath = &vdev->vpaths[vp_id];
3335 if (!vpath->is_open)
3337 vxge_hw_vpath_vid_add(vpath->handle, vid);
3339 set_bit(vid, vdev->active_vlans);
3344 * vxge_vlan_rx_kill_vid
3345 * @dev: net device pointer.
3346 * @proto: vlan protocol
3349 * Remove the vlan id from the device's vlan id table
3352 vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3354 struct vxgedev *vdev = netdev_priv(dev);
3355 struct vxge_vpath *vpath;
3358 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3360 /* Delete this vlan from the vid table */
3361 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3362 vpath = &vdev->vpaths[vp_id];
3363 if (!vpath->is_open)
3365 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3367 vxge_debug_entryexit(VXGE_TRACE,
3368 "%s:%d Exiting...", __func__, __LINE__);
3369 clear_bit(vid, vdev->active_vlans);
3373 static const struct net_device_ops vxge_netdev_ops = {
3374 .ndo_open = vxge_open,
3375 .ndo_stop = vxge_close,
3376 .ndo_get_stats64 = vxge_get_stats64,
3377 .ndo_start_xmit = vxge_xmit,
3378 .ndo_validate_addr = eth_validate_addr,
3379 .ndo_set_rx_mode = vxge_set_multicast,
3380 .ndo_do_ioctl = vxge_ioctl,
3381 .ndo_set_mac_address = vxge_set_mac_addr,
3382 .ndo_change_mtu = vxge_change_mtu,
3383 .ndo_fix_features = vxge_fix_features,
3384 .ndo_set_features = vxge_set_features,
3385 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3386 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3387 .ndo_tx_timeout = vxge_tx_watchdog,
3388 #ifdef CONFIG_NET_POLL_CONTROLLER
3389 .ndo_poll_controller = vxge_netpoll,
3393 static int vxge_device_register(struct __vxge_hw_device *hldev,
3394 struct vxge_config *config, int high_dma,
3395 int no_of_vpath, struct vxgedev **vdev_out)
3397 struct net_device *ndev;
3398 enum vxge_hw_status status = VXGE_HW_OK;
3399 struct vxgedev *vdev;
3400 int ret = 0, no_of_queue = 1;
3404 if (config->tx_steering_type)
3405 no_of_queue = no_of_vpath;
3407 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3411 vxge_hw_device_trace_level_get(hldev),
3412 "%s : device allocation failed", __func__);
3417 vxge_debug_entryexit(
3418 vxge_hw_device_trace_level_get(hldev),
3419 "%s: %s:%d Entering...",
3420 ndev->name, __func__, __LINE__);
3422 vdev = netdev_priv(ndev);
3423 memset(vdev, 0, sizeof(struct vxgedev));
3427 vdev->pdev = hldev->pdev;
3428 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3430 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3432 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3434 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3435 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3436 NETIF_F_TSO | NETIF_F_TSO6 |
3437 NETIF_F_HW_VLAN_CTAG_TX;
3438 if (vdev->config.rth_steering != NO_STEERING)
3439 ndev->hw_features |= NETIF_F_RXHASH;
3441 ndev->features |= ndev->hw_features |
3442 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3445 ndev->netdev_ops = &vxge_netdev_ops;
3447 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3448 INIT_WORK(&vdev->reset_task, vxge_reset);
3450 vxge_initialize_ethtool_ops(ndev);
3452 /* Allocate memory for vpath */
3453 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3454 no_of_vpath, GFP_KERNEL);
3455 if (!vdev->vpaths) {
3456 vxge_debug_init(VXGE_ERR,
3457 "%s: vpath memory allocation failed",
3463 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3464 "%s : checksumming enabled", __func__);
3467 ndev->features |= NETIF_F_HIGHDMA;
3468 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3469 "%s : using High DMA", __func__);
3472 ret = register_netdev(ndev);
3474 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3475 "%s: %s : device registration failed!",
3476 ndev->name, __func__);
3480 /* Set the factory defined MAC address initially */
3481 ndev->addr_len = ETH_ALEN;
3483 /* Make the link state off at this point; when the link change
3484 * interrupt comes, the state will be changed automatically. */
3487 netif_carrier_off(ndev);
3489 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3490 "%s: Ethernet device registered",
3496 /* Resetting the Device stats */
3497 status = vxge_hw_mrpcim_stats_access(
3499 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3504 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3506 vxge_hw_device_trace_level_get(hldev),
3507 "%s: device stats clear returns"
3508 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3510 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3511 "%s: %s:%d Exiting...",
3512 ndev->name, __func__, __LINE__);
3516 kfree(vdev->vpaths);
3524 * vxge_device_unregister
3526 * This function will unregister and free network device
3528 static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3530 struct vxgedev *vdev;
3531 struct net_device *dev;
3535 vdev = netdev_priv(dev);
3537 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3538 __func__, __LINE__);
3540 strncpy(buf, dev->name, IFNAMSIZ);
3542 flush_work(&vdev->reset_task);
3544 /* in 2.6 will call stop() if device is up */
3545 unregister_netdev(dev);
3547 kfree(vdev->vpaths);
3549 /* we are safe to free it now */
3552 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3554 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3555 __func__, __LINE__);
3559 * vxge_callback_crit_err
3561 * This function is called by the alarm handler in interrupt context.
3562 * Driver must analyze it based on the event type.
3565 vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3566 enum vxge_hw_event type, u64 vp_id)
3568 struct net_device *dev = hldev->ndev;
3569 struct vxgedev *vdev = netdev_priv(dev);
3570 struct vxge_vpath *vpath = NULL;
3573 vxge_debug_entryexit(vdev->level_trace,
3574 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3576 /* Note: This event type should be used for device wide
3577 * indications only - Serious errors, Slot freeze and critical errors
3579 vdev->cric_err_event = type;
3581 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3582 vpath = &vdev->vpaths[vpath_idx];
3583 if (vpath->device_id == vp_id)
3587 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3588 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3589 vxge_debug_init(VXGE_ERR,
3590 "%s: Slot is frozen", vdev->ndev->name);
3591 } else if (type == VXGE_HW_EVENT_SERR) {
3592 vxge_debug_init(VXGE_ERR,
3593 "%s: Encountered Serious Error",
3595 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3596 vxge_debug_init(VXGE_ERR,
3597 "%s: Encountered Critical Error",
3601 if ((type == VXGE_HW_EVENT_SERR) ||
3602 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3603 if (unlikely(vdev->exec_mode))
3604 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3605 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3606 vxge_hw_device_mask_all(hldev);
3607 if (unlikely(vdev->exec_mode))
3608 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3609 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3610 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3612 if (unlikely(vdev->exec_mode))
3613 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3615 /* check if this vpath is already set for reset */
3616 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3618 /* disable interrupts for this vpath */
3619 vxge_vpath_intr_disable(vdev, vpath_idx);
3621 /* stop the queue for this vpath */
3622 netif_tx_stop_queue(vpath->fifo.txq);
3627 vxge_debug_entryexit(vdev->level_trace,
3628 "%s: %s:%d Exiting...",
3629 vdev->ndev->name, __func__, __LINE__);
3632 static void verify_bandwidth(void)
3634 int i, band_width, total = 0, equal_priority = 0;
3636 /* 1. If user enters 0 for some fifo, give equal priority to all */
3637 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3638 if (bw_percentage[i] == 0) {
3644 if (!equal_priority) {
3645 /* 2. If sum exceeds 100, give equal priority to all */
3646 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3647 if (bw_percentage[i] == 0xFF)
3650 total += bw_percentage[i];
3651 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3658 if (!equal_priority) {
3659 /* Is all the bandwidth consumed? */
3660 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3661 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3662 /* Split the rest of the bw equally among the remaining VPs */
3664 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3665 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3666 if (band_width < 2) /* min of 2% */
3669 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3675 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3679 if (equal_priority) {
3680 vxge_debug_init(VXGE_ERR,
3681 "%s: Assigning equal bandwidth to all the vpaths",
3683 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3684 VXGE_HW_MAX_VIRTUAL_PATHS;
3685 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3686 bw_percentage[i] = bw_percentage[0];
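/*
 * Worked example of the normalization above, assuming 17 vpaths and
 * VXGE_HW_VPATH_BANDWIDTH_MAX == 100 (parameter values hypothetical):
 * with bw_percentage=40,40 and the other 15 entries left at the 0xFF
 * sentinel, total = 80, so the remaining 20% is split over the 15
 * unconfigured vpaths -- 20 / 15 = 1% each, clamped up to the 2%
 * minimum. If any entry is 0, or the entered values sum past 100,
 * every vpath instead gets the equal share 100 / 17 = 5% (integer
 * division).
 */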
3691 * Vpath configuration
3693 static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
3694 u64 vpath_mask, struct vxge_config *config_param)
3696 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3697 u32 txdl_size, txdl_per_memblock;
3699 temp = driver_config->vpath_per_dev;
3700 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3701 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3702 /* No more CPUs. Return vpath count as zero. */
3703 if (driver_config->g_no_cpus == -1)
3706 if (!driver_config->g_no_cpus)
3707 driver_config->g_no_cpus =
3708 netif_get_num_default_rss_queues();
3710 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3711 if (!driver_config->vpath_per_dev)
3712 driver_config->vpath_per_dev = 1;
3714 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3715 if (!vxge_bVALn(vpath_mask, i, 1))
3719 if (default_no_vpath < driver_config->vpath_per_dev)
3720 driver_config->vpath_per_dev = default_no_vpath;
3722 driver_config->g_no_cpus = driver_config->g_no_cpus -
3723 (driver_config->vpath_per_dev * 2);
3724 if (driver_config->g_no_cpus <= 0)
3725 driver_config->g_no_cpus = -1;
3728 if (driver_config->vpath_per_dev == 1) {
3729 vxge_debug_ll_config(VXGE_TRACE,
3730 "%s: Disable tx and rx steering, "
3731 "as single vpath is configured", VXGE_DRIVER_NAME);
3732 config_param->rth_steering = NO_STEERING;
3733 config_param->tx_steering_type = NO_STEERING;
3734 device_config->rth_en = 0;
3737 /* configure bandwidth */
3738 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3739 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3741 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3742 device_config->vp_config[i].vp_id = i;
3743 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3744 if (no_of_vpaths < driver_config->vpath_per_dev) {
3745 if (!vxge_bVALn(vpath_mask, i, 1)) {
3746 vxge_debug_ll_config(VXGE_TRACE,
3747 "%s: vpath: %d is not available",
3748 VXGE_DRIVER_NAME, i);
3751 vxge_debug_ll_config(VXGE_TRACE,
3752 "%s: vpath: %d available",
3753 VXGE_DRIVER_NAME, i);
3757 vxge_debug_ll_config(VXGE_TRACE,
3758 "%s: vpath: %d is not configured, "
3759 "max_config_vpath exceeded",
3760 VXGE_DRIVER_NAME, i);
3764 /* Configure Tx fifo's */
3765 device_config->vp_config[i].fifo.enable =
3766 VXGE_HW_FIFO_ENABLE;
3767 device_config->vp_config[i].fifo.max_frags =
3769 device_config->vp_config[i].fifo.memblock_size =
3770 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3772 txdl_size = device_config->vp_config[i].fifo.max_frags *
3773 sizeof(struct vxge_hw_fifo_txd);
3774 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3776 device_config->vp_config[i].fifo.fifo_blocks =
3777 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3779 device_config->vp_config[i].fifo.intr =
3780 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3782 /* Configure tti properties */
3783 device_config->vp_config[i].tti.intr_enable =
3784 VXGE_HW_TIM_INTR_ENABLE;
3786 device_config->vp_config[i].tti.btimer_val =
3787 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3789 device_config->vp_config[i].tti.timer_ac_en =
3790 VXGE_HW_TIM_TIMER_AC_ENABLE;
3792 /* For msi-x with napi (each vector has a handler of its own) -
3793 * Set CI to OFF for all vpaths
3795 device_config->vp_config[i].tti.timer_ci_en =
3796 VXGE_HW_TIM_TIMER_CI_DISABLE;
3798 device_config->vp_config[i].tti.timer_ri_en =
3799 VXGE_HW_TIM_TIMER_RI_DISABLE;
3801 device_config->vp_config[i].tti.util_sel =
3802 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3804 device_config->vp_config[i].tti.ltimer_val =
3805 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3807 device_config->vp_config[i].tti.rtimer_val =
3808 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3810 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3811 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3812 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3813 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3814 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3815 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3816 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3818 /* Configure Rx rings */
3819 device_config->vp_config[i].ring.enable =
3820 VXGE_HW_RING_ENABLE;
3822 device_config->vp_config[i].ring.ring_blocks =
3823 VXGE_HW_DEF_RING_BLOCKS;
3825 device_config->vp_config[i].ring.buffer_mode =
3826 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3828 device_config->vp_config[i].ring.rxds_limit =
3829 VXGE_HW_DEF_RING_RXDS_LIMIT;
3831 device_config->vp_config[i].ring.scatter_mode =
3832 VXGE_HW_RING_SCATTER_MODE_A;
3834 /* Configure rti properties */
3835 device_config->vp_config[i].rti.intr_enable =
3836 VXGE_HW_TIM_INTR_ENABLE;
3838 device_config->vp_config[i].rti.btimer_val =
3839 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3841 device_config->vp_config[i].rti.timer_ac_en =
3842 VXGE_HW_TIM_TIMER_AC_ENABLE;
3844 device_config->vp_config[i].rti.timer_ci_en =
3845 VXGE_HW_TIM_TIMER_CI_DISABLE;
3847 device_config->vp_config[i].rti.timer_ri_en =
3848 VXGE_HW_TIM_TIMER_RI_DISABLE;
3850 device_config->vp_config[i].rti.util_sel =
3851 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3853 device_config->vp_config[i].rti.urange_a =
3855 device_config->vp_config[i].rti.urange_b =
3857 device_config->vp_config[i].rti.urange_c =
3859 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3860 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3861 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3862 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3864 device_config->vp_config[i].rti.rtimer_val =
3865 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3867 device_config->vp_config[i].rti.ltimer_val =
3868 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3870 device_config->vp_config[i].rpa_strip_vlan_tag =
3874 driver_config->vpath_per_dev = temp;
3875 return no_of_vpaths;
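/*
 * On the "(VXGE_TTI_BTIMER_VAL * 1000) / 272" conversions used above:
 * the TIM timer registers appear to count in device ticks of roughly
 * 272 ns, so a value expressed in microseconds is converted via
 * usec * 1000 / 272. E.g., assuming a hypothetical btimer value of
 * 250 us, the register would be programmed with 250000 / 272 ~= 919
 * ticks.
 */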
3878 /* Initialize device configuration */
3879 static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
3882 /* Used for CQRQ/SRQ. */
3883 device_config->dma_blockpool_initial =
3884 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3886 device_config->dma_blockpool_max =
3887 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3889 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3890 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3892 #ifndef CONFIG_PCI_MSI
3893 vxge_debug_init(VXGE_ERR,
3894 "%s: This Kernel does not support "
3895 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3899 /* Configure whether to use MSI-X or an INTA IRQ line. */
3900 switch (*intr_type) {
3902 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3906 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3910 /* Timer period between device poll */
3911 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3913 /* Configure mac based steering. */
3914 device_config->rts_mac_en = addr_learn_en;
3916 /* Configure Vpaths */
3917 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3919 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3921 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3922 device_config->intr_mode);
3923 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3924 device_config->device_poll_millis);
3925 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3926 device_config->rth_en);
3927 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3928 device_config->rth_it_type);
3931 static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3935 vxge_debug_init(VXGE_TRACE,
3936 "%s: %d Vpath(s) opened",
3937 vdev->ndev->name, vdev->no_of_vpath);
3939 switch (vdev->config.intr_type) {
3941 vxge_debug_init(VXGE_TRACE,
3942 "%s: Interrupt type INTA", vdev->ndev->name);
3946 vxge_debug_init(VXGE_TRACE,
3947 "%s: Interrupt type MSI-X", vdev->ndev->name);
3951 if (vdev->config.rth_steering) {
3952 vxge_debug_init(VXGE_TRACE,
3953 "%s: RTH steering enabled for TCP_IPV4",
3956 vxge_debug_init(VXGE_TRACE,
3957 "%s: RTH steering disabled", vdev->ndev->name);
3960 switch (vdev->config.tx_steering_type) {
3962 vxge_debug_init(VXGE_TRACE,
3963 "%s: Tx steering disabled", vdev->ndev->name);
3965 case TX_PRIORITY_STEERING:
3966 vxge_debug_init(VXGE_TRACE,
3967 "%s: Unsupported tx steering option",
3969 vxge_debug_init(VXGE_TRACE,
3970 "%s: Tx steering disabled", vdev->ndev->name);
3971 vdev->config.tx_steering_type = 0;
3973 case TX_VLAN_STEERING:
3974 vxge_debug_init(VXGE_TRACE,
3975 "%s: Unsupported tx steering option",
3977 vxge_debug_init(VXGE_TRACE,
3978 "%s: Tx steering disabled", vdev->ndev->name);
3979 vdev->config.tx_steering_type = 0;
3981 case TX_MULTIQ_STEERING:
3982 vxge_debug_init(VXGE_TRACE,
3983 "%s: Tx multiqueue steering enabled",
3986 case TX_PORT_STEERING:
3987 vxge_debug_init(VXGE_TRACE,
3988 "%s: Tx port steering enabled",
3992 vxge_debug_init(VXGE_ERR,
3993 "%s: Unsupported tx steering type",
3995 vxge_debug_init(VXGE_TRACE,
3996 "%s: Tx steering disabled", vdev->ndev->name);
3997 vdev->config.tx_steering_type = 0;
4000 if (vdev->config.addr_learn_en)
4001 vxge_debug_init(VXGE_TRACE,
4002 "%s: MAC Address learning enabled", vdev->ndev->name);
4004 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4005 if (!vxge_bVALn(vpath_mask, i, 1))
4007 vxge_debug_ll_config(VXGE_TRACE,
4008 "%s: MTU size - %d", vdev->ndev->name,
4010 config.vp_config[i].mtu);
4011 vxge_debug_init(VXGE_TRACE,
4012 "%s: VLAN tag stripping %s", vdev->ndev->name,
4014 config.vp_config[i].rpa_strip_vlan_tag
4015 ? "Enabled" : "Disabled");
4016 vxge_debug_ll_config(VXGE_TRACE,
4017 "%s: Max frags : %d", vdev->ndev->name,
4019 config.vp_config[i].fifo.max_frags);
4026 * vxge_pm_suspend - vxge power management suspend entry point
4029 static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
4034 * vxge_pm_resume - vxge power management resume entry point
4037 static int vxge_pm_resume(struct pci_dev *pdev)
4045 * vxge_io_error_detected - called when PCI error is detected
4046 * @pdev: Pointer to PCI device
4047 * @state: The current pci connection state
4049 * This function is called after a PCI bus error affecting
4050 * this device has been detected.
4052 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4053 pci_channel_state_t state)
4055 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4056 struct net_device *netdev = hldev->ndev;
4058 netif_device_detach(netdev);
4060 if (state == pci_channel_io_perm_failure)
4061 return PCI_ERS_RESULT_DISCONNECT;
4063 if (netif_running(netdev)) {
4064 /* Bring down the card, while avoiding PCI I/O */
4065 do_vxge_close(netdev, 0);
4068 pci_disable_device(pdev);
4070 return PCI_ERS_RESULT_NEED_RESET;
4074 * vxge_io_slot_reset - called after the pci bus has been reset.
4075 * @pdev: Pointer to PCI device
4077 * Restart the card from scratch, as if from a cold-boot.
4078 * At this point, the card has experienced a hard reset,
4079 * followed by fixups by BIOS, and has its config space
4080 * set up identically to what it was at cold boot.
4082 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4084 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4085 struct net_device *netdev = hldev->ndev;
4087 struct vxgedev *vdev = netdev_priv(netdev);
4089 if (pci_enable_device(pdev)) {
4090 netdev_err(netdev, "Cannot re-enable device after reset\n");
4091 return PCI_ERS_RESULT_DISCONNECT;
4094 pci_set_master(pdev);
4095 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4097 return PCI_ERS_RESULT_RECOVERED;
4101 * vxge_io_resume - called when traffic can start flowing again.
4102 * @pdev: Pointer to PCI device
4104 * This callback is called when the error recovery driver tells
4105 * us that it is OK to resume normal operation.
4107 static void vxge_io_resume(struct pci_dev *pdev)
4109 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4110 struct net_device *netdev = hldev->ndev;
4112 if (netif_running(netdev)) {
4113 if (vxge_open(netdev)) {
4115 "Can't bring device back up after reset\n");
4120 netif_device_attach(netdev);
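/*
 * The three callbacks above form the standard PCI AER recovery sequence:
 * error_detected() (detach and quiesce, no PCI I/O) -> slot_reset()
 * (re-enable and reinitialize after the bus reset) -> resume()
 * (reattach). They are wired into the pci_driver roughly as follows
 * (a sketch; field names per the pci_error_handlers API):
 */
#if 0	/* Illustrative sketch only. */
static const struct pci_error_handlers vxge_err_handler = {
	.error_detected	= vxge_io_error_detected,
	.slot_reset	= vxge_io_slot_reset,
	.resume		= vxge_io_resume,
};

static struct pci_driver vxge_driver = {
	.name		= VXGE_DRIVER_NAME,
	.id_table	= vxge_id_table,
	.probe		= vxge_probe,
	/* ... remove/suspend/resume hooks ... */
	.err_handler	= &vxge_err_handler,
};
#endif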
4123 static inline u32 vxge_get_num_vfs(u64 function_mode)
4125 u32 num_functions = 0;
4127 switch (function_mode) {
4128 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4129 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4132 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4135 case VXGE_HW_FUNCTION_MODE_SRIOV:
4136 case VXGE_HW_FUNCTION_MODE_MRIOV:
4137 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4140 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4143 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4146 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4147 num_functions = 8; /* TODO */
4150 return num_functions;
4153 int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4155 struct __vxge_hw_device *hldev = vdev->devh;
4156 u32 maj, min, bld, cmaj, cmin, cbld;
4157 enum vxge_hw_status status;
4158 const struct firmware *fw;
4161 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4163 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4164 VXGE_DRIVER_NAME, fw_name);
4168 /* Load the new firmware onto the adapter */
4169 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4170 if (status != VXGE_HW_OK) {
4171 vxge_debug_init(VXGE_ERR,
4172 "%s: FW image download to adapter failed '%s'.",
4173 VXGE_DRIVER_NAME, fw_name);
4178 /* Read the version of the new firmware */
4179 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4180 if (status != VXGE_HW_OK) {
4181 vxge_debug_init(VXGE_ERR,
4182 "%s: Upgrade read version failed '%s'.",
4183 VXGE_DRIVER_NAME, fw_name);
4188 cmaj = vdev->config.device_hw_info.fw_version.major;
4189 cmin = vdev->config.device_hw_info.fw_version.minor;
4190 cbld = vdev->config.device_hw_info.fw_version.build;
4191 /* It's possible the version in /lib/firmware is not the latest version.
4192 * If so, we could get into a loop of trying to upgrade to the latest
4193 * and flashing the older version.
4195 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4201 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4204 /* Flash the adapter with the new firmware */
4205 status = vxge_hw_flash_fw(hldev);
4206 if (status != VXGE_HW_OK) {
4207 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4208 VXGE_DRIVER_NAME, fw_name);
4213 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4214 "hard reset before using, thus requiring a system reboot or a "
4215 "hotplug event.\n");
4218 release_firmware(fw);
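/*
 * Firmware lifecycle note for the function above: request_firmware()
 * loads "vxge/X3fw.ncf" (or the -pxe variant) from the firmware search
 * path, typically /lib/firmware, and release_firmware() frees the blob
 * on every exit path. Drivers normally also advertise the names so
 * packaging tools can bundle the files, e.g. (sketch):
 */
#if 0	/* Illustrative sketch only. */
MODULE_FIRMWARE("vxge/X3fw.ncf");
MODULE_FIRMWARE("vxge/X3fw-pxe.ncf");
#endif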
4222 static int vxge_probe_fw_update(struct vxgedev *vdev)
4228 maj = vdev->config.device_hw_info.fw_version.major;
4229 min = vdev->config.device_hw_info.fw_version.minor;
4230 bld = vdev->config.device_hw_info.fw_version.build;
4232 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4235 /* Ignore the build number when determining if the current firmware is
4236 * "too new" to load the driver
4238 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4239 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4240 "version, unable to load driver\n",
4245 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4246 * work with this driver.
4248 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4249 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4250 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4254 /* If file not specified, determine gPXE or not */
4255 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4257 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4258 if (vdev->devh->eprom_versions[i]) {
4264 fw_name = "vxge/X3fw-pxe.ncf";
4266 fw_name = "vxge/X3fw.ncf";
4268 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4269 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4270 * probe, so ignore them
4272 if (ret != -EINVAL && ret != -ENOENT)
4277 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4278 VXGE_FW_VER(maj, min, 0)) {
4279 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4280 " be used with this driver.",
4281 VXGE_DRIVER_NAME, maj, min, bld);
4288 static int is_sriov_initialized(struct pci_dev *pdev)
4293 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4295 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4296 if (ctrl & PCI_SRIOV_CTRL_VFE)
4302 static const struct vxge_hw_uld_cbs vxge_callbacks = {
4303 .link_up = vxge_callback_link_up,
4304 .link_down = vxge_callback_link_down,
4305 .crit_err = vxge_callback_crit_err,
4310 * @pdev : structure containing the PCI related information of the device.
4311 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4313 * This function is called when a new PCI device gets detected and initializes
4316 * returns 0 on success and negative on failure.
4320 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4322 struct __vxge_hw_device *hldev;
4323 enum vxge_hw_status status;
4327 struct vxgedev *vdev;
4328 struct vxge_config *ll_config = NULL;
4329 struct vxge_hw_device_config *device_config = NULL;
4330 struct vxge_hw_device_attr attr;
4331 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4333 struct vxge_mac_addrs *entry;
4334 static int bus = -1, device = -1;
4337 enum vxge_hw_status is_privileged;
4341 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4344 /* In SRIOV-17 mode, functions of the same adapter
4345 * can be deployed on different buses
4347 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4351 bus = pdev->bus->number;
4352 device = PCI_SLOT(pdev->devfn);
4355 if (driver_config->config_dev_cnt &&
4356 (driver_config->config_dev_cnt !=
4357 driver_config->total_dev_cnt))
4358 vxge_debug_init(VXGE_ERR,
4359 "%s: Configured %d of %d devices",
4361 driver_config->config_dev_cnt,
4362 driver_config->total_dev_cnt);
4363 driver_config->config_dev_cnt = 0;
4364 driver_config->total_dev_cnt = 0;
4367 /* Now make the CPU-based vpath count calculation
4368 * applicable to individual functions as well.
4370 driver_config->g_no_cpus = 0;
4371 driver_config->vpath_per_dev = max_config_vpath;
4373 driver_config->total_dev_cnt++;
4374 if (++driver_config->config_dev_cnt > max_config_dev) {
4379 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4381 if (!device_config) {
4383 vxge_debug_init(VXGE_ERR,
4384 "device_config : malloc failed %s %d",
4385 __FILE__, __LINE__);
4389 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4392 vxge_debug_init(VXGE_ERR,
4393 "device_config : malloc failed %s %d",
4394 __FILE__, __LINE__);
4397 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4398 ll_config->intr_type = MSI_X;
4399 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4400 ll_config->rth_steering = RTH_STEERING;
4402 /* get the default configuration parameters */
4403 vxge_hw_device_config_default_get(device_config);
4405 /* initialize configuration parameters */
4406 vxge_device_config_init(device_config, &ll_config->intr_type);
4408 ret = pci_enable_device(pdev);
4410 vxge_debug_init(VXGE_ERR,
4411 "%s : can not enable PCI device", __func__);
4415 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4416 vxge_debug_ll_config(VXGE_TRACE,
4417 "%s : using 64bit DMA", __func__);
4421 if (pci_set_consistent_dma_mask(pdev,
4422 DMA_BIT_MASK(64))) {
4423 vxge_debug_init(VXGE_ERR,
4424 "%s : unable to obtain 64bit DMA for "
4425 "consistent allocations", __func__);
4429 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4430 vxge_debug_ll_config(VXGE_TRACE,
4431 "%s : using 32bit DMA", __func__);
4437 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4439 vxge_debug_init(VXGE_ERR,
4440 "%s : request regions failed", __func__);
4444 pci_set_master(pdev);
4446 attr.bar0 = pci_ioremap_bar(pdev, 0);
4448 vxge_debug_init(VXGE_ERR,
4449 "%s : cannot remap io memory bar0", __func__);
4453 vxge_debug_ll_config(VXGE_TRACE,
4454 "pci ioremap bar0: %p:0x%llx",
4456 (unsigned long long)pci_resource_start(pdev, 0));
4458 status = vxge_hw_device_hw_info_get(attr.bar0,
4459 &ll_config->device_hw_info);
4460 if (status != VXGE_HW_OK) {
4461 vxge_debug_init(VXGE_ERR,
4462 "%s: Reading of hardware info failed."
4463 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4468 vpath_mask = ll_config->device_hw_info.vpath_mask;
4469 if (vpath_mask == 0) {
4470 vxge_debug_ll_config(VXGE_TRACE,
4471 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4476 vxge_debug_ll_config(VXGE_TRACE,
4477 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4478 (unsigned long long)vpath_mask);
4480 function_mode = ll_config->device_hw_info.function_mode;
4481 host_type = ll_config->device_hw_info.host_type;
4482 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4483 ll_config->device_hw_info.func_id);
4485 /* Check how many vpaths are available */
4486 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4487 if (!((vpath_mask) & vxge_mBIT(i)))
4489 max_vpath_supported++;
4493 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4495 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4496 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4497 (ll_config->intr_type != INTA)) {
4498 ret = pci_enable_sriov(pdev, num_vfs);
4500 vxge_debug_ll_config(VXGE_ERR,
4501 "Failed in enabling SRIOV mode: %d\n", ret);
4502 /* No need to fail out, as an error here is non-fatal */
4506 * Configure vpaths and get driver configured number of vpaths
4507 * which is less than or equal to the maximum vpaths per function.
4509 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4511 vxge_debug_ll_config(VXGE_ERR,
4512 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4517 /* Setting driver callbacks */
4518 attr.uld_callbacks = &vxge_callbacks;
4520 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4521 if (status != VXGE_HW_OK) {
4522 vxge_debug_init(VXGE_ERR,
4523 "Failed to initialize device (%d)", status);
        if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
                        ll_config->device_hw_info.fw_version.minor,
                        ll_config->device_hw_info.fw_version.build) >=
            VXGE_EPROM_FW_VER) {
                struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];

                status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
                if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
                                        VXGE_DRIVER_NAME);
                        /* This is a non-fatal error, continue */
                }

                for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
                        hldev->eprom_versions[i] = img[i].version;
                        if (!img[i].is_valid)
                                break;

                        vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
                                        "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
                                        VXGE_EPROM_IMG_MAJOR(img[i].version),
                                        VXGE_EPROM_IMG_MINOR(img[i].version),
                                        VXGE_EPROM_IMG_FIX(img[i].version),
                                        VXGE_EPROM_IMG_BUILD(img[i].version));
                }
        }
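        /*
         * For reference, the VXGE_FW_VER() comparison above reduces to a
         * plain integer compare of packed version fields. A sketch of the
         * assumed packing (one field per byte, as defined in vxge-main.h):
         *
         *      #define VXGE_FW_VER(maj, min, bld) \
         *              (((maj) << 16) + ((min) << 8) + (bld))
         *
         * e.g. firmware 1.8.1 packs to 0x010801, so ">= VXGE_EPROM_FW_VER"
         * orders versions numerically.
         */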
        /* If FCS stripping is not disabled in the MAC, fail driver load */
        status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
                                " failing driver load", VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit4;
        }
        /* Always enable HWTS. This will always cause the FCS to be invalid,
         * because HWTS uses the FCS as the location of the timestamp. The
         * HW FCS checking will still correctly determine if there is a
         * valid checksum, and the FCS is being removed by the driver anyway,
         * so no functionality is lost. Since HWTS is always enabled, we now
         * simply use the ioctl call to set whether or not the driver should
         * pay attention to the HWTS.
         */
        if (is_privileged == VXGE_HW_OK) {
                status = vxge_timestamp_config(hldev);
                if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
                                        VXGE_DRIVER_NAME);
                        ret = -EFAULT;
                        goto _exit4;
                }
        }
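        /*
         * Since HWTS is permanently enabled in hardware, userspace opts in
         * through the standard SIOCSHWTSTAMP ioctl, which the driver's
         * ioctl handler services. A minimal userspace sketch using only
         * the generic <linux/net_tstamp.h> API (illustrative, not part of
         * this driver):
         *
         *      struct hwtstamp_config cfg = {
         *              .tx_type   = HWTSTAMP_TX_OFF,
         *              .rx_filter = HWTSTAMP_FILTER_ALL,
         *      };
         *      struct ifreq ifr = { .ifr_data = (void *)&cfg };
         *
         *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
         *      if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
         *              perror("SIOCSHWTSTAMP");
         */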
        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

        /* set private device info */
        pci_set_drvdata(pdev, hldev);
        ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
        ll_config->addr_learn_en = addr_learn_en;
        ll_config->rth_algorithm = RTH_ALG_JENKINS;
        ll_config->rth_hash_type_tcpipv4 = 1;
        ll_config->rth_hash_type_ipv4 = 0;
        ll_config->rth_hash_type_tcpipv6 = 0;
        ll_config->rth_hash_type_ipv6 = 0;
        ll_config->rth_hash_type_tcpipv6ex = 0;
        ll_config->rth_hash_type_ipv6ex = 0;
        ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
        ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
        ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
        ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
                                   &vdev);
        if (ret) {
                ret = -EINVAL;
                goto _exit4;
        }

        ret = vxge_probe_fw_update(vdev);
        if (ret)
                goto _exit5;
        vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
                vxge_hw_device_trace_level_get(hldev));
        /* set private HW device info */
        vdev->mtu = VXGE_HW_DEFAULT_MTU;
        vdev->bar0 = attr.bar0;
        vdev->max_vpath_supported = max_vpath_supported;
        vdev->no_of_vpath = no_of_vpath;
        /* Virtual Path count */
        for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!vxge_bVALn(vpath_mask, i, 1))
                        continue;
                if (j >= vdev->no_of_vpath)
                        break;

                vdev->vpaths[j].is_configured = 1;
                vdev->vpaths[j].device_id = i;
                vdev->vpaths[j].ring.driver_id = j;
                vdev->vpaths[j].vdev = vdev;
                vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
                memcpy((u8 *)vdev->vpaths[j].macaddr,
                       ll_config->device_hw_info.mac_addrs[i],
                       ETH_ALEN);

                /* Initialize the mac address list header */
                INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

                vdev->vpaths[j].mac_addr_cnt = 0;
                vdev->vpaths[j].mcast_addr_cnt = 0;
                j++;
        }
        vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
        vdev->max_config_port = max_config_port;

        vdev->vlan_tag_strip = vlan_tag_strip;
        /* map the hashing selector table to the configured vpaths */
        for (i = 0; i < vdev->no_of_vpath; i++)
                vdev->vpath_selector[i] = vpath_selector[i];
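        /*
         * A hedged reading of the table above: vpath_selector[i] is the
         * smallest mask of the form 2^n - 1 that can still represent
         * index i (0, 1, 3, 3, 7, 7, ...), so entry i reduces an RTH hash
         * to a power-of-two range wide enough to contain vpath i, e.g.
         * vpath 4 gets selector 7 and (hash & 7) spans indices 0-7.
         */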
        macaddr = (u8 *)vdev->vpaths[0].macaddr;

        ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
        ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
        ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
        vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
                vdev->ndev->name, ll_config->device_hw_info.serial_number);

        vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
                vdev->ndev->name, ll_config->device_hw_info.part_number);

        vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
                vdev->ndev->name, ll_config->device_hw_info.product_desc);

        vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
                vdev->ndev->name, macaddr);

        vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
                vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

        vxge_debug_init(VXGE_TRACE,
                "%s: Firmware version : %s Date : %s", vdev->ndev->name,
                ll_config->device_hw_info.fw_version.version,
                ll_config->device_hw_info.fw_date.date);
        switch (ll_config->device_hw_info.function_mode) {
        case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
                vxge_debug_init(VXGE_TRACE,
                        "%s: Single Function Mode Enabled", vdev->ndev->name);
                break;
        case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
                vxge_debug_init(VXGE_TRACE,
                        "%s: Multi Function Mode Enabled", vdev->ndev->name);
                break;
        case VXGE_HW_FUNCTION_MODE_SRIOV:
                vxge_debug_init(VXGE_TRACE,
                        "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
                break;
        case VXGE_HW_FUNCTION_MODE_MRIOV:
                vxge_debug_init(VXGE_TRACE,
                        "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
                break;
        }
        vxge_print_parm(vdev, vpath_mask);

        /* Store the fw version for ethtool option */
        strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
        memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
        /* Copy the station mac address to the list */
        for (i = 0; i < vdev->no_of_vpath; i++) {
                entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
                if (NULL == entry) {
                        vxge_debug_init(VXGE_ERR,
                                "%s: mac_addr_list : memory allocation failed",
                                vdev->ndev->name);
                        ret = -EPERM;
                        goto _exit6;
                }
                macaddr = (u8 *)&entry->macaddr;
                memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
                list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
                vdev->vpaths[i].mac_addr_cnt = 1;
        }
        kfree(device_config);
        /*
         * INTA is shared in multi-function mode. This is unlike the INTA
         * implementation in MR mode, where each VH has its own INTA message.
         * - INTA is masked (disabled) as long as at least one function sets
         *   its TITAN_MASK_ALL_INT.ALARM bit.
         * - INTA is unmasked (enabled) when all enabled functions have cleared
         *   their own TITAN_MASK_ALL_INT.ALARM bit.
         * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
         * Though this driver leaves the top-level interrupts unmasked while
         * leaving the required module interrupt bits masked on exit, there
         * could be a rogue driver around that does not follow this procedure,
         * resulting in a failure to generate interrupts. The following code is
         * present to prevent such a failure.
         */
        if (ll_config->device_hw_info.function_mode ==
            VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
                if (vdev->config.intr_type == INTA)
                        vxge_hw_device_unmask_all(hldev);
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
                vdev->ndev->name, __func__, __LINE__);

        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
                vxge_hw_device_trace_level_get(hldev));

        kfree(ll_config);
        return 0;
_exit6:
        for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
_exit5:
        vxge_device_unregister(hldev);
_exit4:
        vxge_hw_device_terminate(hldev);
        pci_disable_sriov(pdev);
_exit3:
        iounmap(attr.bar0);
_exit2:
        pci_release_region(pdev, 0);
_exit1:
        pci_disable_device(pdev);
_exit0:
        kfree(ll_config);
        kfree(device_config);
        driver_config->config_dev_cnt--;
        driver_config->total_dev_cnt--;
        return ret;
}
/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void vxge_remove(struct pci_dev *pdev)
{
        struct __vxge_hw_device *hldev;
        struct vxgedev *vdev;
        int i;

        hldev = pci_get_drvdata(pdev);
        if (hldev == NULL)
                return;

        vdev = netdev_priv(hldev->ndev);
        vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
        vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
                        __func__);

        for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
        vxge_device_unregister(hldev);
        /* Do not call pci_disable_sriov here, as it will break child devices */
        vxge_hw_device_terminate(hldev);
        iounmap(vdev->bar0);
        pci_release_region(pdev, 0);
        pci_disable_device(pdev);
        driver_config->config_dev_cnt--;
        driver_config->total_dev_cnt--;

        vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
                        __func__, __LINE__);
        vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
                             __LINE__);
}
static const struct pci_error_handlers vxge_err_handler = {
        .error_detected = vxge_io_error_detected,
        .slot_reset = vxge_io_slot_reset,
        .resume = vxge_io_resume,
};
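/*
 * For context: on an uncorrectable PCI error the AER core walks these
 * callbacks in order: ->error_detected() to quiesce I/O, ->slot_reset()
 * after the link/slot reset to reinitialize, then ->resume() to restart
 * traffic. A sketch of a typical detection step, loosely modeled on what
 * such a handler does rather than quoted from this driver:
 *
 *	static pci_ers_result_t sketch_io_error_detected(struct pci_dev *pdev,
 *							 pci_channel_state_t state)
 *	{
 *		struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
 *		struct net_device *netdev = hldev->ndev;
 *
 *		netif_device_detach(netdev);
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		pci_disable_device(pdev);
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 */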
static struct pci_driver vxge_driver = {
        .name = VXGE_DRIVER_NAME,
        .id_table = vxge_id_table,
        .probe = vxge_probe,
        .remove = vxge_remove,
#ifdef CONFIG_PM
        .suspend = vxge_pm_suspend,
        .resume = vxge_pm_resume,
#endif
        .err_handler = &vxge_err_handler,
};
static int __init
vxge_starter(void)
{
        int ret = 0;

        pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
        pr_info("Driver version: %s\n", DRV_VERSION);

        verify_bandwidth();

        driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
        if (!driver_config)
                return -ENOMEM;

        ret = pci_register_driver(&vxge_driver);
        if (ret) {
                kfree(driver_config);
                goto err;
        }

        if (driver_config->config_dev_cnt &&
            (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
                vxge_debug_init(VXGE_ERR,
                        "%s: Configured %d of %d devices",
                        VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
                        driver_config->total_dev_cnt);
err:
        return ret;
}
static void __exit
vxge_closer(void)
{
        pci_unregister_driver(&vxge_driver);
        kfree(driver_config);
}

module_init(vxge_starter);
module_exit(vxge_closer);