// SPDX-License-Identifier: GPL-2.0-only
/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 */
7 #include <linux/module.h>
8 #include <linux/types.h>
10 #include <linux/platform_device.h>
11 #include <linux/netdevice.h>
12 #include <linux/interrupt.h>
15 #include "fjes_trace.h"
/* Driver identity strings and module metadata.
 * NOTE(review): this listing is truncated — the MAJ/MIN version component
 * #defines that DRV_VERSION stringifies are not visible here; confirm
 * against the full source. */
19 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
20 #define DRV_NAME "fjes"
21 char fjes_driver_name[] = DRV_NAME;
22 char fjes_driver_version[] = DRV_VERSION;
23 static const char fjes_driver_string[] =
24 "FUJITSU Extended Socket Network Device Driver";
25 static const char fjes_copyright[] =
26 "Copyright (c) 2015 FUJITSU LIMITED";
28 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
29 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_VERSION);
/* The Extended Socket device is exposed via the ACPI motherboard
 * resource object (PNP0C02); matched by fjes_acpi_ids below. */
33 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
/* Forward declarations for the driver's static functions: netdev ops,
 * worker tasks, IRQ handling, ACPI glue and platform-driver entry points. */
35 static int fjes_request_irq(struct fjes_adapter *);
36 static void fjes_free_irq(struct fjes_adapter *);
38 static int fjes_open(struct net_device *);
39 static int fjes_close(struct net_device *);
40 static int fjes_setup_resources(struct fjes_adapter *);
41 static void fjes_free_resources(struct fjes_adapter *);
42 static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
43 static void fjes_raise_intr_rxdata_task(struct work_struct *);
44 static void fjes_tx_stall_task(struct work_struct *);
45 static void fjes_force_close_task(struct work_struct *);
46 static irqreturn_t fjes_intr(int, void*);
47 static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
48 static int fjes_change_mtu(struct net_device *, int);
49 static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
50 static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
51 static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
53 static int fjes_acpi_add(struct acpi_device *);
54 static int fjes_acpi_remove(struct acpi_device *);
55 static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
57 static int fjes_probe(struct platform_device *);
58 static int fjes_remove(struct platform_device *);
60 static int fjes_sw_init(struct fjes_adapter *);
61 static void fjes_netdev_setup(struct net_device *);
62 static void fjes_irq_watch_task(struct work_struct *);
63 static void fjes_watch_unshare_task(struct work_struct *);
64 static void fjes_rx_irq(struct fjes_adapter *, int);
65 static int fjes_poll(struct napi_struct *, int);
/* ACPI match table: the Extended Socket device lives under the
 * PNP0C02 motherboard-resource object.
 * NOTE(review): the listing is truncated — the table terminator and most
 * initializer fields of the two driver structs below are missing; restore
 * them from the full source. */
67 static const struct acpi_device_id fjes_acpi_ids[] = {
68 {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
71 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
/* ACPI driver: add/remove create and destroy the platform device. */
73 static struct acpi_driver fjes_acpi_driver = {
80 .remove = fjes_acpi_remove,
/* Platform driver bound to the device registered by fjes_acpi_add(). */
84 static struct platform_driver fjes_driver = {
89 .remove = fjes_remove,
/* MEM + IRQ resources filled in by fjes_get_acpi_resource(). */
92 static struct resource fjes_resource[] = {
97 static bool is_extended_socket_device(struct acpi_device *device)
99 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
100 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
101 union acpi_object *str;
105 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
106 if (ACPI_FAILURE(status))
109 str = buffer.pointer;
110 result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
111 str->string.length, UTF16_LITTLE_ENDIAN,
112 str_buf, sizeof(str_buf) - 1);
115 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
116 kfree(buffer.pointer);
119 kfree(buffer.pointer);
124 static int acpi_check_extended_socket_status(struct acpi_device *device)
126 unsigned long long sta;
129 status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
130 if (ACPI_FAILURE(status))
133 if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
134 (sta & ACPI_STA_DEVICE_ENABLED) &&
135 (sta & ACPI_STA_DEVICE_UI) &&
136 (sta & ACPI_STA_DEVICE_FUNCTIONING)))
142 static int fjes_acpi_add(struct acpi_device *device)
144 struct platform_device *plat_dev;
147 if (!is_extended_socket_device(device))
150 if (acpi_check_extended_socket_status(device))
153 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
154 fjes_get_acpi_resource, fjes_resource);
155 if (ACPI_FAILURE(status))
158 /* create platform_device */
159 plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
160 ARRAY_SIZE(fjes_resource));
161 if (IS_ERR(plat_dev))
162 return PTR_ERR(plat_dev);
164 device->driver_data = plat_dev;
/* fjes_acpi_remove - ACPI .remove callback: tear down the platform
 * device created in fjes_acpi_add().
 */
static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}
180 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
182 struct acpi_resource_address32 *addr;
183 struct acpi_resource_irq *irq;
184 struct resource *res = data;
186 switch (acpi_res->type) {
187 case ACPI_RESOURCE_TYPE_ADDRESS32:
188 addr = &acpi_res->data.address32;
189 res[0].start = addr->address.minimum;
190 res[0].end = addr->address.minimum +
191 addr->address.address_length - 1;
194 case ACPI_RESOURCE_TYPE_IRQ:
195 irq = &acpi_res->data.irq;
196 if (irq->interrupt_count != 1)
198 res[1].start = irq->interrupts[0];
199 res[1].end = irq->interrupts[0];
209 static int fjes_request_irq(struct fjes_adapter *adapter)
211 struct net_device *netdev = adapter->netdev;
214 adapter->interrupt_watch_enable = true;
215 if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
216 queue_delayed_work(adapter->control_wq,
217 &adapter->interrupt_watch_task,
218 FJES_IRQ_WATCH_DELAY);
221 if (!adapter->irq_registered) {
222 result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
223 IRQF_SHARED, netdev->name, adapter);
225 adapter->irq_registered = false;
227 adapter->irq_registered = true;
233 static void fjes_free_irq(struct fjes_adapter *adapter)
235 struct fjes_hw *hw = &adapter->hw;
237 adapter->interrupt_watch_enable = false;
238 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
240 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
242 if (adapter->irq_registered) {
243 free_irq(adapter->hw.hw_res.irq, adapter);
244 adapter->irq_registered = false;
248 static const struct net_device_ops fjes_netdev_ops = {
249 .ndo_open = fjes_open,
250 .ndo_stop = fjes_close,
251 .ndo_start_xmit = fjes_xmit_frame,
252 .ndo_get_stats64 = fjes_get_stats64,
253 .ndo_change_mtu = fjes_change_mtu,
254 .ndo_tx_timeout = fjes_tx_retry,
255 .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
256 .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
259 /* fjes_open - Called when a network interface is made active */
260 static int fjes_open(struct net_device *netdev)
262 struct fjes_adapter *adapter = netdev_priv(netdev);
263 struct fjes_hw *hw = &adapter->hw;
266 if (adapter->open_guard)
269 result = fjes_setup_resources(adapter);
273 hw->txrx_stop_req_bit = 0;
274 hw->epstop_req_bit = 0;
276 napi_enable(&adapter->napi);
278 fjes_hw_capture_interrupt_status(hw);
280 result = fjes_request_irq(adapter);
284 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
286 netif_tx_start_all_queues(netdev);
287 netif_carrier_on(netdev);
292 fjes_free_irq(adapter);
293 napi_disable(&adapter->napi);
296 fjes_free_resources(adapter);
300 /* fjes_close - Disables a network interface */
301 static int fjes_close(struct net_device *netdev)
303 struct fjes_adapter *adapter = netdev_priv(netdev);
304 struct fjes_hw *hw = &adapter->hw;
308 netif_tx_stop_all_queues(netdev);
309 netif_carrier_off(netdev);
311 fjes_hw_raise_epstop(hw);
313 napi_disable(&adapter->napi);
315 spin_lock_irqsave(&hw->rx_status_lock, flags);
316 for (epidx = 0; epidx < hw->max_epid; epidx++) {
317 if (epidx == hw->my_epid)
320 if (fjes_hw_get_partner_ep_status(hw, epidx) ==
322 adapter->hw.ep_shm_info[epidx]
323 .tx.info->v1i.rx_status &=
326 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
328 fjes_free_irq(adapter);
330 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
331 cancel_work_sync(&adapter->unshare_watch_task);
332 adapter->unshare_watch_bitmask = 0;
333 cancel_work_sync(&adapter->raise_intr_rxdata_task);
334 cancel_work_sync(&adapter->tx_stall_task);
336 cancel_work_sync(&hw->update_zone_task);
337 cancel_work_sync(&hw->epstop_task);
339 fjes_hw_wait_epstop(hw);
341 fjes_free_resources(adapter);
/*
 * fjes_setup_resources - query zoning info from the device and register
 * shared TX/RX buffer pairs with every same-zone partner EP.
 * NOTE(review): this listing is truncated (embedded line numbers jump) —
 * variable declarations, switch/case labels and return paths are missing.
 * Comments below describe only the surviving lines.
 */
346 static int fjes_setup_resources(struct fjes_adapter *adapter)
348 struct net_device *netdev = adapter->netdev;
349 struct ep_share_mem_info *buf_pair;
350 struct fjes_hw *hw = &adapter->hw;
/* request zoning/es_status info from the device under the hw_info mutex */
355 mutex_lock(&hw->hw_info.lock);
356 result = fjes_hw_request_info(hw);
/* success path: cache per-EP es_status and zone from the response buffer */
359 for (epidx = 0; epidx < hw->max_epid; epidx++) {
360 hw->ep_shm_info[epidx].es_status =
361 hw->hw_info.res_buf->info.info[epidx].es_status;
362 hw->ep_shm_info[epidx].zone =
363 hw->hw_info.res_buf->info.info[epidx].zone;
/* failure path: request a device reset on the next teardown */
369 adapter->force_reset = true;
371 mutex_unlock(&hw->hw_info.lock);
374 mutex_unlock(&hw->hw_info.lock);
/* raise an INFO_UPDATE interrupt to every enabled peer EP */
376 for (epidx = 0; epidx < (hw->max_epid); epidx++) {
377 if ((epidx != hw->my_epid) &&
378 (hw->ep_shm_info[epidx].es_status ==
379 FJES_ZONING_STATUS_ENABLE)) {
380 fjes_hw_raise_interrupt(hw, epidx,
381 REG_ICTL_MASK_INFO_UPDATE);
382 hw->ep_shm_info[epidx].ep_stats
383 .send_intr_zoneupdate += 1;
/* give partners time to process the zone update before registering */
387 msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
389 for (epidx = 0; epidx < (hw->max_epid); epidx++) {
390 if (epidx == hw->my_epid)
393 buf_pair = &hw->ep_shm_info[epidx];
/* (re)initialize our TX epbuf header under the rx_status lock */
395 spin_lock_irqsave(&hw->rx_status_lock, flags);
396 fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
398 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
/* only partners in the same zone get the buffer pair registered */
400 if (fjes_hw_epid_is_same_zone(hw, epidx)) {
401 mutex_lock(&hw->hw_info.lock);
403 fjes_hw_register_buff_addr(hw, epidx, buf_pair);
404 mutex_unlock(&hw->hw_info.lock);
/* registration failure also forces a reset */
412 adapter->force_reset = true;
416 hw->ep_shm_info[epidx].ep_stats
417 .com_regist_buf_exec += 1;
424 static void fjes_free_resources(struct fjes_adapter *adapter)
426 struct net_device *netdev = adapter->netdev;
427 struct fjes_device_command_param param;
428 struct ep_share_mem_info *buf_pair;
429 struct fjes_hw *hw = &adapter->hw;
430 bool reset_flag = false;
435 for (epidx = 0; epidx < hw->max_epid; epidx++) {
436 if (epidx == hw->my_epid)
439 mutex_lock(&hw->hw_info.lock);
440 result = fjes_hw_unregister_buff_addr(hw, epidx);
441 mutex_unlock(&hw->hw_info.lock);
443 hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
448 buf_pair = &hw->ep_shm_info[epidx];
450 spin_lock_irqsave(&hw->rx_status_lock, flags);
451 fjes_hw_setup_epbuf(&buf_pair->tx,
452 netdev->dev_addr, netdev->mtu);
453 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
455 clear_bit(epidx, &hw->txrx_stop_req_bit);
458 if (reset_flag || adapter->force_reset) {
459 result = fjes_hw_reset(hw);
461 adapter->force_reset = false;
464 adapter->open_guard = true;
466 hw->hw_info.buffer_share_bit = 0;
468 memset((void *)¶m, 0, sizeof(param));
470 param.req_len = hw->hw_info.req_buf_size;
471 param.req_start = __pa(hw->hw_info.req_buf);
472 param.res_len = hw->hw_info.res_buf_size;
473 param.res_start = __pa(hw->hw_info.res_buf);
474 param.share_start = __pa(hw->hw_info.share->ep_status);
476 fjes_hw_init_command_registers(hw, ¶m);
480 static void fjes_tx_stall_task(struct work_struct *work)
482 struct fjes_adapter *adapter = container_of(work,
483 struct fjes_adapter, tx_stall_task);
484 struct net_device *netdev = adapter->netdev;
485 struct fjes_hw *hw = &adapter->hw;
486 int all_queue_available, sendable;
487 enum ep_partner_status pstatus;
488 int max_epid, my_epid, epid;
489 union ep_buffer_info *info;
493 dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
494 netif_wake_queue(netdev);
498 my_epid = hw->my_epid;
499 max_epid = hw->max_epid;
501 for (i = 0; i < 5; i++) {
502 all_queue_available = 1;
504 for (epid = 0; epid < max_epid; epid++) {
508 pstatus = fjes_hw_get_partner_ep_status(hw, epid);
509 sendable = (pstatus == EP_PARTNER_SHARED);
513 info = adapter->hw.ep_shm_info[epid].tx.info;
515 if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
518 if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
519 info->v1i.count_max)) {
520 all_queue_available = 0;
525 if (all_queue_available) {
526 netif_wake_queue(netdev);
531 usleep_range(50, 100);
533 queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
536 static void fjes_force_close_task(struct work_struct *work)
538 struct fjes_adapter *adapter = container_of(work,
539 struct fjes_adapter, force_close_task);
540 struct net_device *netdev = adapter->netdev;
547 static void fjes_raise_intr_rxdata_task(struct work_struct *work)
549 struct fjes_adapter *adapter = container_of(work,
550 struct fjes_adapter, raise_intr_rxdata_task);
551 struct fjes_hw *hw = &adapter->hw;
552 enum ep_partner_status pstatus;
553 int max_epid, my_epid, epid;
555 my_epid = hw->my_epid;
556 max_epid = hw->max_epid;
558 for (epid = 0; epid < max_epid; epid++)
559 hw->ep_shm_info[epid].tx_status_work = 0;
561 for (epid = 0; epid < max_epid; epid++) {
565 pstatus = fjes_hw_get_partner_ep_status(hw, epid);
566 if (pstatus == EP_PARTNER_SHARED) {
567 hw->ep_shm_info[epid].tx_status_work =
568 hw->ep_shm_info[epid].tx.info->v1i.tx_status;
570 if (hw->ep_shm_info[epid].tx_status_work ==
571 FJES_TX_DELAY_SEND_PENDING) {
572 hw->ep_shm_info[epid].tx.info->v1i.tx_status =
573 FJES_TX_DELAY_SEND_NONE;
578 for (epid = 0; epid < max_epid; epid++) {
582 pstatus = fjes_hw_get_partner_ep_status(hw, epid);
583 if ((hw->ep_shm_info[epid].tx_status_work ==
584 FJES_TX_DELAY_SEND_PENDING) &&
585 (pstatus == EP_PARTNER_SHARED) &&
586 !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
587 FJES_RX_POLL_WORK)) {
588 fjes_hw_raise_interrupt(hw, epid,
589 REG_ICTL_MASK_RX_DATA);
590 hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
594 usleep_range(500, 1000);
597 static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
598 void *data, size_t len)
602 retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
607 adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
608 FJES_TX_DELAY_SEND_PENDING;
609 if (!work_pending(&adapter->raise_intr_rxdata_task))
610 queue_work(adapter->txrx_wq,
611 &adapter->raise_intr_rxdata_task);
/*
 * fjes_xmit_frame - .ndo_start_xmit handler: resolve the destination
 * EPID(s) from the Ethernet destination address and copy the frame into
 * each destination EP's shared TX buffer via fjes_tx_send().
 * NOTE(review): this listing is truncated (embedded line numbers jump) —
 * several declarations, else-branches and the final return are missing.
 * Comments below describe only the surviving lines.
 */
618 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
620 struct fjes_adapter *adapter = netdev_priv(netdev);
621 struct fjes_hw *hw = &adapter->hw;
623 int max_epid, my_epid, dest_epid;
624 enum ep_partner_status pstatus;
625 struct netdev_queue *cur_queue;
626 char shortpkt[VLAN_ETH_HLEN];
637 cur_queue = netdev_get_tx_queue(netdev, queue_no);
639 eth = (struct ethhdr *)skb->data;
640 my_epid = hw->my_epid;
/* extract the VLAN tag, if any, for per-EP VLAN filtering below */
642 vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
/* multicast: iterate over every EP */
647 if (is_multicast_ether_addr(eth->h_dest)) {
649 max_epid = hw->max_epid;
/* locally administered unicast: last MAC byte encodes the dest EPID */
651 } else if (is_local_ether_addr(eth->h_dest)) {
652 dest_epid = eth->h_dest[ETH_ALEN - 1];
653 max_epid = dest_epid + 1;
/* validate the 02:00:00:00:00:xx EPID address format */
655 if ((eth->h_dest[0] == 0x02) &&
656 (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
657 eth->h_dest[3] | eth->h_dest[4])) &&
658 (dest_epid < hw->max_epid)) {
/* invalid/out-of-range EPID: count as locally delivered */
665 adapter->stats64.tx_packets += 1;
666 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
667 adapter->stats64.tx_bytes += len;
668 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
675 adapter->stats64.tx_packets += 1;
676 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
677 adapter->stats64.tx_bytes += len;
678 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
/* send loop: try each destination EPID in [dest_epid, max_epid) */
681 for (; dest_epid < max_epid; dest_epid++) {
682 if (my_epid == dest_epid)
685 pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
/* partner not shared: drop for this EP */
686 if (pstatus != EP_PARTNER_SHARED) {
688 hw->ep_shm_info[dest_epid].ep_stats
689 .tx_dropped_not_shared += 1;
691 } else if (!fjes_hw_check_epbuf_version(
692 &adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
693 /* version is NOT 0 */
694 adapter->stats64.tx_carrier_errors += 1;
695 hw->ep_shm_info[dest_epid].net_stats
696 .tx_carrier_errors += 1;
697 hw->ep_shm_info[dest_epid].ep_stats
698 .tx_dropped_ver_mismatch += 1;
/* partner's buffer MTU does not match ours: drop */
701 } else if (!fjes_hw_check_mtu(
702 &adapter->hw.ep_shm_info[dest_epid].rx,
704 adapter->stats64.tx_dropped += 1;
705 hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
706 adapter->stats64.tx_errors += 1;
707 hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
708 hw->ep_shm_info[dest_epid].ep_stats
709 .tx_dropped_buf_size_mismatch += 1;
/* tagged frame but partner does not accept this VLAN id: drop */
713 !fjes_hw_check_vlan_id(
714 &adapter->hw.ep_shm_info[dest_epid].rx,
716 hw->ep_shm_info[dest_epid].ep_stats
717 .tx_dropped_vlanid_mismatch += 1;
/* pad runt frames to the minimum VLAN Ethernet header length */
720 if (len < VLAN_ETH_HLEN) {
721 memset(shortpkt, 0, VLAN_ETH_HLEN);
722 memcpy(shortpkt, skb->data, skb->len);
/* start/continue the retry window for ring-full back-off */
727 if (adapter->tx_retry_count == 0) {
728 adapter->tx_start_jiffies = jiffies;
729 adapter->tx_retry_count = 1;
731 adapter->tx_retry_count++;
734 if (fjes_tx_send(adapter, dest_epid, data, len)) {
/* retried past the timeout: account it as a FIFO error */
739 (long)adapter->tx_start_jiffies) >=
740 FJES_TX_RETRY_TIMEOUT) {
741 adapter->stats64.tx_fifo_errors += 1;
742 hw->ep_shm_info[dest_epid].net_stats
743 .tx_fifo_errors += 1;
744 adapter->stats64.tx_errors += 1;
745 hw->ep_shm_info[dest_epid].net_stats
/* still within the window: stop the queue and retry via worker */
750 netif_trans_update(netdev);
751 hw->ep_shm_info[dest_epid].ep_stats
752 .tx_buffer_full += 1;
753 netif_tx_stop_queue(cur_queue);
755 if (!work_pending(&adapter->tx_stall_task))
756 queue_work(adapter->txrx_wq,
757 &adapter->tx_stall_task);
759 ret = NETDEV_TX_BUSY;
/* successful send to this EP */
763 adapter->stats64.tx_packets += 1;
764 hw->ep_shm_info[dest_epid].net_stats
766 adapter->stats64.tx_bytes += len;
767 hw->ep_shm_info[dest_epid].net_stats
771 adapter->tx_retry_count = 0;
777 if (ret == NETDEV_TX_OK) {
780 adapter->stats64.tx_packets += 1;
781 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
/* NOTE(review): "+= 1" for tx_bytes looks inconsistent with the
 * "+= len" on the next line — verify against the full source. */
782 adapter->stats64.tx_bytes += 1;
783 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
/* fjes_tx_retry - .ndo_tx_timeout handler: wake queue 0 so transmission
 * is retried (this driver uses a single TX queue).
 */
static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}
798 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
800 struct fjes_adapter *adapter = netdev_priv(netdev);
802 memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
805 static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
807 struct fjes_adapter *adapter = netdev_priv(netdev);
808 bool running = netif_running(netdev);
809 struct fjes_hw *hw = &adapter->hw;
814 for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
815 if (new_mtu <= fjes_support_mtu[idx]) {
816 new_mtu = fjes_support_mtu[idx];
817 if (new_mtu == netdev->mtu)
829 spin_lock_irqsave(&hw->rx_status_lock, flags);
830 for (epidx = 0; epidx < hw->max_epid; epidx++) {
831 if (epidx == hw->my_epid)
833 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
834 ~FJES_RX_MTU_CHANGING_DONE;
836 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
838 netif_tx_stop_all_queues(netdev);
839 netif_carrier_off(netdev);
840 cancel_work_sync(&adapter->tx_stall_task);
841 napi_disable(&adapter->napi);
845 netif_tx_stop_all_queues(netdev);
848 netdev->mtu = new_mtu;
851 for (epidx = 0; epidx < hw->max_epid; epidx++) {
852 if (epidx == hw->my_epid)
855 spin_lock_irqsave(&hw->rx_status_lock, flags);
856 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
860 hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
861 FJES_RX_MTU_CHANGING_DONE;
862 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
865 netif_tx_wake_all_queues(netdev);
866 netif_carrier_on(netdev);
867 napi_enable(&adapter->napi);
868 napi_schedule(&adapter->napi);
874 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
875 __be16 proto, u16 vid)
877 struct fjes_adapter *adapter = netdev_priv(netdev);
881 for (epid = 0; epid < adapter->hw.max_epid; epid++) {
882 if (epid == adapter->hw.my_epid)
885 if (!fjes_hw_check_vlan_id(
886 &adapter->hw.ep_shm_info[epid].tx, vid))
887 ret = fjes_hw_set_vlan_id(
888 &adapter->hw.ep_shm_info[epid].tx, vid);
891 return ret ? 0 : -ENOSPC;
894 static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
895 __be16 proto, u16 vid)
897 struct fjes_adapter *adapter = netdev_priv(netdev);
900 for (epid = 0; epid < adapter->hw.max_epid; epid++) {
901 if (epid == adapter->hw.my_epid)
904 fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
910 static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
913 struct fjes_hw *hw = &adapter->hw;
914 enum ep_partner_status status;
917 status = fjes_hw_get_partner_ep_status(hw, src_epid);
918 trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
920 case EP_PARTNER_UNSHARE:
921 case EP_PARTNER_COMPLETE:
924 case EP_PARTNER_WAITING:
925 if (src_epid < hw->my_epid) {
926 spin_lock_irqsave(&hw->rx_status_lock, flags);
927 hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
928 FJES_RX_STOP_REQ_DONE;
929 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
931 clear_bit(src_epid, &hw->txrx_stop_req_bit);
932 set_bit(src_epid, &adapter->unshare_watch_bitmask);
934 if (!work_pending(&adapter->unshare_watch_task))
935 queue_work(adapter->control_wq,
936 &adapter->unshare_watch_task);
939 case EP_PARTNER_SHARED:
940 if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
941 FJES_RX_STOP_REQ_REQUEST) {
942 set_bit(src_epid, &hw->epstop_req_bit);
943 if (!work_pending(&hw->epstop_task))
944 queue_work(adapter->control_wq,
949 trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
952 static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
954 struct fjes_hw *hw = &adapter->hw;
955 enum ep_partner_status status;
958 set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
960 status = fjes_hw_get_partner_ep_status(hw, src_epid);
961 trace_fjes_stop_req_irq_pre(hw, src_epid, status);
963 case EP_PARTNER_WAITING:
964 spin_lock_irqsave(&hw->rx_status_lock, flags);
965 hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
966 FJES_RX_STOP_REQ_DONE;
967 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
968 clear_bit(src_epid, &hw->txrx_stop_req_bit);
970 case EP_PARTNER_UNSHARE:
971 case EP_PARTNER_COMPLETE:
973 set_bit(src_epid, &adapter->unshare_watch_bitmask);
974 if (!work_pending(&adapter->unshare_watch_task))
975 queue_work(adapter->control_wq,
976 &adapter->unshare_watch_task);
978 case EP_PARTNER_SHARED:
979 set_bit(src_epid, &hw->epstop_req_bit);
981 if (!work_pending(&hw->epstop_task))
982 queue_work(adapter->control_wq, &hw->epstop_task);
985 trace_fjes_stop_req_irq_post(hw, src_epid);
988 static void fjes_update_zone_irq(struct fjes_adapter *adapter,
991 struct fjes_hw *hw = &adapter->hw;
993 if (!work_pending(&hw->update_zone_task))
994 queue_work(adapter->control_wq, &hw->update_zone_task);
997 static irqreturn_t fjes_intr(int irq, void *data)
999 struct fjes_adapter *adapter = data;
1000 struct fjes_hw *hw = &adapter->hw;
1004 icr = fjes_hw_capture_interrupt_status(hw);
1006 if (icr & REG_IS_MASK_IS_ASSERT) {
1007 if (icr & REG_ICTL_MASK_RX_DATA) {
1008 fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
1009 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1013 if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
1014 fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1015 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1016 .recv_intr_stop += 1;
1019 if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
1020 fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1021 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1022 .recv_intr_unshare += 1;
1025 if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
1026 fjes_hw_set_irqmask(hw,
1027 REG_ICTL_MASK_TXRX_STOP_DONE, true);
1029 if (icr & REG_ICTL_MASK_INFO_UPDATE) {
1030 fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
1031 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1032 .recv_intr_zoneupdate += 1;
1043 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
1046 struct fjes_hw *hw = &adapter->hw;
1047 enum ep_partner_status pstatus;
1048 int max_epid, cur_epid;
1051 max_epid = hw->max_epid;
1052 start_epid = (start_epid + 1 + max_epid) % max_epid;
1054 for (i = 0; i < max_epid; i++) {
1055 cur_epid = (start_epid + i) % max_epid;
1056 if (cur_epid == hw->my_epid)
1059 pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
1060 if (pstatus == EP_PARTNER_SHARED) {
1061 if (!fjes_hw_epbuf_rx_is_empty(
1062 &hw->ep_shm_info[cur_epid].rx))
1069 static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
1074 *cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
1079 fjes_hw_epbuf_rx_curpkt_get_addr(
1080 &adapter->hw.ep_shm_info[*cur_epid].rx, psize);
1085 static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
1087 fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
1090 static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
1092 struct fjes_hw *hw = &adapter->hw;
1094 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
1096 adapter->unset_rx_last = true;
1097 napi_schedule(&adapter->napi);
/*
 * fjes_poll - NAPI poll handler: drain pending frames from partner RX
 * buffers into the stack, then either reschedule or complete and
 * re-enable the RX-data interrupt.
 * NOTE(review): this listing is truncated (embedded line numbers jump) —
 * some declarations, branch headers and the return are missing. Comments
 * below describe only the surviving lines.
 */
1100 static int fjes_poll(struct napi_struct *napi, int budget)
1102 struct fjes_adapter *adapter =
1103 container_of(napi, struct fjes_adapter, napi);
1104 struct net_device *netdev = napi->dev;
1105 struct fjes_hw *hw = &adapter->hw;
1106 struct sk_buff *skb;
/* advertise to shared partners that we are actively polling */
1113 spin_lock(&hw->rx_status_lock);
1114 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1115 if (epidx == hw->my_epid)
1118 if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1120 adapter->hw.ep_shm_info[epidx]
1121 .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
1123 spin_unlock(&hw->rx_status_lock);
/* main receive loop, bounded by the NAPI budget */
1125 while (work_done < budget) {
1126 prefetch(&adapter->hw);
1127 frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
1130 skb = napi_alloc_skb(napi, frame_len);
/* skb allocation failure: count the frame as dropped */
1132 adapter->stats64.rx_dropped += 1;
1133 hw->ep_shm_info[cur_epid].net_stats
1135 adapter->stats64.rx_errors += 1;
1136 hw->ep_shm_info[cur_epid].net_stats
/* copy the frame out of the shared buffer and push it upstream */
1139 skb_put_data(skb, frame, frame_len);
1140 skb->protocol = eth_type_trans(skb, netdev);
1141 skb->ip_summed = CHECKSUM_UNNECESSARY;
1143 netif_receive_skb(skb);
1147 adapter->stats64.rx_packets += 1;
1148 hw->ep_shm_info[cur_epid].net_stats
1150 adapter->stats64.rx_bytes += frame_len;
1151 hw->ep_shm_info[cur_epid].net_stats
1152 .rx_bytes += frame_len;
1154 if (is_multicast_ether_addr(
1155 ((struct ethhdr *)frame)->h_dest)) {
1156 adapter->stats64.multicast += 1;
1157 hw->ep_shm_info[cur_epid].net_stats
/* release the shared-buffer slot in all cases */
1162 fjes_rxframe_release(adapter, cur_epid);
1163 adapter->unset_rx_last = true;
1169 if (work_done < budget) {
1170 napi_complete_done(napi, work_done);
1172 if (adapter->unset_rx_last) {
1173 adapter->rx_last_jiffies = jiffies;
1174 adapter->unset_rx_last = false;
/* keep polling briefly after the last frame to catch bursts */
1177 if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
1178 napi_reschedule(napi);
/* idle: clear the POLL_WORK flag and unmask RX interrupts */
1180 spin_lock(&hw->rx_status_lock);
1181 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1182 if (epidx == hw->my_epid)
1184 if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1186 adapter->hw.ep_shm_info[epidx].tx
1187 .info->v1i.rx_status &=
1190 spin_unlock(&hw->rx_status_lock);
1192 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
1199 /* fjes_probe - Device Initialization Routine */
/*
 * Allocates the net_device/adapter, creates the txrx and control
 * workqueues, maps the platform MEM/IRQ resources into the hw struct,
 * initializes the hardware and registers the netdev.
 * NOTE(review): this listing is truncated (embedded line numbers jump) —
 * several error checks, labels and the returns are missing. Comments
 * below describe only the surviving lines.
 */
1200 static int fjes_probe(struct platform_device *plat_dev)
1202 struct fjes_adapter *adapter;
1203 struct net_device *netdev;
1204 struct resource *res;
/* "es%d" interface name; private area holds struct fjes_adapter */
1209 netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1210 NET_NAME_UNKNOWN, fjes_netdev_setup,
1216 SET_NETDEV_DEV(netdev, &plat_dev->dev);
1218 dev_set_drvdata(&plat_dev->dev, netdev);
1219 adapter = netdev_priv(netdev);
1220 adapter->netdev = netdev;
1221 adapter->plat_dev = plat_dev;
1225 /* setup the private structure */
1226 err = fjes_sw_init(adapter);
1228 goto err_free_netdev;
1230 INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1231 adapter->force_reset = false;
1232 adapter->open_guard = false;
/* dedicated WQ_MEM_RECLAIM workqueues for datapath and control work */
1234 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1235 if (unlikely(!adapter->txrx_wq)) {
1237 goto err_free_netdev;
1240 adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1242 if (unlikely(!adapter->control_wq)) {
1244 goto err_free_txrx_wq;
1247 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1248 INIT_WORK(&adapter->raise_intr_rxdata_task,
1249 fjes_raise_intr_rxdata_task);
1250 INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1251 adapter->unshare_watch_bitmask = 0;
1253 INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1254 adapter->interrupt_watch_enable = false;
/* map the MEM/IRQ resources harvested from ACPI _CRS */
1256 res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1259 goto err_free_control_wq;
1261 hw->hw_res.start = res->start;
1262 hw->hw_res.size = resource_size(res);
1263 hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1264 err = fjes_hw_init(&adapter->hw);
1266 goto err_free_control_wq;
1268 /* setup MAC address (02:00:00:00:00:[epid])*/
1269 netdev->dev_addr[0] = 2;
1270 netdev->dev_addr[1] = 0;
1271 netdev->dev_addr[2] = 0;
1272 netdev->dev_addr[3] = 0;
1273 netdev->dev_addr[4] = 0;
1274 netdev->dev_addr[5] = hw->my_epid; /* EPID */
1276 err = register_netdev(netdev);
1280 netif_carrier_off(netdev);
1282 fjes_dbg_adapter_init(adapter);
/* unwind path: HW, control workqueue, txrx workqueue, netdev */
1287 fjes_hw_exit(&adapter->hw);
1288 err_free_control_wq:
1289 destroy_workqueue(adapter->control_wq);
1291 destroy_workqueue(adapter->txrx_wq);
1293 free_netdev(netdev);
1298 /* fjes_remove - Device Removal Routine */
1299 static int fjes_remove(struct platform_device *plat_dev)
1301 struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
1302 struct fjes_adapter *adapter = netdev_priv(netdev);
1303 struct fjes_hw *hw = &adapter->hw;
1305 fjes_dbg_adapter_exit(adapter);
1307 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
1308 cancel_work_sync(&adapter->unshare_watch_task);
1309 cancel_work_sync(&adapter->raise_intr_rxdata_task);
1310 cancel_work_sync(&adapter->tx_stall_task);
1311 if (adapter->control_wq)
1312 destroy_workqueue(adapter->control_wq);
1313 if (adapter->txrx_wq)
1314 destroy_workqueue(adapter->txrx_wq);
1316 unregister_netdev(netdev);
1320 netif_napi_del(&adapter->napi);
1322 free_netdev(netdev);
1327 static int fjes_sw_init(struct fjes_adapter *adapter)
1329 struct net_device *netdev = adapter->netdev;
1331 netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
1336 /* fjes_netdev_setup - netdevice initialization routine */
1337 static void fjes_netdev_setup(struct net_device *netdev)
1339 ether_setup(netdev);
1341 netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
1342 netdev->netdev_ops = &fjes_netdev_ops;
1343 fjes_set_ethtool_ops(netdev);
1344 netdev->mtu = fjes_support_mtu[3];
1345 netdev->min_mtu = fjes_support_mtu[0];
1346 netdev->max_mtu = fjes_support_mtu[3];
1347 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1350 static void fjes_irq_watch_task(struct work_struct *work)
1352 struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1353 struct fjes_adapter, interrupt_watch_task);
1355 local_irq_disable();
1356 fjes_intr(adapter->hw.hw_res.irq, adapter);
1359 if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1360 napi_schedule(&adapter->napi);
1362 if (adapter->interrupt_watch_enable) {
1363 if (!delayed_work_pending(&adapter->interrupt_watch_task))
1364 queue_delayed_work(adapter->control_wq,
1365 &adapter->interrupt_watch_task,
1366 FJES_IRQ_WATCH_DELAY);
/*
 * fjes_watch_unshare_task - worker that completes buffer unsharing with
 * partner EPs: waits (bounded) for stop-request handshakes, unregisters
 * buffer pairs, and forces the interface closed if unregistering fails.
 * NOTE(review): this listing is truncated (embedded line numbers jump) —
 * several declarations, condition headers and loop bodies are missing.
 * Comments below describe only the surviving lines.
 */
1370 static void fjes_watch_unshare_task(struct work_struct *work)
1372 struct fjes_adapter *adapter =
1373 container_of(work, struct fjes_adapter, unshare_watch_task);
1375 struct net_device *netdev = adapter->netdev;
1376 struct fjes_hw *hw = &adapter->hw;
1378 int unshare_watch, unshare_reserve;
1379 int max_epid, my_epid, epidx;
1380 int stop_req, stop_req_done;
1381 ulong unshare_watch_bitmask;
1382 unsigned long flags;
1387 my_epid = hw->my_epid;
1388 max_epid = hw->max_epid;
/* take ownership of the pending-unshare bitmap */
1390 unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1391 adapter->unshare_watch_bitmask = 0;
/* bounded wait loop for the unshare handshakes to complete */
1393 while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1394 (wait_time < 3000)) {
1395 for (epidx = 0; epidx < max_epid; epidx++) {
1396 if (epidx == my_epid)
1399 is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1402 stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1404 stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1405 FJES_RX_STOP_REQ_DONE;
1407 unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1409 unshare_reserve = test_bit(epidx,
1410 &hw->hw_info.buffer_unshare_reserve_bit);
/* skip EPs whose handshake is not yet ready to finalize */
1413 (is_shared && (!is_shared || !stop_req_done))) &&
1414 (is_shared || !unshare_watch || !unshare_reserve))
1417 mutex_lock(&hw->hw_info.lock);
1418 ret = fjes_hw_unregister_buff_addr(hw, epidx);
/* unregister failure: force-reset and close the interface */
1426 &adapter->force_close_task)) {
1427 adapter->force_reset = true;
1429 &adapter->force_close_task);
1433 mutex_unlock(&hw->hw_info.lock);
1434 hw->ep_shm_info[epidx].ep_stats
1435 .com_unregist_buf_exec += 1;
/* reinitialize the TX epbuf for any future re-share */
1437 spin_lock_irqsave(&hw->rx_status_lock, flags);
1438 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1439 netdev->dev_addr, netdev->mtu);
1440 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1442 clear_bit(epidx, &hw->txrx_stop_req_bit);
1443 clear_bit(epidx, &unshare_watch_bitmask);
1445 &hw->hw_info.buffer_unshare_reserve_bit);
/* timeout path: force-unregister anything still reserved */
1452 if (hw->hw_info.buffer_unshare_reserve_bit) {
1453 for (epidx = 0; epidx < max_epid; epidx++) {
1454 if (epidx == my_epid)
1458 &hw->hw_info.buffer_unshare_reserve_bit)) {
1459 mutex_lock(&hw->hw_info.lock);
1461 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1469 &adapter->force_close_task)) {
1470 adapter->force_reset = true;
1472 &adapter->force_close_task);
1476 mutex_unlock(&hw->hw_info.lock);
1478 hw->ep_shm_info[epidx].ep_stats
1479 .com_unregist_buf_exec += 1;
1481 spin_lock_irqsave(&hw->rx_status_lock, flags);
1482 fjes_hw_setup_epbuf(
1483 &hw->ep_shm_info[epidx].tx,
1484 netdev->dev_addr, netdev->mtu);
1485 spin_unlock_irqrestore(&hw->rx_status_lock,
1488 clear_bit(epidx, &hw->txrx_stop_req_bit);
1489 clear_bit(epidx, &unshare_watch_bitmask);
1490 clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
/* EPs still marked only in the watch bitmap: clear the DONE flag */
1493 if (test_bit(epidx, &unshare_watch_bitmask)) {
1494 spin_lock_irqsave(&hw->rx_status_lock, flags);
1495 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1496 ~FJES_RX_STOP_REQ_DONE;
1497 spin_unlock_irqrestore(&hw->rx_status_lock,
1505 acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1506 void *context, void **return_value)
1508 struct acpi_device *device;
1509 bool *found = context;
1512 result = acpi_bus_get_device(obj_handle, &device);
1516 if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1519 if (!is_extended_socket_device(device))
1522 if (acpi_check_extended_socket_status(device))
1526 return AE_CTRL_TERMINATE;
1529 /* fjes_init_module - Driver Registration Routine */
1530 static int __init fjes_init_module(void)
1535 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1536 acpi_find_extended_socket_device, NULL, &found,
1542 pr_info("%s - version %s - %s\n",
1543 fjes_driver_string, fjes_driver_version, fjes_copyright);
1547 result = platform_driver_register(&fjes_driver);
1553 result = acpi_bus_register_driver(&fjes_acpi_driver);
1555 goto fail_acpi_driver;
1560 platform_driver_unregister(&fjes_driver);
1565 module_init(fjes_init_module);
1567 /* fjes_exit_module - Driver Exit Cleanup Routine */
1568 static void __exit fjes_exit_module(void)
1570 acpi_bus_unregister_driver(&fjes_acpi_driver);
1571 platform_driver_unregister(&fjes_driver);
1575 module_exit(fjes_exit_module);