1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
22 #include <linux/tcp.h>
23 #include <linux/mii.h>
25 #include <linux/kthread.h>
26 #include <linux/slab.h>
27 #include <linux/if_vlan.h>
28 #include <linux/netdevice.h>
29 #include <linux/netdev_features.h>
30 #include <linux/rcutree.h>
31 #include <linux/skbuff.h>
32 #include <linux/vmalloc.h>
34 #include <net/iucv/af_iucv.h>
35 #include <net/dsfield.h>
38 #include <asm/ebcdic.h>
39 #include <asm/chpid.h>
40 #include <asm/sysinfo.h>
43 #include <asm/ccwdev.h>
44 #include <asm/cpcmd.h>
46 #include "qeth_core.h"
48 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
49 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
51 [QETH_DBF_SETUP] = {"qeth_setup",
52 8, 1, 8, 5, &debug_hex_ascii_view, NULL},
53 [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
54 &debug_sprintf_view, NULL},
55 [QETH_DBF_CTRL] = {"qeth_control",
56 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
57 };
58 EXPORT_SYMBOL_GPL(qeth_dbf);
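/*
 * Illustrative sketch (not part of the original file): how the qeth_dbf table
 * above is typically turned into live s390 debug areas. The field names
 * (name, pages, areas, len, level, view, id) follow the column comment in the
 * table and are assumptions here; the real registration helper in this file
 * may differ in detail.
 */
#if 0
static int qeth_register_dbf_views_sketch(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* allocate a debug area with the requested geometry */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (!qeth_dbf[x].id)
			return -ENOMEM;
		/* attach the formatting view and set the initial trace level */
		debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
	return 0;
}
#endif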
60 static struct kmem_cache *qeth_core_header_cache;
61 static struct kmem_cache *qeth_qdio_outbuf_cache;
63 static struct device *qeth_core_root_dev;
64 static struct dentry *qeth_debugfs_root;
65 static struct lock_class_key qdio_out_skb_queue_key;
67 static void qeth_issue_next_read_cb(struct qeth_card *card,
68 struct qeth_cmd_buffer *iob,
69 unsigned int data_length);
70 static int qeth_qdio_establish(struct qeth_card *);
71 static void qeth_free_qdio_queues(struct qeth_card *card);
73 static const char *qeth_get_cardname(struct qeth_card *card)
74 {
75 if (IS_VM_NIC(card)) {
76 switch (card->info.type) {
77 case QETH_CARD_TYPE_OSD:
78 return " Virtual NIC QDIO";
79 case QETH_CARD_TYPE_IQD:
80 return " Virtual NIC Hiper";
81 case QETH_CARD_TYPE_OSM:
82 return " Virtual NIC QDIO - OSM";
83 case QETH_CARD_TYPE_OSX:
84 return " Virtual NIC QDIO - OSX";
85 default:
86 break;
87 }
88 } else {
89 switch (card->info.type) {
90 case QETH_CARD_TYPE_OSD:
91 return " OSD Express";
92 case QETH_CARD_TYPE_IQD:
93 return " HiperSockets";
94 case QETH_CARD_TYPE_OSM:
95 return " OSM QDIO";
96 case QETH_CARD_TYPE_OSX:
97 return " OSX QDIO";
98 default:
99 break;
100 }
101 }
102 return " n/a";
103 }
105 /* max length to be returned: 14 */
106 const char *qeth_get_cardname_short(struct qeth_card *card)
108 if (IS_VM_NIC(card)) {
109 switch (card->info.type) {
110 case QETH_CARD_TYPE_OSD:
111 return "Virt.NIC QDIO";
112 case QETH_CARD_TYPE_IQD:
113 return "Virt.NIC Hiper";
114 case QETH_CARD_TYPE_OSM:
115 return "Virt.NIC OSM";
116 case QETH_CARD_TYPE_OSX:
117 return "Virt.NIC OSX";
122 switch (card->info.type) {
123 case QETH_CARD_TYPE_OSD:
124 switch (card->info.link_type) {
125 case QETH_LINK_TYPE_FAST_ETH:
126 return "OSD_100";
127 case QETH_LINK_TYPE_HSTR:
128 return "HSTR";
129 case QETH_LINK_TYPE_GBIT_ETH:
130 return "OSD_1000";
131 case QETH_LINK_TYPE_10GBIT_ETH:
132 return "OSD_10GIG";
133 case QETH_LINK_TYPE_25GBIT_ETH:
134 return "OSD_25GIG";
135 case QETH_LINK_TYPE_LANE_ETH100:
136 return "OSD_FE_LANE";
137 case QETH_LINK_TYPE_LANE_TR:
138 return "OSD_TR_LANE";
139 case QETH_LINK_TYPE_LANE_ETH1000:
140 return "OSD_GbE_LANE";
141 case QETH_LINK_TYPE_LANE:
142 return "OSD_ATM_LANE";
144 return "OSD_Express";
146 case QETH_CARD_TYPE_IQD:
147 return "HiperSockets";
148 case QETH_CARD_TYPE_OSM:
149 return "OSM_1000";
150 case QETH_CARD_TYPE_OSX:
151 return "OSX_10GIG";
152 default:
153 break;
154 }
155 }
156 return "n/a";
157 }
159 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
160 int clear_start_mask)
161 {
162 unsigned long flags;
164 spin_lock_irqsave(&card->thread_mask_lock, flags);
165 card->thread_allowed_mask = threads;
166 if (clear_start_mask)
167 card->thread_start_mask &= threads;
168 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
169 wake_up(&card->wait_q);
170 }
171 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
173 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
174 {
175 unsigned long flags;
176 int rc = 0;
178 spin_lock_irqsave(&card->thread_mask_lock, flags);
179 rc = (card->thread_running_mask & threads);
180 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
181 return rc;
182 }
183 EXPORT_SYMBOL_GPL(qeth_threads_running);
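/*
 * Illustrative sketch (not part of the original driver): how a discipline
 * typically uses the thread-mask helpers when taking a card offline -- first
 * forbid new worker threads, then wait until none are running. The
 * wait_event() on card->wait_q pairs with the wake_up() calls above.
 */
#if 0
static void qeth_sketch_quiesce_threads(struct qeth_card *card)
{
	/* forbid all threads and clear any pending start requests */
	qeth_set_allowed_threads(card, 0, 1);
	/* wait until the recovery thread (if any) has finished */
	wait_event(card->wait_q,
		   qeth_threads_running(card, QETH_RECOVER_THREAD) == 0);
}
#endif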
185 static void qeth_clear_working_pool_list(struct qeth_card *card)
187 struct qeth_buffer_pool_entry *pool_entry, *tmp;
188 struct qeth_qdio_q *queue = card->qdio.in_q;
191 QETH_CARD_TEXT(card, 5, "clwrklst");
192 list_for_each_entry_safe(pool_entry, tmp,
193 &card->qdio.in_buf_pool.entry_list, list)
194 list_del(&pool_entry->list);
199 for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
200 queue->bufs[i].pool_entry = NULL;
203 static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
207 for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
208 if (entry->elements[i])
209 __free_page(entry->elements[i]);
215 static void qeth_free_buffer_pool(struct qeth_card *card)
217 struct qeth_buffer_pool_entry *entry, *tmp;
219 list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
221 list_del(&entry->init_list);
222 qeth_free_pool_entry(entry);
226 static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
228 struct qeth_buffer_pool_entry *entry;
231 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
235 for (i = 0; i < pages; i++) {
236 entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
238 if (!entry->elements[i]) {
239 qeth_free_pool_entry(entry);
247 static int qeth_alloc_buffer_pool(struct qeth_card *card)
249 unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
252 QETH_CARD_TEXT(card, 5, "alocpool");
253 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
254 struct qeth_buffer_pool_entry *entry;
256 entry = qeth_alloc_pool_entry(buf_elements);
258 qeth_free_buffer_pool(card);
262 list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
267 int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
269 unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
270 struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
271 struct qeth_buffer_pool_entry *entry, *tmp;
272 int delta = count - pool->buf_count;
275 QETH_CARD_TEXT(card, 2, "realcbp");
277 /* Defer until queue is allocated: */
278 if (!card->qdio.in_q)
281 /* Remove entries from the pool: */
283 entry = list_first_entry(&pool->entry_list,
284 struct qeth_buffer_pool_entry,
286 list_del(&entry->init_list);
287 qeth_free_pool_entry(entry);
292 /* Allocate additional entries: */
294 entry = qeth_alloc_pool_entry(buf_elements);
296 list_for_each_entry_safe(entry, tmp, &entries,
298 list_del(&entry->init_list);
299 qeth_free_pool_entry(entry);
305 list_add(&entry->init_list, &entries);
310 list_splice(&entries, &pool->entry_list);
313 card->qdio.in_buf_pool.buf_count = count;
314 pool->buf_count = count;
317 EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
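/*
 * Illustrative sketch (not part of the original driver): a typical caller of
 * qeth_resize_buffer_pool() is a sysfs-style handler that adjusts the
 * inbound buffer count; it runs with the configuration mutex held so the
 * pool is not resized concurrently with recovery. The helper name below is
 * hypothetical.
 */
#if 0
static int qeth_sketch_set_bufcnt(struct qeth_card *card, unsigned int cnt)
{
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_resize_buffer_pool(card, cnt);
	mutex_unlock(&card->conf_mutex);
	return rc;
}
#endif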
319 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
324 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
328 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
330 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
336 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
341 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
342 q->bufs[i].buffer = q->qdio_bufs[i];
344 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
348 static int qeth_cq_init(struct qeth_card *card)
352 if (card->options.cq == QETH_CQ_ENABLED) {
353 QETH_CARD_TEXT(card, 2, "cqinit");
354 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
355 QDIO_MAX_BUFFERS_PER_Q);
356 card->qdio.c_q->next_buf_to_init = 127;
357 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
358 card->qdio.no_in_queues - 1, 0, 127, NULL);
360 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
369 static int qeth_alloc_cq(struct qeth_card *card)
371 if (card->options.cq == QETH_CQ_ENABLED) {
372 QETH_CARD_TEXT(card, 2, "cqon");
373 card->qdio.c_q = qeth_alloc_qdio_queue();
374 if (!card->qdio.c_q) {
375 dev_err(&card->gdev->dev, "Failed to create completion queue\n");
379 card->qdio.no_in_queues = 2;
381 QETH_CARD_TEXT(card, 2, "nocq");
382 card->qdio.c_q = NULL;
383 card->qdio.no_in_queues = 1;
385 QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
389 static void qeth_free_cq(struct qeth_card *card)
391 if (card->qdio.c_q) {
392 --card->qdio.no_in_queues;
393 qeth_free_qdio_queue(card->qdio.c_q);
394 card->qdio.c_q = NULL;
398 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
401 enum iucv_tx_notify n;
405 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
411 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
412 TX_NOTIFY_UNREACHABLE;
415 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
416 TX_NOTIFY_GENERALERROR;
423 static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
425 if (refcount_dec_and_test(&iob->ref_count)) {
430 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
431 void *data)
432 {
433 ccw->cmd_code = cmd_code;
434 ccw->flags = flags | CCW_FLAG_SLI;
435 ccw->count = len;
436 ccw->cda = (__u32) __pa(data);
437 }
439 static int __qeth_issue_next_read(struct qeth_card *card)
441 struct qeth_cmd_buffer *iob = card->read_cmd;
442 struct qeth_channel *channel = iob->channel;
443 struct ccw1 *ccw = __ccw_from_cmd(iob);
446 QETH_CARD_TEXT(card, 5, "issnxrd");
447 if (channel->state != CH_STATE_UP)
450 memset(iob->data, 0, iob->length);
451 qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
452 iob->callback = qeth_issue_next_read_cb;
453 /* keep the cmd alive after completion: */
456 QETH_CARD_TEXT(card, 6, "noirqpnd");
457 rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
459 channel->active_cmd = iob;
461 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
462 rc, CARD_DEVID(card));
463 qeth_unlock_channel(card, channel);
465 card->read_or_write_problem = 1;
466 qeth_schedule_recovery(card);
471 static int qeth_issue_next_read(struct qeth_card *card)
475 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
476 ret = __qeth_issue_next_read(card);
477 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
482 static void qeth_enqueue_cmd(struct qeth_card *card,
483 struct qeth_cmd_buffer *iob)
485 spin_lock_irq(&card->lock);
486 list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
487 spin_unlock_irq(&card->lock);
490 static void qeth_dequeue_cmd(struct qeth_card *card,
491 struct qeth_cmd_buffer *iob)
493 spin_lock_irq(&card->lock);
494 list_del(&iob->list_entry);
495 spin_unlock_irq(&card->lock);
498 static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
499 {
500 iob->rc = reason;
501 complete(&iob->done);
502 }
504 static void qeth_flush_local_addrs4(struct qeth_card *card)
506 struct qeth_local_addr *addr;
507 struct hlist_node *tmp;
510 spin_lock_irq(&card->local_addrs4_lock);
511 hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
512 hash_del_rcu(&addr->hnode);
513 kfree_rcu(addr, rcu);
515 spin_unlock_irq(&card->local_addrs4_lock);
518 static void qeth_flush_local_addrs6(struct qeth_card *card)
520 struct qeth_local_addr *addr;
521 struct hlist_node *tmp;
524 spin_lock_irq(&card->local_addrs6_lock);
525 hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
526 hash_del_rcu(&addr->hnode);
527 kfree_rcu(addr, rcu);
529 spin_unlock_irq(&card->local_addrs6_lock);
532 static void qeth_flush_local_addrs(struct qeth_card *card)
534 qeth_flush_local_addrs4(card);
535 qeth_flush_local_addrs6(card);
536 }
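/*
 * Locking scheme for the local_addrs4/6 hashes (descriptive note): writers --
 * the ADD/DEL event handlers below and the flush helpers above -- serialize
 * on card->local_addrs4_lock / local_addrs6_lock and retire entries via
 * hash_del_rcu() + kfree_rcu(), while the TX fast-path readers
 * (qeth_next_hop_is_local_v4/v6()) walk the hashes under RCU protection
 * without taking the spinlocks.
 */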
538 static void qeth_add_local_addrs4(struct qeth_card *card,
539 struct qeth_ipacmd_local_addrs4 *cmd)
543 if (cmd->addr_length !=
544 sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
545 dev_err_ratelimited(&card->gdev->dev,
546 "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
551 spin_lock(&card->local_addrs4_lock);
552 for (i = 0; i < cmd->count; i++) {
553 unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
554 struct qeth_local_addr *addr;
555 bool duplicate = false;
557 hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
558 if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
567 addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
569 dev_err(&card->gdev->dev,
570 "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
571 &cmd->addrs[i].addr);
575 ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
576 hash_add_rcu(card->local_addrs4, &addr->hnode, key);
578 spin_unlock(&card->local_addrs4_lock);
581 static void qeth_add_local_addrs6(struct qeth_card *card,
582 struct qeth_ipacmd_local_addrs6 *cmd)
586 if (cmd->addr_length !=
587 sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
588 dev_err_ratelimited(&card->gdev->dev,
589 "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
594 spin_lock(&card->local_addrs6_lock);
595 for (i = 0; i < cmd->count; i++) {
596 u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
597 struct qeth_local_addr *addr;
598 bool duplicate = false;
600 hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
601 if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
610 addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
612 dev_err(&card->gdev->dev,
613 "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
614 &cmd->addrs[i].addr);
618 addr->addr = cmd->addrs[i].addr;
619 hash_add_rcu(card->local_addrs6, &addr->hnode, key);
621 spin_unlock(&card->local_addrs6_lock);
624 static void qeth_del_local_addrs4(struct qeth_card *card,
625 struct qeth_ipacmd_local_addrs4 *cmd)
629 if (cmd->addr_length !=
630 sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
631 dev_err_ratelimited(&card->gdev->dev,
632 "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
637 spin_lock(&card->local_addrs4_lock);
638 for (i = 0; i < cmd->count; i++) {
639 struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
640 unsigned int key = ipv4_addr_hash(addr->addr);
641 struct qeth_local_addr *tmp;
643 hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
644 if (tmp->addr.s6_addr32[3] == addr->addr) {
645 hash_del_rcu(&tmp->hnode);
651 spin_unlock(&card->local_addrs4_lock);
654 static void qeth_del_local_addrs6(struct qeth_card *card,
655 struct qeth_ipacmd_local_addrs6 *cmd)
659 if (cmd->addr_length !=
660 sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
661 dev_err_ratelimited(&card->gdev->dev,
662 "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
667 spin_lock(&card->local_addrs6_lock);
668 for (i = 0; i < cmd->count; i++) {
669 struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
670 u32 key = ipv6_addr_hash(&addr->addr);
671 struct qeth_local_addr *tmp;
673 hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
674 if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
675 hash_del_rcu(&tmp->hnode);
681 spin_unlock(&card->local_addrs6_lock);
684 static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
687 struct qeth_local_addr *tmp;
688 bool is_local = false;
692 if (hash_empty(card->local_addrs4))
696 next_hop = qeth_next_hop_v4_rcu(skb,
697 qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
698 key = ipv4_addr_hash(next_hop);
700 hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
701 if (tmp->addr.s6_addr32[3] == next_hop) {
711 static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
714 struct qeth_local_addr *tmp;
715 struct in6_addr *next_hop;
716 bool is_local = false;
719 if (hash_empty(card->local_addrs6))
723 next_hop = qeth_next_hop_v6_rcu(skb,
724 qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
725 key = ipv6_addr_hash(next_hop);
727 hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
728 if (ipv6_addr_equal(&tmp->addr, next_hop)) {
738 static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
740 struct qeth_card *card = m->private;
741 struct qeth_local_addr *tmp;
745 hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
746 seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
747 hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
748 seq_printf(m, "%pI6c\n", &tmp->addr);
754 DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
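/*
 * Note: DEFINE_SHOW_ATTRIBUTE() above generates qeth_debugfs_local_addr_fops
 * (single-open file operations wrapping the _show() helper); qeth_alloc_card()
 * later exposes it as the "local_addrs" file in the card's debugfs directory.
 */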
756 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
757 struct qeth_card *card)
759 const char *ipa_name;
760 int com = cmd->hdr.command;
762 ipa_name = qeth_get_ipa_cmd_name(com);
764 if (rc)
765 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
766 ipa_name, com, CARD_DEVID(card), rc,
767 qeth_get_ipa_msg(rc));
768 else
769 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
770 ipa_name, com, CARD_DEVID(card));
771 }
773 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
774 struct qeth_ipa_cmd *cmd)
776 QETH_CARD_TEXT(card, 5, "chkipad");
778 if (IS_IPA_REPLY(cmd)) {
779 if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
780 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
784 /* handle unsolicited event: */
785 switch (cmd->hdr.command) {
786 case IPA_CMD_STOPLAN:
787 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
788 dev_err(&card->gdev->dev,
789 "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
790 netdev_name(card->dev));
791 /* Set offline, then probably fail to set online: */
792 qeth_schedule_recovery(card);
794 /* stay online for subsequent STARTLAN */
795 dev_warn(&card->gdev->dev,
796 "The link for interface %s on CHPID 0x%X failed\n",
797 netdev_name(card->dev), card->info.chpid);
798 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
799 netif_carrier_off(card->dev);
802 case IPA_CMD_STARTLAN:
803 dev_info(&card->gdev->dev,
804 "The link for %s on CHPID 0x%X has been restored\n",
805 netdev_name(card->dev), card->info.chpid);
806 if (card->info.hwtrap)
807 card->info.hwtrap = 2;
808 qeth_schedule_recovery(card);
810 case IPA_CMD_SETBRIDGEPORT_IQD:
811 case IPA_CMD_SETBRIDGEPORT_OSA:
812 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
813 if (card->discipline->control_event_handler(card, cmd))
816 case IPA_CMD_REGISTER_LOCAL_ADDR:
817 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
818 qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
819 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
820 qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
822 QETH_CARD_TEXT(card, 3, "irla");
824 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
825 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
826 qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
827 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
828 qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
830 QETH_CARD_TEXT(card, 3, "urla");
833 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
838 static void qeth_clear_ipacmd_list(struct qeth_card *card)
840 struct qeth_cmd_buffer *iob;
843 QETH_CARD_TEXT(card, 4, "clipalst");
845 spin_lock_irqsave(&card->lock, flags);
846 list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
847 qeth_notify_cmd(iob, -ECANCELED);
848 spin_unlock_irqrestore(&card->lock, flags);
851 static int qeth_check_idx_response(struct qeth_card *card,
852 unsigned char *buffer)
854 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
855 if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
856 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
858 QETH_CARD_TEXT(card, 2, "ckidxres");
859 QETH_CARD_TEXT(card, 2, " idxterm");
860 QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
861 if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
862 buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
863 dev_err(&card->gdev->dev,
864 "The device does not support the configured transport mode\n");
865 return -EPROTONOSUPPORT;
872 static void qeth_release_buffer_cb(struct qeth_card *card,
873 struct qeth_cmd_buffer *iob,
874 unsigned int data_length)
875 {
876 qeth_put_cmd(iob);
877 }
879 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
880 {
881 qeth_notify_cmd(iob, rc);
882 qeth_put_cmd(iob);
883 }
885 static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
887 unsigned int ccws, long timeout)
889 struct qeth_cmd_buffer *iob;
891 if (length > QETH_BUFSIZE)
894 iob = kzalloc(sizeof(*iob), GFP_KERNEL);
898 iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
899 GFP_KERNEL | GFP_DMA);
905 init_completion(&iob->done);
906 spin_lock_init(&iob->lock);
907 refcount_set(&iob->ref_count, 1);
908 iob->channel = channel;
909 iob->timeout = timeout;
910 iob->length = length;
911 return iob;
912 }
914 static void qeth_issue_next_read_cb(struct qeth_card *card,
915 struct qeth_cmd_buffer *iob,
916 unsigned int data_length)
918 struct qeth_cmd_buffer *request = NULL;
919 struct qeth_ipa_cmd *cmd = NULL;
920 struct qeth_reply *reply = NULL;
921 struct qeth_cmd_buffer *tmp;
925 QETH_CARD_TEXT(card, 4, "sndctlcb");
926 rc = qeth_check_idx_response(card, iob->data);
931 qeth_schedule_recovery(card);
934 qeth_clear_ipacmd_list(card);
938 cmd = __ipa_reply(iob);
940 cmd = qeth_check_ipa_data(card, cmd);
945 /* match against pending cmd requests */
946 spin_lock_irqsave(&card->lock, flags);
947 list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
948 if (tmp->match && tmp->match(tmp, iob)) {
950 /* take the object outside the lock */
951 qeth_get_cmd(request);
955 spin_unlock_irqrestore(&card->lock, flags);
960 reply = &request->reply;
961 if (!reply->callback) {
966 spin_lock_irqsave(&request->lock, flags);
968 /* Bail out when the requestor has already left: */
971 rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
973 spin_unlock_irqrestore(&request->lock, flags);
977 qeth_notify_cmd(request, rc);
978 qeth_put_cmd(request);
980 memcpy(&card->seqno.pdu_hdr_ack,
981 QETH_PDU_HEADER_SEQ_NO(iob->data),
983 __qeth_issue_next_read(card);
988 static int qeth_set_thread_start_bit(struct qeth_card *card,
989 unsigned long thread)
994 spin_lock_irqsave(&card->thread_mask_lock, flags);
995 if (!(card->thread_allowed_mask & thread))
997 else if (card->thread_start_mask & thread)
1000 card->thread_start_mask |= thread;
1001 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1006 static void qeth_clear_thread_start_bit(struct qeth_card *card,
1007 unsigned long thread)
1009 unsigned long flags;
1011 spin_lock_irqsave(&card->thread_mask_lock, flags);
1012 card->thread_start_mask &= ~thread;
1013 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1014 wake_up(&card->wait_q);
1017 static void qeth_clear_thread_running_bit(struct qeth_card *card,
1018 unsigned long thread)
1020 unsigned long flags;
1022 spin_lock_irqsave(&card->thread_mask_lock, flags);
1023 card->thread_running_mask &= ~thread;
1024 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1025 wake_up_all(&card->wait_q);
1028 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1030 unsigned long flags;
1033 spin_lock_irqsave(&card->thread_mask_lock, flags);
1034 if (card->thread_start_mask & thread) {
1035 if ((card->thread_allowed_mask & thread) &&
1036 !(card->thread_running_mask & thread)) {
1038 card->thread_start_mask &= ~thread;
1039 card->thread_running_mask |= thread;
1043 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1047 static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1051 wait_event(card->wait_q,
1052 (rc = __qeth_do_run_thread(card, thread)) >= 0);
1056 int qeth_schedule_recovery(struct qeth_card *card)
1060 QETH_CARD_TEXT(card, 2, "startrec");
1062 rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
1063 if (!rc)
1064 schedule_work(&card->kernel_thread_starter);
1066 return rc;
1067 }
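/*
 * Recovery flow (descriptive note): qeth_schedule_recovery() only marks the
 * QETH_RECOVER_THREAD start bit and kicks card->kernel_thread_starter; the
 * work item (qeth_start_kernel_thread() below) then spawns the "qeth_recover"
 * kthread running qeth_do_reset() once the thread is both allowed and not
 * already running.
 */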
1069 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
1075 sense = (char *) irb->ecw;
1076 cstat = irb->scsw.cmd.cstat;
1077 dstat = irb->scsw.cmd.dstat;
1079 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1080 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1081 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
1082 QETH_CARD_TEXT(card, 2, "CGENCHK");
1083 dev_warn(&cdev->dev, "The qeth device driver "
1084 "failed to recover an error on the device\n");
1085 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
1086 CCW_DEVID(cdev), dstat, cstat);
1087 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
1092 if (dstat & DEV_STAT_UNIT_CHECK) {
1093 if (sense[SENSE_RESETTING_EVENT_BYTE] &
1094 SENSE_RESETTING_EVENT_FLAG) {
1095 QETH_CARD_TEXT(card, 2, "REVIND");
1098 if (sense[SENSE_COMMAND_REJECT_BYTE] &
1099 SENSE_COMMAND_REJECT_FLAG) {
1100 QETH_CARD_TEXT(card, 2, "CMDREJi");
1103 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
1104 QETH_CARD_TEXT(card, 2, "AFFE");
1107 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
1108 QETH_CARD_TEXT(card, 2, "ZEROSEN");
1111 QETH_CARD_TEXT(card, 2, "DGENCHK");
1117 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
1123 switch (PTR_ERR(irb)) {
1125 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1127 QETH_CARD_TEXT(card, 2, "ckirberr");
1128 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
1131 dev_warn(&cdev->dev, "A hardware operation timed out"
1132 " on the device\n");
1133 QETH_CARD_TEXT(card, 2, "ckirberr");
1134 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
1137 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1138 PTR_ERR(irb), CCW_DEVID(cdev));
1139 QETH_CARD_TEXT(card, 2, "ckirberr");
1140 QETH_CARD_TEXT(card, 2, " rc???");
1141 return PTR_ERR(irb);
1145 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1150 struct qeth_cmd_buffer *iob = NULL;
1151 struct ccwgroup_device *gdev;
1152 struct qeth_channel *channel;
1153 struct qeth_card *card;
1155 /* while we hold the ccwdev lock, this stays valid: */
1156 gdev = dev_get_drvdata(&cdev->dev);
1157 card = dev_get_drvdata(&gdev->dev);
1159 QETH_CARD_TEXT(card, 5, "irq");
1161 if (card->read.ccwdev == cdev) {
1162 channel = &card->read;
1163 QETH_CARD_TEXT(card, 5, "read");
1164 } else if (card->write.ccwdev == cdev) {
1165 channel = &card->write;
1166 QETH_CARD_TEXT(card, 5, "write");
1168 channel = &card->data;
1169 QETH_CARD_TEXT(card, 5, "data");
1173 QETH_CARD_TEXT(card, 5, "irqunsol");
1174 } else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
1175 QETH_CARD_TEXT(card, 5, "irqunexp");
1178 "Received IRQ with intparm %lx, expected %px\n",
1179 intparm, channel->active_cmd);
1180 if (channel->active_cmd)
1181 qeth_cancel_cmd(channel->active_cmd, -EIO);
1183 iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
1186 qeth_unlock_channel(card, channel);
1188 rc = qeth_check_irb_error(card, cdev, irb);
1190 /* IO was terminated, free its resources. */
1192 qeth_cancel_cmd(iob, rc);
1196 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
1197 channel->state = CH_STATE_STOPPED;
1198 wake_up(&card->wait_q);
1201 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1202 channel->state = CH_STATE_HALTED;
1203 wake_up(&card->wait_q);
1206 if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
1207 SCSW_FCTL_HALT_FUNC))) {
1208 qeth_cancel_cmd(iob, -ECANCELED);
1212 cstat = irb->scsw.cmd.cstat;
1213 dstat = irb->scsw.cmd.dstat;
1215 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1216 (dstat & DEV_STAT_UNIT_CHECK) ||
1218 if (irb->esw.esw0.erw.cons) {
1219 dev_warn(&channel->ccwdev->dev,
1220 "The qeth device driver failed to recover "
1221 "an error on the device\n");
1222 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1223 CCW_DEVID(channel->ccwdev), cstat,
1225 print_hex_dump(KERN_WARNING, "qeth: irb ",
1226 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1227 print_hex_dump(KERN_WARNING, "qeth: sense data ",
1228 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1231 rc = qeth_get_problem(card, cdev, irb);
1233 card->read_or_write_problem = 1;
1235 qeth_cancel_cmd(iob, rc);
1236 qeth_clear_ipacmd_list(card);
1237 qeth_schedule_recovery(card);
1244 if (irb->scsw.cmd.count > iob->length) {
1245 qeth_cancel_cmd(iob, -EIO);
1249 iob->callback(card, iob,
1250 iob->length - irb->scsw.cmd.count);
1254 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1255 struct qeth_qdio_out_buffer *buf,
1256 enum iucv_tx_notify notification)
1258 struct sk_buff *skb;
1260 skb_queue_walk(&buf->skb_list, skb) {
1261 struct sock *sk = skb->sk;
1263 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1264 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1265 if (sk && sk->sk_family == PF_IUCV)
1266 iucv_sk(sk)->sk_txnotify(sk, notification);
1267 }
1268 }
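/*
 * Descriptive note: only skbs owned by AF_IUCV sockets carry an sk_txnotify()
 * hook, so qeth_notify_skbs() is effectively the bridge that forwards the
 * TX_NOTIFY_* value computed by qeth_compute_cq_notification() to the af_iucv
 * code for HiperSockets completion-queue events.
 */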
1270 static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
1271 struct qeth_qdio_out_buffer *buf, bool error,
1274 struct sk_buff *skb;
1277 if (buf->next_element_to_fill == 0)
1280 QETH_TXQ_STAT_INC(queue, bufs);
1281 QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1283 QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
1285 QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
1286 QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
1289 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
1290 unsigned int bytes = qdisc_pkt_len(skb);
1291 bool is_tso = skb_is_gso(skb);
1292 unsigned int packets;
1294 packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
1296 if (skb->ip_summed == CHECKSUM_PARTIAL)
1297 QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
1298 if (skb_is_nonlinear(skb))
1299 QETH_TXQ_STAT_INC(queue, skbs_sg);
1301 QETH_TXQ_STAT_INC(queue, skbs_tso);
1302 QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
1306 napi_consume_skb(skb, budget);
1310 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1311 struct qeth_qdio_out_buffer *buf,
1312 bool error, int budget)
1316 /* is PCI flag set on buffer? */
1317 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
1318 atomic_dec(&queue->set_pci_flags_count);
1319 QETH_TXQ_STAT_INC(queue, completion_irq);
1322 qeth_tx_complete_buf(queue, buf, error, budget);
1324 for (i = 0; i < queue->max_elements; ++i) {
1325 void *data = phys_to_virt(buf->buffer->element[i].addr);
1327 if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
1328 kmem_cache_free(qeth_core_header_cache, data);
1331 qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
1332 buf->next_element_to_fill = 0;
1335 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1338 static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
1341 qdio_release_aob(buf->aob);
1342 kmem_cache_free(qeth_qdio_outbuf_cache, buf);
1345 static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
1346 struct qeth_qdio_out_q *queue,
1347 bool drain, int budget)
1349 struct qeth_qdio_out_buffer *buf, *tmp;
1351 list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
1352 struct qeth_qaob_priv1 *priv;
1353 struct qaob *aob = buf->aob;
1354 enum iucv_tx_notify notify;
1357 priv = (struct qeth_qaob_priv1 *)&aob->user1;
1358 if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
1359 QETH_CARD_TEXT(card, 5, "fp");
1360 QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
1362 notify = drain ? TX_NOTIFY_GENERALERROR :
1363 qeth_compute_cq_notification(aob->aorc, 1);
1364 qeth_notify_skbs(queue, buf, notify);
1365 qeth_tx_complete_buf(queue, buf, drain, budget);
1368 i < aob->sb_count && i < queue->max_elements;
1370 void *data = phys_to_virt(aob->sba[i]);
1372 if (test_bit(i, buf->from_kmem_cache) && data)
1373 kmem_cache_free(qeth_core_header_cache,
1377 list_del(&buf->list_entry);
1378 qeth_free_out_buf(buf);
1383 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1387 qeth_tx_complete_pending_bufs(q->card, q, true, 0);
1389 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1393 qeth_clear_output_buffer(q, q->bufs[j], true, 0);
1395 qeth_free_out_buf(q->bufs[j]);
1401 static void qeth_drain_output_queues(struct qeth_card *card)
1405 QETH_CARD_TEXT(card, 2, "clearqdbf");
1406 /* clear outbound buffers to free skbs */
1407 for (i = 0; i < card->qdio.no_out_queues; ++i) {
1408 if (card->qdio.out_qs[i])
1409 qeth_drain_output_queue(card->qdio.out_qs[i], false);
1413 static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1415 unsigned int max = single ? 1 : card->dev->num_tx_queues;
1417 if (card->qdio.no_out_queues == max)
1420 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1421 qeth_free_qdio_queues(card);
1423 if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
1424 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1426 card->qdio.no_out_queues = max;
1429 static int qeth_update_from_chp_desc(struct qeth_card *card)
1431 struct ccw_device *ccwdev;
1432 struct channel_path_desc_fmt0 *chp_dsc;
1434 QETH_CARD_TEXT(card, 2, "chp_desc");
1436 ccwdev = card->data.ccwdev;
1437 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1441 card->info.func_level = 0x4100 + chp_dsc->desc;
1443 if (IS_OSD(card) || IS_OSX(card))
1444 /* CHPP field bit 6 == 1 -> single queue */
1445 qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1448 QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1449 QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1453 static void qeth_init_qdio_info(struct qeth_card *card)
1455 QETH_CARD_TEXT(card, 4, "intqdinf");
1456 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1457 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1458 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1461 card->qdio.no_in_queues = 1;
1462 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1464 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1466 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1467 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1468 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1469 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1472 static void qeth_set_initial_options(struct qeth_card *card)
1474 card->options.route4.type = NO_ROUTER;
1475 card->options.route6.type = NO_ROUTER;
1476 card->options.isolation = ISOLATION_MODE_NONE;
1477 card->options.cq = QETH_CQ_DISABLED;
1478 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1481 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1483 unsigned long flags;
1486 spin_lock_irqsave(&card->thread_mask_lock, flags);
1487 QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
1488 (u8) card->thread_start_mask,
1489 (u8) card->thread_allowed_mask,
1490 (u8) card->thread_running_mask);
1491 rc = (card->thread_start_mask & thread);
1492 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1496 static int qeth_do_reset(void *data);
1497 static void qeth_start_kernel_thread(struct work_struct *work)
1499 struct task_struct *ts;
1500 struct qeth_card *card = container_of(work, struct qeth_card,
1501 kernel_thread_starter);
1502 QETH_CARD_TEXT(card, 2, "strthrd");
1504 if (card->read.state != CH_STATE_UP &&
1505 card->write.state != CH_STATE_UP)
1507 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1508 ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1510 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1511 qeth_clear_thread_running_bit(card,
1512 QETH_RECOVER_THREAD);
1517 static void qeth_buffer_reclaim_work(struct work_struct *);
1518 static void qeth_setup_card(struct qeth_card *card)
1520 QETH_CARD_TEXT(card, 2, "setupcrd");
1522 card->info.type = CARD_RDEV(card)->id.driver_info;
1523 card->state = CARD_STATE_DOWN;
1524 spin_lock_init(&card->lock);
1525 spin_lock_init(&card->thread_mask_lock);
1526 mutex_init(&card->conf_mutex);
1527 mutex_init(&card->discipline_mutex);
1528 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1529 INIT_LIST_HEAD(&card->cmd_waiter_list);
1530 init_waitqueue_head(&card->wait_q);
1531 qeth_set_initial_options(card);
1532 /* IP address takeover */
1533 INIT_LIST_HEAD(&card->ipato.entries);
1534 qeth_init_qdio_info(card);
1535 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1536 hash_init(card->rx_mode_addrs);
1537 hash_init(card->local_addrs4);
1538 hash_init(card->local_addrs6);
1539 spin_lock_init(&card->local_addrs4_lock);
1540 spin_lock_init(&card->local_addrs6_lock);
1543 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1545 struct qeth_card *card = container_of(slr, struct qeth_card,
1546 qeth_service_level);
1547 if (card->info.mcl_level[0])
1548 seq_printf(m, "qeth: %s firmware level %s\n",
1549 CARD_BUS_ID(card), card->info.mcl_level);
1552 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1554 struct qeth_card *card;
1556 QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1557 card = kzalloc(sizeof(*card), GFP_KERNEL);
1560 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1563 dev_set_drvdata(&gdev->dev, card);
1564 CARD_RDEV(card) = gdev->cdev[0];
1565 CARD_WDEV(card) = gdev->cdev[1];
1566 CARD_DDEV(card) = gdev->cdev[2];
1568 card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1569 dev_name(&gdev->dev));
1570 if (!card->event_wq)
1573 card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1574 if (!card->read_cmd)
1577 card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1579 debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1580 &qeth_debugfs_local_addr_fops);
1582 card->qeth_service_level.seq_print = qeth_core_sl_print;
1583 register_service_level(&card->qeth_service_level);
1587 destroy_workqueue(card->event_wq);
1589 dev_set_drvdata(&gdev->dev, NULL);
1595 static int qeth_clear_channel(struct qeth_card *card,
1596 struct qeth_channel *channel)
1600 QETH_CARD_TEXT(card, 3, "clearch");
1601 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1602 rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1603 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1607 rc = wait_event_interruptible_timeout(card->wait_q,
1608 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1609 if (rc == -ERESTARTSYS)
1611 if (channel->state != CH_STATE_STOPPED)
1613 channel->state = CH_STATE_DOWN;
1617 static int qeth_halt_channel(struct qeth_card *card,
1618 struct qeth_channel *channel)
1622 QETH_CARD_TEXT(card, 3, "haltch");
1623 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1624 rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1625 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1629 rc = wait_event_interruptible_timeout(card->wait_q,
1630 channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1631 if (rc == -ERESTARTSYS)
1633 if (channel->state != CH_STATE_HALTED)
1638 static int qeth_stop_channel(struct qeth_channel *channel)
1640 struct ccw_device *cdev = channel->ccwdev;
1643 rc = ccw_device_set_offline(cdev);
1645 spin_lock_irq(get_ccwdev_lock(cdev));
1646 if (channel->active_cmd)
1647 dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1648 channel->active_cmd);
1650 cdev->handler = NULL;
1651 spin_unlock_irq(get_ccwdev_lock(cdev));
1656 static int qeth_start_channel(struct qeth_channel *channel)
1658 struct ccw_device *cdev = channel->ccwdev;
1661 channel->state = CH_STATE_DOWN;
1662 xchg(&channel->active_cmd, NULL);
1664 spin_lock_irq(get_ccwdev_lock(cdev));
1665 cdev->handler = qeth_irq;
1666 spin_unlock_irq(get_ccwdev_lock(cdev));
1668 rc = ccw_device_set_online(cdev);
1675 spin_lock_irq(get_ccwdev_lock(cdev));
1676 cdev->handler = NULL;
1677 spin_unlock_irq(get_ccwdev_lock(cdev));
1681 static int qeth_halt_channels(struct qeth_card *card)
1683 int rc1 = 0, rc2 = 0, rc3 = 0;
1685 QETH_CARD_TEXT(card, 3, "haltchs");
1686 rc1 = qeth_halt_channel(card, &card->read);
1687 rc2 = qeth_halt_channel(card, &card->write);
1688 rc3 = qeth_halt_channel(card, &card->data);
1696 static int qeth_clear_channels(struct qeth_card *card)
1698 int rc1 = 0, rc2 = 0, rc3 = 0;
1700 QETH_CARD_TEXT(card, 3, "clearchs");
1701 rc1 = qeth_clear_channel(card, &card->read);
1702 rc2 = qeth_clear_channel(card, &card->write);
1703 rc3 = qeth_clear_channel(card, &card->data);
1711 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1715 QETH_CARD_TEXT(card, 3, "clhacrd");
1718 rc = qeth_halt_channels(card);
1721 return qeth_clear_channels(card);
1724 static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1728 QETH_CARD_TEXT(card, 3, "qdioclr");
1729 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1730 QETH_QDIO_CLEANING)) {
1731 case QETH_QDIO_ESTABLISHED:
1733 rc = qdio_shutdown(CARD_DDEV(card),
1734 QDIO_FLAG_CLEANUP_USING_HALT);
1736 rc = qdio_shutdown(CARD_DDEV(card),
1737 QDIO_FLAG_CLEANUP_USING_CLEAR);
1739 QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1740 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1742 case QETH_QDIO_CLEANING:
1747 rc = qeth_clear_halt_card(card, use_halt);
1749 QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1753 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1755 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1756 struct diag26c_vnic_resp *response = NULL;
1757 struct diag26c_vnic_req *request = NULL;
1758 struct ccw_dev_id id;
1762 QETH_CARD_TEXT(card, 2, "vmlayer");
1764 cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1768 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1769 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1770 if (!request || !response) {
1775 ccw_device_get_id(CARD_RDEV(card), &id);
1776 request->resp_buf_len = sizeof(*response);
1777 request->resp_version = DIAG26C_VERSION6_VM65918;
1778 request->req_format = DIAG26C_VNIC_INFO;
1780 memcpy(&request->sys_name, userid, 8);
1781 request->devno = id.devno;
1783 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1784 rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1785 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1788 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1790 if (request->resp_buf_len < sizeof(*response) ||
1791 response->version != request->resp_version) {
1796 if (response->protocol == VNIC_INFO_PROT_L2)
1797 disc = QETH_DISCIPLINE_LAYER2;
1798 else if (response->protocol == VNIC_INFO_PROT_L3)
1799 disc = QETH_DISCIPLINE_LAYER3;
1805 QETH_CARD_TEXT_(card, 2, "err%x", rc);
1809 /* Determine whether the device requires a specific layer discipline */
1810 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1812 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1815 disc = QETH_DISCIPLINE_LAYER2;
1816 else if (IS_VM_NIC(card))
1817 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1818 qeth_vm_detect_layer(card);
1821 case QETH_DISCIPLINE_LAYER2:
1822 QETH_CARD_TEXT(card, 3, "force l2");
1824 case QETH_DISCIPLINE_LAYER3:
1825 QETH_CARD_TEXT(card, 3, "force l3");
1828 QETH_CARD_TEXT(card, 3, "force no");
1834 static void qeth_set_blkt_defaults(struct qeth_card *card)
1836 QETH_CARD_TEXT(card, 2, "cfgblkt");
1838 if (card->info.use_v1_blkt) {
1839 card->info.blkt.time_total = 0;
1840 card->info.blkt.inter_packet = 0;
1841 card->info.blkt.inter_packet_jumbo = 0;
1843 card->info.blkt.time_total = 250;
1844 card->info.blkt.inter_packet = 5;
1845 card->info.blkt.inter_packet_jumbo = 15;
1849 static void qeth_idx_init(struct qeth_card *card)
1851 memset(&card->seqno, 0, sizeof(card->seqno));
1853 card->token.issuer_rm_w = 0x00010103UL;
1854 card->token.cm_filter_w = 0x00010108UL;
1855 card->token.cm_connection_w = 0x0001010aUL;
1856 card->token.ulp_filter_w = 0x0001010bUL;
1857 card->token.ulp_connection_w = 0x0001010dUL;
1859 switch (card->info.type) {
1860 case QETH_CARD_TYPE_IQD:
1861 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1863 case QETH_CARD_TYPE_OSD:
1864 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1871 static void qeth_idx_finalize_cmd(struct qeth_card *card,
1872 struct qeth_cmd_buffer *iob)
1874 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1875 QETH_SEQ_NO_LENGTH);
1876 if (iob->channel == &card->write)
1877 card->seqno.trans_hdr++;
1880 static int qeth_peer_func_level(int level)
1882 if ((level & 0xff) == 8)
1883 return (level & 0xff) + 0x400;
1884 if (((level >> 8) & 3) == 1)
1885 return (level & 0xff) + 0x200;
1886 return level;
1887 }
1889 static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1890 struct qeth_cmd_buffer *iob)
1892 qeth_idx_finalize_cmd(card, iob);
1894 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1895 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1896 card->seqno.pdu_hdr++;
1897 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1898 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1900 iob->callback = qeth_release_buffer_cb;
1903 static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
1904 struct qeth_cmd_buffer *reply)
1906 /* MPC cmds are issued strictly in sequence. */
1907 return !IS_IPA(reply->data);
1910 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1912 unsigned int data_length)
1914 struct qeth_cmd_buffer *iob;
1916 iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
1920 memcpy(iob->data, data, data_length);
1921 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
1923 iob->finalize = qeth_mpc_finalize_cmd;
1924 iob->match = qeth_mpc_match_reply;
1929 * qeth_send_control_data() - send control command to the card
1930 * @card: qeth_card structure pointer
1931 * @iob: qeth_cmd_buffer pointer
1932 * @reply_cb: callback function pointer
1933 * @cb_card: pointer to the qeth_card structure
1934 * @cb_reply: pointer to the qeth_reply structure
1935 * @cb_cmd: pointer to the original iob for non-IPA
1936 * commands, or to the qeth_ipa_cmd structure
1937 * for the IPA commands.
1938 * @reply_param: private pointer passed to the callback
1940 * Callback function gets called one or more times, with cb_cmd
1941 * pointing to the response returned by the hardware. Callback
1942 * function must return
1943 * > 0 if more reply blocks are expected,
1944 * 0 if the last or only reply block is received, and
1945 * < 0 if an error occurs.
1946 * Callback function can get the value of the reply_param pointer from the
1947 * field 'param' of the structure qeth_reply.
1948 */
1950 static int qeth_send_control_data(struct qeth_card *card,
1951 struct qeth_cmd_buffer *iob,
1952 int (*reply_cb)(struct qeth_card *cb_card,
1953 struct qeth_reply *cb_reply,
1954 unsigned long cb_cmd),
1957 struct qeth_channel *channel = iob->channel;
1958 struct qeth_reply *reply = &iob->reply;
1959 long timeout = iob->timeout;
1962 QETH_CARD_TEXT(card, 2, "sendctl");
1964 reply->callback = reply_cb;
1965 reply->param = reply_param;
1967 timeout = wait_event_interruptible_timeout(card->wait_q,
1968 qeth_trylock_channel(channel, iob),
1972 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1976 iob->finalize(card, iob);
1977 QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
1979 qeth_enqueue_cmd(card, iob);
1981 /* This pairs with iob->callback, and keeps the iob alive after IO: */
1984 QETH_CARD_TEXT(card, 6, "noirqpnd");
1985 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1986 rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
1987 (addr_t) iob, 0, 0, timeout);
1988 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1990 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
1991 CARD_DEVID(card), rc);
1992 QETH_CARD_TEXT_(card, 2, " err%d", rc);
1993 qeth_dequeue_cmd(card, iob);
1995 qeth_unlock_channel(card, channel);
1999 timeout = wait_for_completion_interruptible_timeout(&iob->done,
2002 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2004 qeth_dequeue_cmd(card, iob);
2007 /* Wait until the callback for a late reply has completed: */
2008 spin_lock_irq(&iob->lock);
2010 /* Zap any callback that's still pending: */
2012 spin_unlock_irq(&iob->lock);
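/*
 * Illustrative sketch (not part of the original driver): a minimal reply_cb
 * honouring the contract documented above -- return > 0 to wait for further
 * reply blocks, 0 when the last (or only) reply has been consumed, < 0 on
 * error. The response-field accessor and result type are hypothetical.
 */
#if 0
static int qeth_sketch_reply_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;
	u16 *result = reply->param;	/* set up via the reply_param argument */

	/* pull the interesting field out of the response buffer */
	memcpy(result, QETH_SKETCH_RESP_FIELD(iob->data), sizeof(*result));
	return 0;	/* a single reply block is expected here */
}
#endif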
2023 struct qeth_node_desc {
2024 struct node_descriptor nd1;
2025 struct node_descriptor nd2;
2026 struct node_descriptor nd3;
2029 static void qeth_read_conf_data_cb(struct qeth_card *card,
2030 struct qeth_cmd_buffer *iob,
2031 unsigned int data_length)
2033 struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2037 QETH_CARD_TEXT(card, 2, "cfgunit");
2039 if (data_length < sizeof(*nd)) {
2044 card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2045 nd->nd1.plant[1] == _ascebc['M'];
2046 tag = (u8 *)&nd->nd1.tag;
2047 card->info.chpid = tag[0];
2048 card->info.unit_addr2 = tag[1];
2050 tag = (u8 *)&nd->nd2.tag;
2051 card->info.cula = tag[1];
2053 card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2054 nd->nd3.model[1] == 0xF0 &&
2055 nd->nd3.model[2] >= 0xF1 &&
2056 nd->nd3.model[2] <= 0xF4;
2059 qeth_notify_cmd(iob, rc);
2063 static int qeth_read_conf_data(struct qeth_card *card)
2065 struct qeth_channel *channel = &card->data;
2066 struct qeth_cmd_buffer *iob;
2069 /* scan for RCD command in extended SenseID data */
2070 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2071 if (!ciw || ciw->cmd == 0)
2073 if (ciw->count < sizeof(struct qeth_node_desc))
2076 iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2080 iob->callback = qeth_read_conf_data_cb;
2081 qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2084 return qeth_send_control_data(card, iob, NULL, NULL);
2087 static int qeth_idx_check_activate_response(struct qeth_card *card,
2088 struct qeth_channel *channel,
2089 struct qeth_cmd_buffer *iob)
2093 rc = qeth_check_idx_response(card, iob->data);
2097 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2100 /* negative reply: */
2101 QETH_CARD_TEXT_(card, 2, "idxneg%c",
2102 QETH_IDX_ACT_CAUSE_CODE(iob->data));
2104 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2105 case QETH_IDX_ACT_ERR_EXCL:
2106 dev_err(&channel->ccwdev->dev,
2107 "The adapter is used exclusively by another host\n");
2109 case QETH_IDX_ACT_ERR_AUTH:
2110 case QETH_IDX_ACT_ERR_AUTH_USER:
2111 dev_err(&channel->ccwdev->dev,
2112 "Setting the device online failed because of insufficient authorization\n");
2115 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2116 CCW_DEVID(channel->ccwdev));
2121 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2122 struct qeth_cmd_buffer *iob,
2123 unsigned int data_length)
2125 struct qeth_channel *channel = iob->channel;
2129 QETH_CARD_TEXT(card, 2, "idxrdcb");
2131 rc = qeth_idx_check_activate_response(card, channel, iob);
2135 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2136 if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2137 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2138 CCW_DEVID(channel->ccwdev),
2139 card->info.func_level, peer_level);
2144 memcpy(&card->token.issuer_rm_r,
2145 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2146 QETH_MPC_TOKEN_LENGTH);
2147 memcpy(&card->info.mcl_level[0],
2148 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2151 qeth_notify_cmd(iob, rc);
2155 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2156 struct qeth_cmd_buffer *iob,
2157 unsigned int data_length)
2159 struct qeth_channel *channel = iob->channel;
2163 QETH_CARD_TEXT(card, 2, "idxwrcb");
2165 rc = qeth_idx_check_activate_response(card, channel, iob);
2169 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2170 if ((peer_level & ~0x0100) !=
2171 qeth_peer_func_level(card->info.func_level)) {
2172 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2173 CCW_DEVID(channel->ccwdev),
2174 card->info.func_level, peer_level);
2179 qeth_notify_cmd(iob, rc);
2183 static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2184 struct qeth_cmd_buffer *iob)
2186 u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2187 u8 port = ((u8)card->dev->dev_port) | 0x80;
2188 struct ccw1 *ccw = __ccw_from_cmd(iob);
2190 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2192 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2193 iob->finalize = qeth_idx_finalize_cmd;
2195 port |= QETH_IDX_ACT_INVAL_FRAME;
2196 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2197 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2198 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2199 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2200 &card->info.func_level, 2);
2201 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2202 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2205 static int qeth_idx_activate_read_channel(struct qeth_card *card)
2207 struct qeth_channel *channel = &card->read;
2208 struct qeth_cmd_buffer *iob;
2211 QETH_CARD_TEXT(card, 2, "idxread");
2213 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2217 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2218 qeth_idx_setup_activate_cmd(card, iob);
2219 iob->callback = qeth_idx_activate_read_channel_cb;
2221 rc = qeth_send_control_data(card, iob, NULL, NULL);
2225 channel->state = CH_STATE_UP;
2229 static int qeth_idx_activate_write_channel(struct qeth_card *card)
2231 struct qeth_channel *channel = &card->write;
2232 struct qeth_cmd_buffer *iob;
2235 QETH_CARD_TEXT(card, 2, "idxwrite");
2237 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2241 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2242 qeth_idx_setup_activate_cmd(card, iob);
2243 iob->callback = qeth_idx_activate_write_channel_cb;
2245 rc = qeth_send_control_data(card, iob, NULL, NULL);
2249 channel->state = CH_STATE_UP;
2253 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2256 struct qeth_cmd_buffer *iob;
2258 QETH_CARD_TEXT(card, 2, "cmenblcb");
2260 iob = (struct qeth_cmd_buffer *) data;
2261 memcpy(&card->token.cm_filter_r,
2262 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2263 QETH_MPC_TOKEN_LENGTH);
2267 static int qeth_cm_enable(struct qeth_card *card)
2269 struct qeth_cmd_buffer *iob;
2271 QETH_CARD_TEXT(card, 2, "cmenable");
2273 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2277 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2278 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2279 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2280 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2282 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2285 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2288 struct qeth_cmd_buffer *iob;
2290 QETH_CARD_TEXT(card, 2, "cmsetpcb");
2292 iob = (struct qeth_cmd_buffer *) data;
2293 memcpy(&card->token.cm_connection_r,
2294 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2295 QETH_MPC_TOKEN_LENGTH);
2299 static int qeth_cm_setup(struct qeth_card *card)
2301 struct qeth_cmd_buffer *iob;
2303 QETH_CARD_TEXT(card, 2, "cmsetup");
2305 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2309 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2310 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2311 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2312 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2313 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2314 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2315 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2318 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2320 if (link_type == QETH_LINK_TYPE_LANE_TR ||
2321 link_type == QETH_LINK_TYPE_HSTR) {
2322 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2329 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2331 struct net_device *dev = card->dev;
2332 unsigned int new_mtu;
2335 /* IQD needs accurate max MTU to set up its RX buffers: */
2338 /* tolerate quirky HW: */
2339 max_mtu = ETH_MAX_MTU;
2344 /* move any device with default MTU to new max MTU: */
2345 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2347 /* adjust RX buffer size to new max MTU: */
2348 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2349 if (dev->max_mtu && dev->max_mtu != max_mtu)
2350 qeth_free_qdio_queues(card);
2354 /* default MTUs for first setup: */
2355 else if (IS_LAYER2(card))
2356 new_mtu = ETH_DATA_LEN;
2358 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2361 dev->max_mtu = max_mtu;
2362 dev->mtu = min(new_mtu, max_mtu);
2363 rtnl_unlock();
2364 return 0;
2365 }
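/*
 * Worked example for the defaulting rule above (descriptive note): a device
 * whose mtu still equals its previous max_mtu is treated as unconfigured and
 * is bumped to the new maximum, while an administratively chosen mtu is kept
 * and only clamped to the new max_mtu.
 */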
2367 static int qeth_get_mtu_outof_framesize(int framesize)
2368 {
2369 switch (framesize) {
2370 case 0x4000:
2371 return 8192;
2372 case 0x6000:
2373 return 16384;
2374 case 0x8000:
2375 return 32768;
2376 case 0xffff:
2377 return 57344;
2378 default:
2379 return 0;
2380 }
2381 }
2383 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2386 __u16 mtu, framesize;
2388 struct qeth_cmd_buffer *iob;
2391 QETH_CARD_TEXT(card, 2, "ulpenacb");
2393 iob = (struct qeth_cmd_buffer *) data;
2394 memcpy(&card->token.ulp_filter_r,
2395 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2396 QETH_MPC_TOKEN_LENGTH);
2398 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2399 mtu = qeth_get_mtu_outof_framesize(framesize);
2401 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2403 *(u16 *)reply->param = mtu;
2405 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2406 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2408 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2409 if (!qeth_is_supported_link_type(card, link_type))
2410 return -EPROTONOSUPPORT;
2413 card->info.link_type = link_type;
2414 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2418 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2420 return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2423 static int qeth_ulp_enable(struct qeth_card *card)
2425 u8 prot_type = qeth_mpc_select_prot_type(card);
2426 struct qeth_cmd_buffer *iob;
2430 QETH_CARD_TEXT(card, 2, "ulpenabl");
2432 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2436 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2437 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2438 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2439 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2440 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2441 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2442 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2445 return qeth_update_max_mtu(card, max_mtu);
2448 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2451 struct qeth_cmd_buffer *iob;
2453 QETH_CARD_TEXT(card, 2, "ulpstpcb");
2455 iob = (struct qeth_cmd_buffer *) data;
2456 memcpy(&card->token.ulp_connection_r,
2457 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2458 QETH_MPC_TOKEN_LENGTH);
2459 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2461 QETH_CARD_TEXT(card, 2, "olmlimit");
2462 dev_err(&card->gdev->dev, "A connection could not be "
2463 "established because of an OLM limit\n");
2469 static int qeth_ulp_setup(struct qeth_card *card)
2472 struct qeth_cmd_buffer *iob;
2474 QETH_CARD_TEXT(card, 2, "ulpsetup");
2476 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2480 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2481 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2482 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2483 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2484 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2485 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2487 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2488 temp = (card->info.cula << 8) + card->info.unit_addr2;
2489 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2490 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2493 static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
2496 struct qeth_qdio_out_buffer *newbuf;
2498 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2502 newbuf->buffer = q->qdio_bufs[bidx];
2503 skb_queue_head_init(&newbuf->skb_list);
2504 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2505 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2506 q->bufs[bidx] = newbuf;
2510 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2515 qeth_drain_output_queue(q, true);
2516 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2520 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2522 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2528 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2531 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2532 if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2540 qeth_free_out_buf(q->bufs[--i]);
2541 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2547 static void qeth_tx_completion_timer(struct timer_list *timer)
2549 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2551 napi_schedule(&queue->napi);
2552 QETH_TXQ_STAT_INC(queue, completion_timer);
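/* Allocate the input queue, the inbound buffer pool, all configured
 * output queues and the completion queue. Moves qdio.state from
 * UNINITIALIZED to ALLOCATED and unwinds every allocation on failure.
 */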
2555 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2559 QETH_CARD_TEXT(card, 2, "allcqdbf");
2561 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2562 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2565 QETH_CARD_TEXT(card, 2, "inq");
2566 card->qdio.in_q = qeth_alloc_qdio_queue();
2567 if (!card->qdio.in_q)
2570 /* inbound buffer pool */
2571 if (qeth_alloc_buffer_pool(card))
2575 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2576 struct qeth_qdio_out_q *queue;
2578 queue = qeth_alloc_output_queue();
2581 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2582 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2583 card->qdio.out_qs[i] = queue;
2585 queue->queue_no = i;
2586 INIT_LIST_HEAD(&queue->pending_bufs);
2587 spin_lock_init(&queue->lock);
2588 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2590 queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2591 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2592 queue->rescan_usecs = QETH_TX_TIMER_USECS;
2594 queue->coalesce_usecs = USEC_PER_SEC;
2595 queue->max_coalesced_frames = 0;
2596 queue->rescan_usecs = 10 * USEC_PER_SEC;
2598 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2602 if (qeth_alloc_cq(card))
2609 qeth_free_output_queue(card->qdio.out_qs[--i]);
2610 card->qdio.out_qs[i] = NULL;
2612 qeth_free_buffer_pool(card);
2614 qeth_free_qdio_queue(card->qdio.in_q);
2615 card->qdio.in_q = NULL;
2617 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2621 static void qeth_free_qdio_queues(struct qeth_card *card)
2625 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2626 QETH_QDIO_UNINITIALIZED)
2630 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2631 if (card->qdio.in_q->bufs[j].rx_skb)
2632 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2634 qeth_free_qdio_queue(card->qdio.in_q);
2635 card->qdio.in_q = NULL;
2636 /* inbound buffer pool */
2637 qeth_free_buffer_pool(card);
2638 /* free outbound qdio_qs */
2639 for (i = 0; i < card->qdio.no_out_queues; i++) {
2640 qeth_free_output_queue(card->qdio.out_qs[i]);
2641 card->qdio.out_qs[i] = NULL;
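/* Fill the QIB parameter area with EBCDIC-tagged sub-blocks: PCIT (PCI
 * thresholds and timer), BLKT (blocking times) and, unless priority
 * queueing falls back to the default priorities, PQUE (per-queue
 * priorities in round-robin order).
 */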
2645 static void qeth_fill_qib_parms(struct qeth_card *card,
2646 struct qeth_qib_parms *parms)
2648 struct qeth_qdio_out_q *queue;
2651 parms->pcit_magic[0] = 'P';
2652 parms->pcit_magic[1] = 'C';
2653 parms->pcit_magic[2] = 'I';
2654 parms->pcit_magic[3] = 'T';
2655 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2656 parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2657 parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2658 parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2660 parms->blkt_magic[0] = 'B';
2661 parms->blkt_magic[1] = 'L';
2662 parms->blkt_magic[2] = 'K';
2663 parms->blkt_magic[3] = 'T';
2664 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2665 parms->blkt_total = card->info.blkt.time_total;
2666 parms->blkt_inter_packet = card->info.blkt.inter_packet;
2667 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2669 /* Prio-queueing implicitly uses the default priorities: */
2670 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2673 parms->pque_magic[0] = 'P';
2674 parms->pque_magic[1] = 'Q';
2675 parms->pque_magic[2] = 'U';
2676 parms->pque_magic[3] = 'E';
2677 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2678 parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2679 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2681 qeth_for_each_output_queue(card, queue, i)
2682 parms->pque_priority[i] = queue->priority;
2685 static int qeth_qdio_activate(struct qeth_card *card)
2687 QETH_CARD_TEXT(card, 3, "qdioact");
2688 return qdio_activate(CARD_DDEV(card));
2691 static int qeth_dm_act(struct qeth_card *card)
2693 struct qeth_cmd_buffer *iob;
2695 QETH_CARD_TEXT(card, 2, "dmact");
2697 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2701 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2702 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2703 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2704 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2705 return qeth_send_control_data(card, iob, NULL, NULL);
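/* Bring up the MPC connection: start the next read on the control
 * channel, run CM enable/setup and ULP enable/setup, allocate and
 * establish the QDIO queues, activate QDIO and finally issue DM_ACT.
 */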
2708 static int qeth_mpc_initialize(struct qeth_card *card)
2712 QETH_CARD_TEXT(card, 2, "mpcinit");
2714 rc = qeth_issue_next_read(card);
2716 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2719 rc = qeth_cm_enable(card);
2721 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2724 rc = qeth_cm_setup(card);
2726 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2729 rc = qeth_ulp_enable(card);
2731 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2734 rc = qeth_ulp_setup(card);
2736 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2739 rc = qeth_alloc_qdio_queues(card);
2741 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2744 rc = qeth_qdio_establish(card);
2746 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2747 qeth_free_qdio_queues(card);
2750 rc = qeth_qdio_activate(card);
2752 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2755 rc = qeth_dm_act(card);
2757 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2764 static void qeth_print_status_message(struct qeth_card *card)
2766 switch (card->info.type) {
2767 case QETH_CARD_TYPE_OSD:
2768 case QETH_CARD_TYPE_OSM:
2769 case QETH_CARD_TYPE_OSX:
2770 /* VM will use a non-zero first character
2771 * to indicate a HiperSockets-like reporting
2772 * of the level; OSA sets the first character to zero
2774 if (!card->info.mcl_level[0]) {
2775 sprintf(card->info.mcl_level, "%02x%02x",
2776 card->info.mcl_level[2],
2777 card->info.mcl_level[3]);
2781 case QETH_CARD_TYPE_IQD:
2782 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2783 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2784 card->info.mcl_level[0]];
2785 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2786 card->info.mcl_level[1]];
2787 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2788 card->info.mcl_level[2]];
2789 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2790 card->info.mcl_level[3]];
2791 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2795 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2797 dev_info(&card->gdev->dev,
2798 "Device is a%s card%s%s%s\nwith link type %s.\n",
2799 qeth_get_cardname(card),
2800 (card->info.mcl_level[0]) ? " (level: " : "",
2801 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2802 (card->info.mcl_level[0]) ? ")" : "",
2803 qeth_get_cardname_short(card));
2806 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2808 struct qeth_buffer_pool_entry *entry;
2810 QETH_CARD_TEXT(card, 5, "inwrklst");
2812 list_for_each_entry(entry,
2813 &card->qdio.init_pool.entry_list, init_list) {
2814 qeth_put_buffer_pool_entry(card, entry);
2818 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2819 struct qeth_card *card)
2821 struct qeth_buffer_pool_entry *entry;
2824 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2827 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2829 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2830 if (page_count(entry->elements[i]) > 1) {
2836 list_del_init(&entry->list);
2841 /* no free buffer in pool so take first one and swap pages */
2842 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2843 struct qeth_buffer_pool_entry, list);
2844 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2845 if (page_count(entry->elements[i]) > 1) {
2846 struct page *page = dev_alloc_page();
2851 __free_page(entry->elements[i]);
2852 entry->elements[i] = page;
2853 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2856 list_del_init(&entry->list);
2860 static int qeth_init_input_buffer(struct qeth_card *card,
2861 struct qeth_qdio_buffer *buf)
2863 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2866 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2867 buf->rx_skb = netdev_alloc_skb(card->dev,
2869 sizeof(struct ipv6hdr));
2875 pool_entry = qeth_find_free_buffer_pool_entry(card);
2879 buf->pool_entry = pool_entry;
2883 * since the buffer is accessed only from the input_tasklet
2884 * there shouldn't be a need to synchronize; also, since we use
2885 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of buffers
2888 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2889 buf->buffer->element[i].length = PAGE_SIZE;
2890 buf->buffer->element[i].addr =
2891 page_to_phys(pool_entry->elements[i]);
2892 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2893 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2895 buf->buffer->element[i].eflags = 0;
2896 buf->buffer->element[i].sflags = 0;
2901 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2902 struct qeth_qdio_out_q *queue)
2904 if (!IS_IQD(card) ||
2905 qeth_iqd_is_mcast_queue(card, queue) ||
2906 card->options.cq == QETH_CQ_ENABLED ||
2907 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2910 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
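/* (Re-)initialize the QDIO queues: reset the RX queue and hand as many
 * input buffers to the hardware as the buffer pool provides, set up the
 * completion queue, and reset the fill state and statistics of every TX
 * queue.
 */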
2913 static int qeth_init_qdio_queues(struct qeth_card *card)
2915 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2919 QETH_CARD_TEXT(card, 2, "initqdqs");
2922 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2923 memset(&card->rx, 0, sizeof(struct qeth_rx));
2925 qeth_initialize_working_pool_list(card);
2926 /* give only as many buffers to hardware as we have buffer pool entries */
2927 for (i = 0; i < rx_bufs; i++) {
2928 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2933 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2934 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
2937 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2942 rc = qeth_cq_init(card);
2947 /* outbound queue */
2948 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2949 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2951 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2952 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2953 queue->next_buf_to_fill = 0;
2955 queue->prev_hdr = NULL;
2956 queue->coalesced_frames = 0;
2957 queue->bulk_start = 0;
2958 queue->bulk_count = 0;
2959 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2960 atomic_set(&queue->used_buffers, 0);
2961 atomic_set(&queue->set_pci_flags_count, 0);
2962 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2967 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2968 struct qeth_cmd_buffer *iob)
2970 qeth_mpc_finalize_cmd(card, iob);
2972 /* override with IPA-specific values: */
2973 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2976 static void qeth_prepare_ipa_cmd(struct qeth_card *card,
2977 struct qeth_cmd_buffer *iob, u16 cmd_length)
2979 u8 prot_type = qeth_mpc_select_prot_type(card);
2980 u16 total_length = iob->length;
2982 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2984 iob->finalize = qeth_ipa_finalize_cmd;
2986 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2987 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2988 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2989 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2990 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2991 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2992 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2993 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2996 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
2997 struct qeth_cmd_buffer *reply)
2999 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3001 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
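/* Allocate a command buffer for an IPA command, prepare the IPA PDU and
 * fill the common command header. The usual calling pattern is a short
 * alloc-and-send sequence; illustrative sketch, mirroring
 * qeth_send_startlan() further below:
 *
 *	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
 *	if (!iob)
 *		return -ENOMEM;
 *	return qeth_send_ipa_cmd(card, iob, reply_cb_or_NULL, NULL);
 */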
3004 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3005 enum qeth_ipa_cmds cmd_code,
3006 enum qeth_prot_versions prot,
3007 unsigned int data_length)
3009 struct qeth_cmd_buffer *iob;
3010 struct qeth_ipacmd_hdr *hdr;
3012 data_length += offsetof(struct qeth_ipa_cmd, data);
3013 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3018 qeth_prepare_ipa_cmd(card, iob, data_length);
3019 iob->match = qeth_ipa_match_reply;
3021 hdr = &__ipa_cmd(iob)->hdr;
3022 hdr->command = cmd_code;
3023 hdr->initiator = IPA_CMD_INITIATOR_HOST;
3024 /* hdr->seqno is set by qeth_send_control_data() */
3025 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3026 hdr->rel_adapter_no = (u8) card->dev->dev_port;
3027 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3028 hdr->param_count = 1;
3029 hdr->prot_version = prot;
3032 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3034 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3035 struct qeth_reply *reply, unsigned long data)
3037 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3039 return (cmd->hdr.return_code) ? -EIO : 0;
3043 * qeth_send_ipa_cmd() - send an IPA command
3045 * See qeth_send_control_data() for explanation of the arguments.
3048 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3049 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3055 QETH_CARD_TEXT(card, 4, "sendipa");
3057 if (card->read_or_write_problem) {
3062 if (reply_cb == NULL)
3063 reply_cb = qeth_send_ipa_cmd_cb;
3064 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3066 qeth_clear_ipacmd_list(card);
3067 qeth_schedule_recovery(card);
3071 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3073 static int qeth_send_startlan_cb(struct qeth_card *card,
3074 struct qeth_reply *reply, unsigned long data)
3076 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3078 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3081 return (cmd->hdr.return_code) ? -EIO : 0;
3084 static int qeth_send_startlan(struct qeth_card *card)
3086 struct qeth_cmd_buffer *iob;
3088 QETH_CARD_TEXT(card, 2, "strtlan");
3090 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3093 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3096 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3098 if (!cmd->hdr.return_code)
3099 cmd->hdr.return_code =
3100 cmd->data.setadapterparms.hdr.return_code;
3101 return cmd->hdr.return_code;
3104 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3105 struct qeth_reply *reply, unsigned long data)
3107 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3108 struct qeth_query_cmds_supp *query_cmd;
3110 QETH_CARD_TEXT(card, 3, "quyadpcb");
3111 if (qeth_setadpparms_inspect_rc(cmd))
3114 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3115 if (query_cmd->lan_type & 0x7f) {
3116 if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3117 return -EPROTONOSUPPORT;
3119 card->info.link_type = query_cmd->lan_type;
3120 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3123 card->options.adp.supported = query_cmd->supported_cmds;
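/* Build a SETADAPTERPARMS sub-command of the given length. Illustrative
 * sketch of the usual pattern (compare qeth_query_setadapterparms()
 * below):
 *
 *	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
 *				   SETADP_DATA_SIZEOF(query_cmds_supp));
 *	if (!iob)
 *		return -ENOMEM;
 *	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
 */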
3127 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3128 enum qeth_ipa_setadp_cmd adp_cmd,
3129 unsigned int data_length)
3131 struct qeth_ipacmd_setadpparms_hdr *hdr;
3132 struct qeth_cmd_buffer *iob;
3134 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3136 offsetof(struct qeth_ipacmd_setadpparms,
3141 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3142 hdr->cmdlength = sizeof(*hdr) + data_length;
3143 hdr->command_code = adp_cmd;
3144 hdr->used_total = 1;
3149 static int qeth_query_setadapterparms(struct qeth_card *card)
3152 struct qeth_cmd_buffer *iob;
3154 QETH_CARD_TEXT(card, 3, "queryadp");
3155 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3156 SETADP_DATA_SIZEOF(query_cmds_supp));
3159 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3163 static int qeth_query_ipassists_cb(struct qeth_card *card,
3164 struct qeth_reply *reply, unsigned long data)
3166 struct qeth_ipa_cmd *cmd;
3168 QETH_CARD_TEXT(card, 2, "qipasscb");
3170 cmd = (struct qeth_ipa_cmd *) data;
3172 switch (cmd->hdr.return_code) {
3173 case IPA_RC_SUCCESS:
3175 case IPA_RC_NOTSUPP:
3176 case IPA_RC_L2_UNSUPPORTED_CMD:
3177 QETH_CARD_TEXT(card, 2, "ipaunsup");
3178 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3179 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3182 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3183 CARD_DEVID(card), cmd->hdr.return_code);
3187 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3188 card->options.ipa4 = cmd->hdr.assists;
3189 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3190 card->options.ipa6 = cmd->hdr.assists;
3192 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3197 static int qeth_query_ipassists(struct qeth_card *card,
3198 enum qeth_prot_versions prot)
3201 struct qeth_cmd_buffer *iob;
3203 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3204 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3207 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3211 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3212 struct qeth_reply *reply, unsigned long data)
3214 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3215 struct qeth_query_switch_attributes *attrs;
3216 struct qeth_switch_info *sw_info;
3218 QETH_CARD_TEXT(card, 2, "qswiatcb");
3219 if (qeth_setadpparms_inspect_rc(cmd))
3222 sw_info = (struct qeth_switch_info *)reply->param;
3223 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3224 sw_info->capabilities = attrs->capabilities;
3225 sw_info->settings = attrs->settings;
3226 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3231 int qeth_query_switch_attributes(struct qeth_card *card,
3232 struct qeth_switch_info *sw_info)
3234 struct qeth_cmd_buffer *iob;
3236 QETH_CARD_TEXT(card, 2, "qswiattr");
3237 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3239 if (!netif_carrier_ok(card->dev))
3241 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3244 return qeth_send_ipa_cmd(card, iob,
3245 qeth_query_switch_attributes_cb, sw_info);
3248 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3249 enum qeth_diags_cmds sub_cmd,
3250 unsigned int data_length)
3252 struct qeth_ipacmd_diagass *cmd;
3253 struct qeth_cmd_buffer *iob;
3255 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3256 DIAG_HDR_LEN + data_length);
3260 cmd = &__ipa_cmd(iob)->data.diagass;
3261 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3262 cmd->subcmd = sub_cmd;
3265 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3267 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3268 struct qeth_reply *reply, unsigned long data)
3270 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3271 u16 rc = cmd->hdr.return_code;
3274 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3278 card->info.diagass_support = cmd->data.diagass.ext;
3282 static int qeth_query_setdiagass(struct qeth_card *card)
3284 struct qeth_cmd_buffer *iob;
3286 QETH_CARD_TEXT(card, 2, "qdiagass");
3287 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3290 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3293 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3295 unsigned long info = get_zeroed_page(GFP_KERNEL);
3296 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3297 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3298 struct ccw_dev_id ccwid;
3301 tid->chpid = card->info.chpid;
3302 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3303 tid->ssid = ccwid.ssid;
3304 tid->devno = ccwid.devno;
3307 level = stsi(NULL, 0, 0, 0);
3308 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3309 tid->lparnr = info222->lpar_number;
3310 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3311 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3312 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3317 static int qeth_hw_trap_cb(struct qeth_card *card,
3318 struct qeth_reply *reply, unsigned long data)
3320 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3321 u16 rc = cmd->hdr.return_code;
3324 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3330 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3332 struct qeth_cmd_buffer *iob;
3333 struct qeth_ipa_cmd *cmd;
3335 QETH_CARD_TEXT(card, 2, "diagtrap");
3336 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3339 cmd = __ipa_cmd(iob);
3340 cmd->data.diagass.type = 1;
3341 cmd->data.diagass.action = action;
3343 case QETH_DIAGS_TRAP_ARM:
3344 cmd->data.diagass.options = 0x0003;
3345 cmd->data.diagass.ext = 0x00010000 +
3346 sizeof(struct qeth_trap_id);
3347 qeth_get_trap_id(card,
3348 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3350 case QETH_DIAGS_TRAP_DISARM:
3351 cmd->data.diagass.options = 0x0001;
3353 case QETH_DIAGS_TRAP_CAPTURE:
3356 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3359 static int qeth_check_qdio_errors(struct qeth_card *card,
3360 struct qdio_buffer *buf,
3361 unsigned int qdio_error,
3362 const char *dbftext)
3365 QETH_CARD_TEXT(card, 2, dbftext);
3366 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3367 buf->element[15].sflags);
3368 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3369 buf->element[14].sflags);
3370 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3371 if ((buf->element[15].sflags) == 0x12) {
3372 QETH_CARD_STAT_INC(card, rx_fifo_errors);
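/* Refill the input queue once enough buffers have been consumed to cross
 * the requeue threshold: re-init the consumed buffers, fall back to plain
 * skb allocation under memory pressure, schedule the buffer reclaim
 * worker when the pool is exhausted, and hand the buffers back to the
 * device via do_QDIO.
 */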
3380 static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3383 struct qeth_qdio_q *queue = card->qdio.in_q;
3384 struct list_head *lh;
3389 /* only requeue at a certain threshold to avoid SIGAs */
3390 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3391 for (i = queue->next_buf_to_init;
3392 i < queue->next_buf_to_init + count; ++i) {
3393 if (qeth_init_input_buffer(card,
3394 &queue->bufs[QDIO_BUFNR(i)])) {
3401 if (newcount < count) {
3402 /* we are in a memory shortage, so we switch back to
3403 traditional skb allocation and drop packets */
3404 atomic_set(&card->force_alloc_skb, 3);
3407 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3412 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3414 if (i == card->qdio.in_buf_pool.buf_count) {
3415 QETH_CARD_TEXT(card, 2, "qsarbw");
3416 schedule_delayed_work(
3417 &card->buffer_reclaim_work,
3418 QETH_RECLAIM_WORK_TIME);
3423 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3424 queue->next_buf_to_init, count, NULL);
3426 QETH_CARD_TEXT(card, 2, "qinberr");
3428 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3436 static void qeth_buffer_reclaim_work(struct work_struct *work)
3438 struct qeth_card *card = container_of(to_delayed_work(work),
3440 buffer_reclaim_work);
3443 napi_schedule(&card->napi);
3444 /* kick-start the NAPI softirq: */
3448 static void qeth_handle_send_error(struct qeth_card *card,
3449 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3451 int sbalf15 = buffer->buffer->element[15].sflags;
3453 QETH_CARD_TEXT(card, 6, "hdsnderr");
3454 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3459 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3462 QETH_CARD_TEXT(card, 1, "lnkfail");
3463 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3464 (u16)qdio_err, (u8)sbalf15);
3468 * qeth_prep_flush_pack_buffer() - Prepares flushing of a packing buffer.
3469 * @queue: queue to check for packing buffer
3471 * Returns number of buffers that were prepared for flush.
3473 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3475 struct qeth_qdio_out_buffer *buffer;
3477 buffer = queue->bufs[queue->next_buf_to_fill];
3478 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3479 (buffer->next_element_to_fill > 0)) {
3480 /* it's a packing buffer */
3481 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3482 queue->next_buf_to_fill =
3483 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3490 * Switches to packing state if the number of used buffers on a queue
3491 * reaches a certain limit.
3493 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3495 if (!queue->do_pack) {
3496 if (atomic_read(&queue->used_buffers)
3497 >= QETH_HIGH_WATERMARK_PACK){
3498 /* switch non-PACKING -> PACKING */
3499 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3500 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3507 * Switches from packing to non-packing mode. If there is a packing
3508 * buffer on the queue this buffer will be prepared to be flushed.
3509 * In that case 1 is returned to inform the caller. If no buffer
3510 * has to be flushed, zero is returned.
3512 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3514 if (queue->do_pack) {
3515 if (atomic_read(&queue->used_buffers)
3516 <= QETH_LOW_WATERMARK_PACK) {
3517 /* switch PACKING -> non-PACKING */
3518 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3519 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3521 return qeth_prep_flush_pack_buffer(queue);
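/* Hand 'count' buffers starting at 'index' to the device: close each
 * buffer, optionally attach a QAOB for asynchronous completion on IQD
 * unicast queues with CQ enabled, request a PCI interrupt when the queue
 * approaches the packing watermark, and issue the SIGA via do_QDIO. TX
 * completion processing is then either triggered via NAPI or armed via
 * the completion timer.
 */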
3527 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3530 struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3531 struct qeth_card *card = queue->card;
3532 unsigned int frames, usecs;
3533 struct qaob *aob = NULL;
3537 for (i = index; i < index + count; ++i) {
3538 unsigned int bidx = QDIO_BUFNR(i);
3539 struct sk_buff *skb;
3541 buf = queue->bufs[bidx];
3542 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3543 SBAL_EFLAGS_LAST_ENTRY;
3544 queue->coalesced_frames += buf->frames;
3547 skb_queue_walk(&buf->skb_list, skb)
3548 skb_tx_timestamp(skb);
3553 if (card->options.cq == QETH_CQ_ENABLED &&
3554 !qeth_iqd_is_mcast_queue(card, queue) &&
3557 buf->aob = qdio_allocate_aob();
3559 struct qeth_qaob_priv1 *priv;
3562 priv = (struct qeth_qaob_priv1 *)&aob->user1;
3563 priv->state = QETH_QAOB_ISSUED;
3564 priv->queue_no = queue->queue_no;
3568 if (!queue->do_pack) {
3569 if ((atomic_read(&queue->used_buffers) >=
3570 (QETH_HIGH_WATERMARK_PACK -
3571 QETH_WATERMARK_PACK_FUZZ)) &&
3572 !atomic_read(&queue->set_pci_flags_count)) {
3573 /* it's likely that we'll go to packing
3575 atomic_inc(&queue->set_pci_flags_count);
3576 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3579 if (!atomic_read(&queue->set_pci_flags_count)) {
3581 * there's no outstanding PCI any more, so we
3582 * have to request a PCI to be sure that the PCI
3583 * will wake at some time in the future; then we
3584 * can flush packed buffers that might still be
3585 * hanging around, which can happen if no
3586 * further send was requested by the stack
3588 atomic_inc(&queue->set_pci_flags_count);
3589 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3594 QETH_TXQ_STAT_INC(queue, doorbell);
3595 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
3601 /* ignore temporary SIGA errors without busy condition */
3603 /* Fake the TX completion interrupt: */
3604 frames = READ_ONCE(queue->max_coalesced_frames);
3605 usecs = READ_ONCE(queue->coalesce_usecs);
3607 if (frames && queue->coalesced_frames >= frames) {
3608 napi_schedule(&queue->napi);
3609 queue->coalesced_frames = 0;
3610 QETH_TXQ_STAT_INC(queue, coal_frames);
3611 } else if (qeth_use_tx_irqs(card) &&
3612 atomic_read(&queue->used_buffers) >= 32) {
3613 /* Old behaviour carried over from the qdio layer: */
3614 napi_schedule(&queue->napi);
3615 QETH_TXQ_STAT_INC(queue, coal_frames);
3617 qeth_tx_arm_timer(queue, usecs);
3622 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3623 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3624 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3625 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3626 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3628 /* this must not happen under normal circumstances. If it
3629 * happens, something is really wrong -> recover */
3630 qeth_schedule_recovery(queue->card);
3634 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3636 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3638 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3639 queue->prev_hdr = NULL;
3640 queue->bulk_count = 0;
3643 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3646 * check if we have to switch to non-packing mode or if
3647 * we have to get a PCI flag out on the queue
3649 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3650 !atomic_read(&queue->set_pci_flags_count)) {
3651 unsigned int index, flush_cnt;
3654 spin_lock(&queue->lock);
3656 index = queue->next_buf_to_fill;
3657 q_was_packing = queue->do_pack;
3659 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3660 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3661 flush_cnt = qeth_prep_flush_pack_buffer(queue);
3664 qeth_flush_buffers(queue, index, flush_cnt);
3666 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3669 spin_unlock(&queue->lock);
3673 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3675 struct qeth_card *card = (struct qeth_card *)card_ptr;
3677 napi_schedule_irqoff(&card->napi);
3680 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3684 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3688 if (card->options.cq == cq) {
3693 qeth_free_qdio_queues(card);
3694 card->options.cq = cq;
3701 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3703 static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
3705 struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
3706 unsigned int queue_no = priv->queue_no;
3708 BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
3710 if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
3711 queue_no < card->qdio.no_out_queues)
3712 napi_schedule(&card->qdio.out_qs[queue_no]->napi);
3715 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3716 unsigned int queue, int first_element,
3719 struct qeth_qdio_q *cq = card->qdio.c_q;
3723 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3724 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3725 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3728 netif_tx_stop_all_queues(card->dev);
3729 qeth_schedule_recovery(card);
3733 for (i = first_element; i < first_element + count; ++i) {
3734 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3737 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3738 buffer->element[e].addr) {
3739 unsigned long phys_aob_addr = buffer->element[e].addr;
3741 qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3744 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3746 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3747 cq->next_buf_to_init, count, NULL);
3749 dev_warn(&card->gdev->dev,
3750 "QDIO reported an error, rc=%i\n", rc);
3751 QETH_CARD_TEXT(card, 2, "qcqherr");
3754 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3757 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3758 unsigned int qdio_err, int queue,
3759 int first_elem, int count,
3760 unsigned long card_ptr)
3762 struct qeth_card *card = (struct qeth_card *)card_ptr;
3764 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3765 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3768 qeth_schedule_recovery(card);
3771 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3772 unsigned int qdio_error, int __queue,
3773 int first_element, int count,
3774 unsigned long card_ptr)
3776 struct qeth_card *card = (struct qeth_card *) card_ptr;
3778 QETH_CARD_TEXT(card, 2, "achkcond");
3779 netif_tx_stop_all_queues(card->dev);
3780 qeth_schedule_recovery(card);
3784 * Note: Function assumes that we have 4 outbound queues.
3786 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3788 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3791 switch (card->qdio.do_prio_queueing) {
3792 case QETH_PRIO_Q_ING_TOS:
3793 case QETH_PRIO_Q_ING_PREC:
3794 switch (vlan_get_protocol(skb)) {
3795 case htons(ETH_P_IP):
3796 tos = ipv4_get_dsfield(ip_hdr(skb));
3798 case htons(ETH_P_IPV6):
3799 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3802 return card->qdio.default_out_queue;
3804 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3805 return ~tos >> 6 & 3;
3806 if (tos & IPTOS_MINCOST)
3808 if (tos & IPTOS_RELIABILITY)
3810 if (tos & IPTOS_THROUGHPUT)
3812 if (tos & IPTOS_LOWDELAY)
3815 case QETH_PRIO_Q_ING_SKB:
3816 if (skb->priority > 5)
3818 return ~skb->priority >> 1 & 3;
3819 case QETH_PRIO_Q_ING_VLAN:
3820 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3821 return ~ntohs(veth->h_vlan_TCI) >>
3822 (VLAN_PRIO_SHIFT + 1) & 3;
3824 case QETH_PRIO_Q_ING_FIXED:
3825 return card->qdio.default_out_queue;
3829 return card->qdio.default_out_queue;
3831 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3834 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3837 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3838 * the fragmented part of the SKB. Returns zero for a linear SKB.
3840 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3842 int cnt, elements = 0;
3844 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3845 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3847 elements += qeth_get_elements_for_range(
3848 (addr_t)skb_frag_address(frag),
3849 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3855 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3856 * to transmit an skb.
3857 * @skb: the skb to operate on.
3858 * @data_offset: skip this part of the skb's linear data
3860 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3861 * skb's data (both its linear part and paged fragments).
3863 static unsigned int qeth_count_elements(struct sk_buff *skb,
3864 unsigned int data_offset)
3866 unsigned int elements = qeth_get_elements_for_frags(skb);
3867 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3868 addr_t start = (addr_t)skb->data + data_offset;
3871 elements += qeth_get_elements_for_range(start, end);
3875 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3879 * qeth_add_hw_header() - add a HW header to an skb.
3880 * @skb: skb that the HW header should be added to.
3881 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3882 * it contains a valid pointer to a qeth_hdr.
3883 * @hdr_len: length of the HW header.
3884 * @proto_len: length of protocol headers that need to be in same page as the
3887 * Returns the pushed length. If the header can't be pushed on
3888 * (e.g. because it would cross a page boundary), it is allocated from
3889 * the cache instead and 0 is returned.
3890 * The number of needed buffer elements is returned in @elements.
3891 * A failure to create the header is indicated by returning < 0.
3893 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3894 struct sk_buff *skb, struct qeth_hdr **hdr,
3895 unsigned int hdr_len, unsigned int proto_len,
3896 unsigned int *elements)
3898 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3899 const unsigned int contiguous = proto_len ? proto_len : 1;
3900 const unsigned int max_elements = queue->max_elements;
3901 unsigned int __elements;
3907 start = (addr_t)skb->data - hdr_len;
3908 end = (addr_t)skb->data;
3910 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3911 /* Push HW header into same page as first protocol header. */
3913 /* ... but TSO always needs a separate element for headers: */
3914 if (skb_is_gso(skb))
3915 __elements = 1 + qeth_count_elements(skb, proto_len);
3917 __elements = qeth_count_elements(skb, 0);
3918 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3919 /* Push HW header into preceding page, flush with skb->data. */
3921 __elements = 1 + qeth_count_elements(skb, 0);
3923 /* Use header cache, copy protocol headers up. */
3925 __elements = 1 + qeth_count_elements(skb, proto_len);
3928 /* Compress skb to fit into one IO buffer: */
3929 if (__elements > max_elements) {
3930 if (!skb_is_nonlinear(skb)) {
3931 /* Drop it, no easy way of shrinking it further. */
3932 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3933 max_elements, __elements, skb->len);
3937 rc = skb_linearize(skb);
3939 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3943 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3944 /* Linearization changed the layout, re-evaluate: */
3948 *elements = __elements;
3949 /* Add the header: */
3951 *hdr = skb_push(skb, hdr_len);
3955 /* Fall back to cache element with known-good alignment: */
3956 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3958 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
3961 /* Copy protocol headers behind HW header: */
3962 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3966 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3967 struct sk_buff *curr_skb,
3968 struct qeth_hdr *curr_hdr)
3970 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3971 struct qeth_hdr *prev_hdr = queue->prev_hdr;
3976 /* All packets must have the same target: */
3977 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3978 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3980 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3981 eth_hdr(curr_skb)->h_dest) &&
3982 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3985 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3986 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3990 * qeth_fill_buffer() - map skb into an output buffer
3991 * @buf: buffer to transport the skb
3992 * @skb: skb to map into the buffer
3993 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3994 * from qeth_core_header_cache.
3995 * @offset: when mapping the skb, start at skb->data + offset
3996 * @hd_len: if > 0, build a dedicated header element of this size
3998 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
3999 struct sk_buff *skb, struct qeth_hdr *hdr,
4000 unsigned int offset, unsigned int hd_len)
4002 struct qdio_buffer *buffer = buf->buffer;
4003 int element = buf->next_element_to_fill;
4004 int length = skb_headlen(skb) - offset;
4005 char *data = skb->data + offset;
4006 unsigned int elem_length, cnt;
4007 bool is_first_elem = true;
4009 __skb_queue_tail(&buf->skb_list, skb);
4011 /* build dedicated element for HW Header */
4013 is_first_elem = false;
4015 buffer->element[element].addr = virt_to_phys(hdr);
4016 buffer->element[element].length = hd_len;
4017 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4019 /* HW header is allocated from cache: */
4020 if ((void *)hdr != skb->data)
4021 __set_bit(element, buf->from_kmem_cache);
4022 /* HW header was pushed and is contiguous with linear part: */
4023 else if (length > 0 && !PAGE_ALIGNED(data) &&
4024 (data == (char *)hdr + hd_len))
4025 buffer->element[element].eflags |=
4026 SBAL_EFLAGS_CONTIGUOUS;
4031 /* map linear part into buffer element(s) */
4032 while (length > 0) {
4033 elem_length = min_t(unsigned int, length,
4034 PAGE_SIZE - offset_in_page(data));
4036 buffer->element[element].addr = virt_to_phys(data);
4037 buffer->element[element].length = elem_length;
4038 length -= elem_length;
4039 if (is_first_elem) {
4040 is_first_elem = false;
4041 if (length || skb_is_nonlinear(skb))
4042 /* skb needs additional elements */
4043 buffer->element[element].eflags =
4044 SBAL_EFLAGS_FIRST_FRAG;
4046 buffer->element[element].eflags = 0;
4048 buffer->element[element].eflags =
4049 SBAL_EFLAGS_MIDDLE_FRAG;
4052 data += elem_length;
4056 /* map page frags into buffer element(s) */
4057 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4058 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4060 data = skb_frag_address(frag);
4061 length = skb_frag_size(frag);
4062 while (length > 0) {
4063 elem_length = min_t(unsigned int, length,
4064 PAGE_SIZE - offset_in_page(data));
4066 buffer->element[element].addr = virt_to_phys(data);
4067 buffer->element[element].length = elem_length;
4068 buffer->element[element].eflags =
4069 SBAL_EFLAGS_MIDDLE_FRAG;
4071 length -= elem_length;
4072 data += elem_length;
4077 if (buffer->element[element - 1].eflags)
4078 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4079 buf->next_element_to_fill = element;
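/* IQD transmit path: append the skb to the current bulk buffer when it
 * may be bulked with the previous one, otherwise prime the buffer and
 * start a new bulk; stop the txq when all buffers are in use and flush
 * based on the BQL / xmit_more hint.
 */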
4083 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4084 struct sk_buff *skb, unsigned int elements,
4085 struct qeth_hdr *hdr, unsigned int offset,
4086 unsigned int hd_len)
4088 unsigned int bytes = qdisc_pkt_len(skb);
4089 struct qeth_qdio_out_buffer *buffer;
4090 unsigned int next_element;
4091 struct netdev_queue *txq;
4092 bool stopped = false;
4095 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4096 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4098 /* Just a sanity check, the wake/stop logic should ensure that we always
4099 * get a free buffer.
4101 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4104 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4107 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4108 if (buffer->next_element_to_fill > 0) {
4109 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4110 queue->bulk_count++;
4113 if (queue->bulk_count >= queue->bulk_max)
4117 qeth_flush_queue(queue);
4119 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4120 queue->bulk_count)];
4122 /* Sanity-check again: */
4123 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4127 if (buffer->next_element_to_fill == 0 &&
4128 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4129 /* If a TX completion happens right _here_ and fails to wake
4130 * the txq, then our re-check below will catch the race.
4132 QETH_TXQ_STAT_INC(queue, stopped);
4133 netif_tx_stop_queue(txq);
4137 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4138 buffer->bytes += bytes;
4139 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4140 queue->prev_hdr = hdr;
4142 flush = __netdev_tx_sent_queue(txq, bytes,
4143 !stopped && netdev_xmit_more());
4145 if (flush || next_element >= queue->max_elements) {
4146 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4147 queue->bulk_count++;
4149 if (queue->bulk_count >= queue->bulk_max)
4153 qeth_flush_queue(queue);
4156 if (stopped && !qeth_out_queue_is_full(queue))
4157 netif_tx_start_queue(txq);
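/* OSA transmit path with optional packing: switch the queue into packing
 * mode when it fills up, pack several skbs into one buffer where
 * possible, prime and flush buffers as needed, and stop the txq when it
 * runs full. Must be called with the queue lock held.
 */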
4161 static int qeth_do_send_packet(struct qeth_card *card,
4162 struct qeth_qdio_out_q *queue,
4163 struct sk_buff *skb, struct qeth_hdr *hdr,
4164 unsigned int offset, unsigned int hd_len,
4165 unsigned int elements_needed)
4167 unsigned int start_index = queue->next_buf_to_fill;
4168 struct qeth_qdio_out_buffer *buffer;
4169 unsigned int next_element;
4170 struct netdev_queue *txq;
4171 bool stopped = false;
4172 int flush_count = 0;
4176 buffer = queue->bufs[queue->next_buf_to_fill];
4178 /* Just a sanity check, the wake/stop logic should ensure that we always
4179 * get a free buffer.
4181 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4184 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4186 /* check if we need to switch packing state of this queue */
4187 qeth_switch_to_packing_if_needed(queue);
4188 if (queue->do_pack) {
4190 /* does packet fit in current buffer? */
4191 if (buffer->next_element_to_fill + elements_needed >
4192 queue->max_elements) {
4193 /* ... no -> set state PRIMED */
4194 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4196 queue->next_buf_to_fill =
4197 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4198 buffer = queue->bufs[queue->next_buf_to_fill];
4200 /* We stepped forward, so sanity-check again: */
4201 if (atomic_read(&buffer->state) !=
4202 QETH_QDIO_BUF_EMPTY) {
4203 qeth_flush_buffers(queue, start_index,
4211 if (buffer->next_element_to_fill == 0 &&
4212 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4213 /* If a TX completion happens right _here_ and fails to wake
4214 * the txq, then our re-check below will catch the race.
4216 QETH_TXQ_STAT_INC(queue, stopped);
4217 netif_tx_stop_queue(txq);
4221 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4222 buffer->bytes += qdisc_pkt_len(skb);
4223 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4226 QETH_TXQ_STAT_INC(queue, skbs_pack);
4227 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4229 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4230 queue->next_buf_to_fill =
4231 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4235 qeth_flush_buffers(queue, start_index, flush_count);
4239 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4241 if (stopped && !qeth_out_queue_is_full(queue))
4242 netif_tx_start_queue(txq);
4246 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4247 unsigned int payload_len, struct sk_buff *skb,
4248 unsigned int proto_len)
4250 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4252 ext->hdr_tot_len = sizeof(*ext);
4253 ext->imb_hdr_no = 1;
4255 ext->hdr_version = 1;
4257 ext->payload_len = payload_len;
4258 ext->mss = skb_shinfo(skb)->gso_size;
4259 ext->dg_hdr_len = proto_len;
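/* Common transmit entry point: build the HW header (TSO or plain, either
 * pushed into the skb or taken from the header cache), let the caller's
 * fill_header() complete it, and dispatch through the IQD fast path
 * (__qeth_xmit) or through qeth_do_send_packet() under the queue lock.
 */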
4262 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4263 struct qeth_qdio_out_q *queue, __be16 proto,
4264 void (*fill_header)(struct qeth_qdio_out_q *queue,
4265 struct qeth_hdr *hdr, struct sk_buff *skb,
4266 __be16 proto, unsigned int data_len))
4268 unsigned int proto_len, hw_hdr_len;
4269 unsigned int frame_len = skb->len;
4270 bool is_tso = skb_is_gso(skb);
4271 unsigned int data_offset = 0;
4272 struct qeth_hdr *hdr = NULL;
4273 unsigned int hd_len = 0;
4274 unsigned int elements;
4278 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4279 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4281 hw_hdr_len = sizeof(struct qeth_hdr);
4282 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4285 rc = skb_cow_head(skb, hw_hdr_len);
4289 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4293 if (is_tso || !push_len) {
4294 /* HW header needs its own buffer element. */
4295 hd_len = hw_hdr_len + proto_len;
4296 data_offset = push_len + proto_len;
4298 memset(hdr, 0, hw_hdr_len);
4299 fill_header(queue, hdr, skb, proto, frame_len);
4301 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4302 frame_len - proto_len, skb, proto_len);
4305 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4308 /* TODO: drop skb_orphan() once TX completion is fast enough */
4310 spin_lock(&queue->lock);
4311 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4313 spin_unlock(&queue->lock);
4316 if (rc && !push_len)
4317 kmem_cache_free(qeth_core_header_cache, hdr);
4321 EXPORT_SYMBOL_GPL(qeth_xmit);
4323 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4324 struct qeth_reply *reply, unsigned long data)
4326 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4327 struct qeth_ipacmd_setadpparms *setparms;
4329 QETH_CARD_TEXT(card, 4, "prmadpcb");
4331 setparms = &(cmd->data.setadapterparms);
4332 if (qeth_setadpparms_inspect_rc(cmd)) {
4333 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4334 setparms->data.mode = SET_PROMISC_MODE_OFF;
4336 card->info.promisc_mode = setparms->data.mode;
4337 return (cmd->hdr.return_code) ? -EIO : 0;
4340 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4342 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4343 SET_PROMISC_MODE_OFF;
4344 struct qeth_cmd_buffer *iob;
4345 struct qeth_ipa_cmd *cmd;
4347 QETH_CARD_TEXT(card, 4, "setprom");
4348 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4350 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4351 SETADP_DATA_SIZEOF(mode));
4354 cmd = __ipa_cmd(iob);
4355 cmd->data.setadapterparms.data.mode = mode;
4356 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4358 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4360 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4361 struct qeth_reply *reply, unsigned long data)
4363 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4364 struct qeth_ipacmd_setadpparms *adp_cmd;
4366 QETH_CARD_TEXT(card, 4, "chgmaccb");
4367 if (qeth_setadpparms_inspect_rc(cmd))
4370 adp_cmd = &cmd->data.setadapterparms;
4371 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4372 return -EADDRNOTAVAIL;
4374 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4375 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4376 return -EADDRNOTAVAIL;
4378 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4382 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4385 struct qeth_cmd_buffer *iob;
4386 struct qeth_ipa_cmd *cmd;
4388 QETH_CARD_TEXT(card, 4, "chgmac");
4390 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4391 SETADP_DATA_SIZEOF(change_addr));
4394 cmd = __ipa_cmd(iob);
4395 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4396 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4397 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4398 card->dev->dev_addr);
4399 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4403 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4405 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4406 struct qeth_reply *reply, unsigned long data)
4408 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4409 struct qeth_set_access_ctrl *access_ctrl_req;
4411 QETH_CARD_TEXT(card, 4, "setaccb");
4413 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4414 QETH_CARD_TEXT_(card, 2, "rc=%d",
4415 cmd->data.setadapterparms.hdr.return_code);
4416 if (cmd->data.setadapterparms.hdr.return_code !=
4417 SET_ACCESS_CTRL_RC_SUCCESS)
4418 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4419 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4420 cmd->data.setadapterparms.hdr.return_code);
4421 switch (qeth_setadpparms_inspect_rc(cmd)) {
4422 case SET_ACCESS_CTRL_RC_SUCCESS:
4423 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4424 dev_info(&card->gdev->dev,
4425 "QDIO data connection isolation is deactivated\n");
4427 dev_info(&card->gdev->dev,
4428 "QDIO data connection isolation is activated\n");
4430 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4431 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4434 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4435 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4438 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4439 dev_err(&card->gdev->dev, "Adapter does not "
4440 "support QDIO data connection isolation\n");
4442 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4443 dev_err(&card->gdev->dev,
4444 "Adapter is dedicated. "
4445 "QDIO data connection isolation not supported\n");
4447 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4448 dev_err(&card->gdev->dev,
4449 "TSO does not permit QDIO data connection isolation\n");
4451 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4452 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4453 "support reflective relay mode\n");
4455 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4456 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4457 "enabled at the adjacent switch port");
4459 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4460 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4461 "at the adjacent switch failed\n");
4462 /* benign error while disabling ISOLATION_MODE_FWD */
4469 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4470 enum qeth_ipa_isolation_modes mode)
4473 struct qeth_cmd_buffer *iob;
4474 struct qeth_ipa_cmd *cmd;
4475 struct qeth_set_access_ctrl *access_ctrl_req;
4477 QETH_CARD_TEXT(card, 4, "setacctl");
4479 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4480 dev_err(&card->gdev->dev,
4481 "Adapter does not support QDIO data connection isolation\n");
4485 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4486 SETADP_DATA_SIZEOF(set_access_ctrl));
4489 cmd = __ipa_cmd(iob);
4490 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4491 access_ctrl_req->subcmd_code = mode;
4493 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4496 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4497 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4498 rc, CARD_DEVID(card));
4504 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4506 struct qeth_card *card;
4508 card = dev->ml_priv;
4509 QETH_CARD_TEXT(card, 4, "txtimeo");
4510 qeth_schedule_recovery(card);
4512 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4514 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4516 struct qeth_card *card = dev->ml_priv;
4520 case MII_BMCR: /* Basic mode control register */
4522 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4523 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4524 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4525 rc |= BMCR_SPEED100;
4527 case MII_BMSR: /* Basic mode status register */
4528 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4529 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4532 case MII_PHYSID1: /* PHYS ID 1 */
4533 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4535 rc = (rc >> 5) & 0xFFFF;
4537 case MII_PHYSID2: /* PHYS ID 2 */
4538 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4540 case MII_ADVERTISE: /* Advertisement control reg */
4543 case MII_LPA: /* Link partner ability reg */
4544 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4545 LPA_100BASE4 | LPA_LPACK;
4547 case MII_EXPANSION: /* Expansion register */
4549 case MII_DCOUNTER: /* disconnect counter */
4551 case MII_FCSCOUNTER: /* false carrier counter */
4553 case MII_NWAYTEST: /* N-way auto-neg test register */
4555 case MII_RERRCOUNTER: /* rx error counter */
4556 rc = card->stats.rx_length_errors +
4557 card->stats.rx_frame_errors +
4558 card->stats.rx_fifo_errors;
4560 case MII_SREVISION: /* silicon revision */
4562 case MII_RESV1: /* reserved 1 */
4564 case MII_LBRERROR: /* loopback, rx, bypass error */
4566 case MII_PHYADDR: /* physical address */
4568 case MII_RESV2: /* reserved 2 */
4570 case MII_TPISTATUS: /* TPI status for 10mbps */
4572 case MII_NCONFIG: /* network interface config */
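/*
 * Callback for SET_SNMP_CONTROL: copies each reply fragment into the
 * userspace-sized buffer described by qinfo, after checking that enough
 * room remains. A reply may span several fragments (seq_no < used_total).
 */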
4580 static int qeth_snmp_command_cb(struct qeth_card *card,
4581 struct qeth_reply *reply, unsigned long data)
4583 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4584 struct qeth_arp_query_info *qinfo = reply->param;
4585 struct qeth_ipacmd_setadpparms *adp_cmd;
4586 unsigned int data_len;
4589 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4591 if (cmd->hdr.return_code) {
4592 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4595 if (cmd->data.setadapterparms.hdr.return_code) {
4596 cmd->hdr.return_code =
4597 cmd->data.setadapterparms.hdr.return_code;
4598 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4602 adp_cmd = &cmd->data.setadapterparms;
4603 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4604 if (adp_cmd->hdr.seq_no == 1) {
4605 snmp_data = &adp_cmd->data.snmp;
4607 snmp_data = &adp_cmd->data.snmp.request;
4608 data_len -= offsetof(struct qeth_snmp_cmd, request);
4611 /* check if there is enough room in userspace */
4612 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4613 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4616 QETH_CARD_TEXT_(card, 4, "snore%i",
4617 cmd->data.setadapterparms.hdr.used_total);
4618 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4619 cmd->data.setadapterparms.hdr.seq_no);
4620 /* copy entries to user buffer */
4621 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4622 qinfo->udata_offset += data_len;
4624 if (cmd->data.setadapterparms.hdr.seq_no <
4625 cmd->data.setadapterparms.hdr.used_total)
4630 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4632 struct qeth_snmp_ureq __user *ureq;
4633 struct qeth_cmd_buffer *iob;
4634 unsigned int req_len;
4635 struct qeth_arp_query_info qinfo = {0, };
4638 QETH_CARD_TEXT(card, 3, "snmpcmd");
4640 if (IS_VM_NIC(card))
4643 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4647 ureq = (struct qeth_snmp_ureq __user *) udata;
4648 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4649 get_user(req_len, &ureq->hdr.req_len))
4652 /* Sanitize user input, to avoid overflows in iob size calculation: */
4653 if (req_len > QETH_BUFSIZE)
4656 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4660 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4661 &ureq->cmd, req_len)) {
4666 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4671 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4673 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4675 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4676 CARD_DEVID(card), rc);
4678 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4686 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4687 struct qeth_reply *reply,
4690 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4691 struct qeth_qoat_priv *priv = reply->param;
4694 QETH_CARD_TEXT(card, 3, "qoatcb");
4695 if (qeth_setadpparms_inspect_rc(cmd))
4698 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4700 if (resdatalen > (priv->buffer_len - priv->response_len))
4703 memcpy(priv->buffer + priv->response_len,
4704 &cmd->data.setadapterparms.hdr, resdatalen);
4705 priv->response_len += resdatalen;
4707 if (cmd->data.setadapterparms.hdr.seq_no <
4708 cmd->data.setadapterparms.hdr.used_total)
4713 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4716 struct qeth_cmd_buffer *iob;
4717 struct qeth_ipa_cmd *cmd;
4718 struct qeth_query_oat *oat_req;
4719 struct qeth_query_oat_data oat_data;
4720 struct qeth_qoat_priv priv;
4723 QETH_CARD_TEXT(card, 3, "qoatcmd");
4725 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4728 if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4731 priv.buffer_len = oat_data.buffer_len;
4732 priv.response_len = 0;
4733 priv.buffer = vzalloc(oat_data.buffer_len);
4737 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4738 SETADP_DATA_SIZEOF(query_oat));
4743 cmd = __ipa_cmd(iob);
4744 oat_req = &cmd->data.setadapterparms.data.query_oat;
4745 oat_req->subcmd_code = oat_data.command;
4747 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4749 tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4750 u64_to_user_ptr(oat_data.ptr);
4751 oat_data.response_len = priv.response_len;
4753 if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4754 copy_to_user(udata, &oat_data, sizeof(oat_data)))
4763 static int qeth_query_card_info_cb(struct qeth_card *card,
4764 struct qeth_reply *reply, unsigned long data)
4766 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4767 struct qeth_link_info *link_info = reply->param;
4768 struct qeth_query_card_info *card_info;
4770 QETH_CARD_TEXT(card, 2, "qcrdincb");
4771 if (qeth_setadpparms_inspect_rc(cmd))
4774 card_info = &cmd->data.setadapterparms.data.card_info;
4775 netdev_dbg(card->dev,
4776 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
4777 card_info->card_type, card_info->port_mode,
4778 card_info->port_speed);
4780 switch (card_info->port_mode) {
4781 case CARD_INFO_PORTM_FULLDUPLEX:
4782 link_info->duplex = DUPLEX_FULL;
4784 case CARD_INFO_PORTM_HALFDUPLEX:
4785 link_info->duplex = DUPLEX_HALF;
4788 link_info->duplex = DUPLEX_UNKNOWN;
4791 switch (card_info->card_type) {
4792 case CARD_INFO_TYPE_1G_COPPER_A:
4793 case CARD_INFO_TYPE_1G_COPPER_B:
4794 link_info->speed = SPEED_1000;
4795 link_info->port = PORT_TP;
4797 case CARD_INFO_TYPE_1G_FIBRE_A:
4798 case CARD_INFO_TYPE_1G_FIBRE_B:
4799 link_info->speed = SPEED_1000;
4800 link_info->port = PORT_FIBRE;
4802 case CARD_INFO_TYPE_10G_FIBRE_A:
4803 case CARD_INFO_TYPE_10G_FIBRE_B:
4804 link_info->speed = SPEED_10000;
4805 link_info->port = PORT_FIBRE;
4808 switch (card_info->port_speed) {
4809 case CARD_INFO_PORTS_10M:
4810 link_info->speed = SPEED_10;
4812 case CARD_INFO_PORTS_100M:
4813 link_info->speed = SPEED_100;
4815 case CARD_INFO_PORTS_1G:
4816 link_info->speed = SPEED_1000;
4818 case CARD_INFO_PORTS_10G:
4819 link_info->speed = SPEED_10000;
4821 case CARD_INFO_PORTS_25G:
4822 link_info->speed = SPEED_25000;
4825 link_info->speed = SPEED_UNKNOWN;
4828 link_info->port = PORT_OTHER;
4834 int qeth_query_card_info(struct qeth_card *card,
4835 struct qeth_link_info *link_info)
4837 struct qeth_cmd_buffer *iob;
4839 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4840 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4842 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4846 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
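/*
 * Callback for the QUERY OAT issued by qeth_init_link_info(): parses the
 * physical-interface part of the reply into speed, duplex, port type and
 * link mode. Multi-part replies are not expected here and are ignored.
 */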
4849 static int qeth_init_link_info_oat_cb(struct qeth_card *card,
4850 struct qeth_reply *reply_priv,
4853 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4854 struct qeth_link_info *link_info = reply_priv->param;
4855 struct qeth_query_oat_physical_if *phys_if;
4856 struct qeth_query_oat_reply *reply;
4858 if (qeth_setadpparms_inspect_rc(cmd))
4861 /* Multi-part reply is unexpected, don't bother: */
4862 if (cmd->data.setadapterparms.hdr.used_total > 1)
4865 /* Expect the reply to start with phys_if data: */
4866 reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
4867 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
4868 reply->length < sizeof(*reply))
4871 phys_if = &reply->phys_if;
4873 switch (phys_if->speed_duplex) {
4874 case QETH_QOAT_PHYS_SPEED_10M_HALF:
4875 link_info->speed = SPEED_10;
4876 link_info->duplex = DUPLEX_HALF;
4878 case QETH_QOAT_PHYS_SPEED_10M_FULL:
4879 link_info->speed = SPEED_10;
4880 link_info->duplex = DUPLEX_FULL;
4882 case QETH_QOAT_PHYS_SPEED_100M_HALF:
4883 link_info->speed = SPEED_100;
4884 link_info->duplex = DUPLEX_HALF;
4886 case QETH_QOAT_PHYS_SPEED_100M_FULL:
4887 link_info->speed = SPEED_100;
4888 link_info->duplex = DUPLEX_FULL;
4890 case QETH_QOAT_PHYS_SPEED_1000M_HALF:
4891 link_info->speed = SPEED_1000;
4892 link_info->duplex = DUPLEX_HALF;
4894 case QETH_QOAT_PHYS_SPEED_1000M_FULL:
4895 link_info->speed = SPEED_1000;
4896 link_info->duplex = DUPLEX_FULL;
4898 case QETH_QOAT_PHYS_SPEED_10G_FULL:
4899 link_info->speed = SPEED_10000;
4900 link_info->duplex = DUPLEX_FULL;
4902 case QETH_QOAT_PHYS_SPEED_25G_FULL:
4903 link_info->speed = SPEED_25000;
4904 link_info->duplex = DUPLEX_FULL;
4906 case QETH_QOAT_PHYS_SPEED_UNKNOWN:
4908 link_info->speed = SPEED_UNKNOWN;
4909 link_info->duplex = DUPLEX_UNKNOWN;
4913 switch (phys_if->media_type) {
4914 case QETH_QOAT_PHYS_MEDIA_COPPER:
4915 link_info->port = PORT_TP;
4916 link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4918 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4919 link_info->port = PORT_FIBRE;
4920 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4922 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
4923 link_info->port = PORT_FIBRE;
4924 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4927 link_info->port = PORT_OTHER;
4928 link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4935 static void qeth_init_link_info(struct qeth_card *card)
4937 card->info.link_info.duplex = DUPLEX_FULL;
4939 if (IS_IQD(card) || IS_VM_NIC(card)) {
4940 card->info.link_info.speed = SPEED_10000;
4941 card->info.link_info.port = PORT_FIBRE;
4942 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4944 switch (card->info.link_type) {
4945 case QETH_LINK_TYPE_FAST_ETH:
4946 case QETH_LINK_TYPE_LANE_ETH100:
4947 card->info.link_info.speed = SPEED_100;
4948 card->info.link_info.port = PORT_TP;
4950 case QETH_LINK_TYPE_GBIT_ETH:
4951 case QETH_LINK_TYPE_LANE_ETH1000:
4952 card->info.link_info.speed = SPEED_1000;
4953 card->info.link_info.port = PORT_FIBRE;
4955 case QETH_LINK_TYPE_10GBIT_ETH:
4956 card->info.link_info.speed = SPEED_10000;
4957 card->info.link_info.port = PORT_FIBRE;
4959 case QETH_LINK_TYPE_25GBIT_ETH:
4960 card->info.link_info.speed = SPEED_25000;
4961 card->info.link_info.port = PORT_FIBRE;
4964 dev_info(&card->gdev->dev, "Unknown link type %x\n",
4965 card->info.link_type);
4966 card->info.link_info.speed = SPEED_UNKNOWN;
4967 card->info.link_info.port = PORT_OTHER;
4970 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
4973 /* Get more accurate data via QUERY OAT: */
4974 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4975 struct qeth_link_info link_info;
4976 struct qeth_cmd_buffer *iob;
4978 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4979 SETADP_DATA_SIZEOF(query_oat));
4981 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
4982 struct qeth_query_oat *oat_req;
4984 oat_req = &cmd->data.setadapterparms.data.query_oat;
4985 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
4987 if (!qeth_send_ipa_cmd(card, iob,
4988 qeth_init_link_info_oat_cb,
4990 if (link_info.speed != SPEED_UNKNOWN)
4991 card->info.link_info.speed = link_info.speed;
4992 if (link_info.duplex != DUPLEX_UNKNOWN)
4993 card->info.link_info.duplex = link_info.duplex;
4994 if (link_info.port != PORT_OTHER)
4995 card->info.link_info.port = link_info.port;
4996 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
4997 card->info.link_info.link_mode = link_info.link_mode;
5004 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
5005 * @card: pointer to a qeth_card
5008 * Returns: 0 if a MAC address has been set for the card's netdevice,
5009 * or an error code for the various error conditions.
5011 int qeth_vm_request_mac(struct qeth_card *card)
5013 struct diag26c_mac_resp *response;
5014 struct diag26c_mac_req *request;
5017 QETH_CARD_TEXT(card, 2, "vmreqmac");
5019 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
5020 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
5021 if (!request || !response) {
5026 request->resp_buf_len = sizeof(*response);
5027 request->resp_version = DIAG26C_VERSION2;
5028 request->op_code = DIAG26C_GET_MAC;
5029 request->devno = card->info.ddev_devno;
5031 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5032 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5033 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5036 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5038 if (request->resp_buf_len < sizeof(*response) ||
5039 response->version != request->resp_version) {
5041 QETH_CARD_TEXT(card, 2, "badresp");
5042 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
5043 sizeof(request->resp_buf_len));
5044 } else if (!is_valid_ether_addr(response->mac)) {
5046 QETH_CARD_TEXT(card, 2, "badmac");
5047 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5049 ether_addr_copy(card->dev->dev_addr, response->mac);
5057 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
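/*
 * Read the configuration data and the QDIO SSQD descriptor to determine
 * the card's capabilities, in particular whether Completion Queueing is
 * available. The data channel is started temporarily if it is not online
 * yet, and stopped again afterwards.
 */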
5059 static void qeth_determine_capabilities(struct qeth_card *card)
5061 struct qeth_channel *channel = &card->data;
5062 struct ccw_device *ddev = channel->ccwdev;
5064 int ddev_offline = 0;
5066 QETH_CARD_TEXT(card, 2, "detcapab");
5067 if (!ddev->online) {
5069 rc = qeth_start_channel(channel);
5071 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5076 rc = qeth_read_conf_data(card);
5078 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5079 CARD_DEVID(card), rc);
5080 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5084 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5086 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5088 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5089 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5090 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5091 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5092 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5093 if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
5094 (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
5095 (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
5096 dev_info(&card->gdev->dev,
5097 "Completion Queueing supported\n");
5099 card->options.cq = QETH_CQ_NOTAVAILABLE;
5103 if (ddev_offline == 1)
5104 qeth_stop_channel(channel);
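/*
 * Cache the identifiers of the data CCW device (devno, cssid, iid, chid,
 * ssid) in the card info and trace them for debugging.
 */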
5109 static void qeth_read_ccw_conf_data(struct qeth_card *card)
5111 struct qeth_card_info *info = &card->info;
5112 struct ccw_device *cdev = CARD_DDEV(card);
5113 struct ccw_dev_id dev_id;
5115 QETH_CARD_TEXT(card, 2, "ccwconfd");
5116 ccw_device_get_id(cdev, &dev_id);
5118 info->ddev_devno = dev_id.devno;
5119 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5120 !ccw_device_get_iid(cdev, &info->iid) &&
5121 !ccw_device_get_chid(cdev, 0, &info->chid);
5122 info->ssid = dev_id.ssid;
5124 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5125 info->chid, info->chpid);
5127 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5128 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5129 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5130 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5131 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5132 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5133 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5136 static int qeth_qdio_establish(struct qeth_card *card)
5138 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5139 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5140 struct qeth_qib_parms *qib_parms = NULL;
5141 struct qdio_initialize init_data;
5145 QETH_CARD_TEXT(card, 2, "qdioest");
5147 if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5148 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5152 qeth_fill_qib_parms(card, qib_parms);
5155 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5156 if (card->options.cq == QETH_CQ_ENABLED)
5157 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5159 for (i = 0; i < card->qdio.no_out_queues; i++)
5160 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5162 memset(&init_data, 0, sizeof(struct qdio_initialize));
5163 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5165 init_data.qib_param_field_format = 0;
5166 init_data.qib_param_field = (void *)qib_parms;
5167 init_data.no_input_qs = card->qdio.no_in_queues;
5168 init_data.no_output_qs = card->qdio.no_out_queues;
5169 init_data.input_handler = qeth_qdio_input_handler;
5170 init_data.output_handler = qeth_qdio_output_handler;
5171 init_data.irq_poll = qeth_qdio_poll;
5172 init_data.int_parm = (unsigned long) card;
5173 init_data.input_sbal_addr_array = in_sbal_ptrs;
5174 init_data.output_sbal_addr_array = out_sbal_ptrs;
5176 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5177 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5178 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5179 init_data.no_output_qs);
5181 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5184 rc = qdio_establish(CARD_DDEV(card), &init_data);
5186 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5187 qdio_free(CARD_DDEV(card));
5191 switch (card->options.cq) {
5192 case QETH_CQ_ENABLED:
5193 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5195 case QETH_CQ_DISABLED:
5196 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5207 static void qeth_core_free_card(struct qeth_card *card)
5209 QETH_CARD_TEXT(card, 2, "freecrd");
5211 unregister_service_level(&card->qeth_service_level);
5212 debugfs_remove_recursive(card->debugfs);
5213 qeth_put_cmd(card->read_cmd);
5214 destroy_workqueue(card->event_wq);
5215 dev_set_drvdata(&card->gdev->dev, NULL);
5219 static void qeth_trace_features(struct qeth_card *card)
5221 QETH_CARD_TEXT(card, 2, "features");
5222 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5223 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5224 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5225 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5226 sizeof(card->info.diagass_support));
5229 static struct ccw_device_id qeth_ids[] = {
5230 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5231 .driver_info = QETH_CARD_TYPE_OSD},
5232 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5233 .driver_info = QETH_CARD_TYPE_IQD},
5234 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5235 .driver_info = QETH_CARD_TYPE_OSM},
5236 #ifdef CONFIG_QETH_OSX
5237 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5238 .driver_info = QETH_CARD_TYPE_OSX},
5242 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5244 static struct ccw_driver qeth_ccw_driver = {
5246 .owner = THIS_MODULE,
5250 .probe = ccwgroup_probe_ccwdev,
5251 .remove = ccwgroup_remove_ccwdev,
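/*
 * Low-level part of the online sequence: (re)start the read/write/data
 * channels, run IDX activation, initialize the MPC connection, query the
 * IPA assists and adapter parameters, and finally set up the QDIO queues.
 * *carrier_ok tells the caller whether STARTLAN reported the LAN as up.
 */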
5254 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5259 QETH_CARD_TEXT(card, 2, "hrdsetup");
5260 atomic_set(&card->force_alloc_skb, 0);
5261 rc = qeth_update_from_chp_desc(card);
5266 QETH_DBF_MESSAGE(2, "Retrying IDX activation on device %x.\n",
5268 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5269 qeth_stop_channel(&card->data);
5270 qeth_stop_channel(&card->write);
5271 qeth_stop_channel(&card->read);
5272 qdio_free(CARD_DDEV(card));
5274 rc = qeth_start_channel(&card->read);
5277 rc = qeth_start_channel(&card->write);
5280 rc = qeth_start_channel(&card->data);
5284 if (rc == -ERESTARTSYS) {
5285 QETH_CARD_TEXT(card, 2, "break1");
5288 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5295 qeth_determine_capabilities(card);
5296 qeth_read_ccw_conf_data(card);
5297 qeth_idx_init(card);
5299 rc = qeth_idx_activate_read_channel(card);
5301 QETH_CARD_TEXT(card, 2, "break2");
5304 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5311 rc = qeth_idx_activate_write_channel(card);
5313 QETH_CARD_TEXT(card, 2, "break3");
5316 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5322 card->read_or_write_problem = 0;
5323 rc = qeth_mpc_initialize(card);
5325 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5329 rc = qeth_send_startlan(card);
5331 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5332 if (rc == -ENETDOWN) {
5333 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5334 *carrier_ok = false;
5342 card->options.ipa4.supported = 0;
5343 card->options.ipa6.supported = 0;
5344 card->options.adp.supported = 0;
5345 card->options.sbp.supported_funcs = 0;
5346 card->info.diagass_support = 0;
5347 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5350 if (qeth_is_supported(card, IPA_IPV6)) {
5351 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5355 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5356 rc = qeth_query_setadapterparms(card);
5358 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5362 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5363 rc = qeth_query_setdiagass(card);
5365 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5368 qeth_trace_features(card);
5370 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5371 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5372 card->info.hwtrap = 0;
5374 if (card->options.isolation != ISOLATION_MODE_NONE) {
5375 rc = qeth_setadpparms_set_access_ctrl(card,
5376 card->options.isolation);
5381 qeth_init_link_info(card);
5383 rc = qeth_init_qdio_queues(card);
5385 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5391 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5392 "an error on the device\n");
5393 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5394 CARD_DEVID(card), rc);
5398 static int qeth_set_online(struct qeth_card *card,
5399 const struct qeth_discipline *disc)
5404 mutex_lock(&card->conf_mutex);
5405 QETH_CARD_TEXT(card, 2, "setonlin");
5407 rc = qeth_hardsetup_card(card, &carrier_ok);
5409 QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5414 qeth_print_status_message(card);
5416 if (card->dev->reg_state != NETREG_REGISTERED)
5417 /* no need for locking / error handling at this early stage: */
5418 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5420 rc = disc->set_online(card, carrier_ok);
5424 /* let user space know that the device is online */
5425 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5427 mutex_unlock(&card->conf_mutex);
5432 qeth_qdio_clear_card(card, 0);
5433 qeth_clear_working_pool_list(card);
5434 qeth_flush_local_addrs(card);
5436 qeth_stop_channel(&card->data);
5437 qeth_stop_channel(&card->write);
5438 qeth_stop_channel(&card->read);
5439 qdio_free(CARD_DDEV(card));
5441 mutex_unlock(&card->conf_mutex);
5445 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5450 mutex_lock(&card->conf_mutex);
5451 QETH_CARD_TEXT(card, 3, "setoffl");
5453 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5454 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5455 card->info.hwtrap = 1;
5458 /* cancel any stalled cmd that might block the rtnl: */
5459 qeth_clear_ipacmd_list(card);
5462 card->info.open_when_online = card->dev->flags & IFF_UP;
5463 dev_close(card->dev);
5464 netif_device_detach(card->dev);
5465 netif_carrier_off(card->dev);
5468 cancel_work_sync(&card->rx_mode_work);
5470 disc->set_offline(card);
5472 qeth_qdio_clear_card(card, 0);
5473 qeth_drain_output_queues(card);
5474 qeth_clear_working_pool_list(card);
5475 qeth_flush_local_addrs(card);
5476 card->info.promisc_mode = 0;
5478 rc = qeth_stop_channel(&card->data);
5479 rc2 = qeth_stop_channel(&card->write);
5480 rc3 = qeth_stop_channel(&card->read);
5482 rc = (rc2) ? rc2 : rc3;
5484 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5485 qdio_free(CARD_DDEV(card));
5487 /* let user space know that the device is offline */
5488 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5490 mutex_unlock(&card->conf_mutex);
5493 EXPORT_SYMBOL_GPL(qeth_set_offline);
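/*
 * Recovery worker: takes the device offline and back online with the
 * current discipline. If recovery fails, the ccwgroup device is set
 * offline and a warning is issued.
 */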
5495 static int qeth_do_reset(void *data)
5497 const struct qeth_discipline *disc;
5498 struct qeth_card *card = data;
5501 /* Lock-free, other users will block until we are done. */
5502 disc = card->discipline;
5504 QETH_CARD_TEXT(card, 2, "recover1");
5505 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5507 QETH_CARD_TEXT(card, 2, "recover2");
5508 dev_warn(&card->gdev->dev,
5509 "A recovery process has been started for the device\n");
5511 qeth_set_offline(card, disc, true);
5512 rc = qeth_set_online(card, disc);
5514 dev_info(&card->gdev->dev,
5515 "Device successfully recovered!\n");
5517 ccwgroup_set_offline(card->gdev);
5518 dev_warn(&card->gdev->dev,
5519 "The qeth device driver failed to recover an error on the device\n");
5521 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5522 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5526 #if IS_ENABLED(CONFIG_QETH_L3)
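/*
 * Rebuild the information that the Layer-3 HW header lacks: fake an
 * Ethernet header (special-casing AF_IUCV traffic on IQD), derive the
 * destination MAC from the cast type, and pull a VLAN tag out of the
 * qeth header extension flags if present.
 */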
5527 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5528 struct qeth_hdr *hdr)
5530 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5531 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5532 struct net_device *dev = skb->dev;
5534 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5535 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5536 "FAKELL", skb->len);
5540 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5541 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5543 unsigned char tg_addr[ETH_ALEN];
5545 skb_reset_network_header(skb);
5546 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5547 case QETH_CAST_MULTICAST:
5548 if (prot == ETH_P_IP)
5549 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5551 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5552 QETH_CARD_STAT_INC(card, rx_multicast);
5554 case QETH_CAST_BROADCAST:
5555 ether_addr_copy(tg_addr, dev->broadcast);
5556 QETH_CARD_STAT_INC(card, rx_multicast);
5559 if (card->options.sniffer)
5560 skb->pkt_type = PACKET_OTHERHOST;
5561 ether_addr_copy(tg_addr, dev->dev_addr);
5564 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5565 dev_hard_header(skb, dev, prot, tg_addr,
5566 &l3_hdr->next_hop.rx.src_mac, skb->len);
5568 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5572 /* copy VLAN tag from hdr into skb */
5573 if (!card->options.sniffer &&
5574 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5575 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5576 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5578 l3_hdr->next_hop.rx.vlan_id;
5580 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
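/*
 * Hand a completed skb to the stack: apply the RX checksum offload
 * result, update the RX statistics and pass the skb up via GRO, either
 * as NAPI frags or as a regular skb.
 */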
5585 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5586 struct qeth_hdr *hdr, bool uses_frags)
5588 struct napi_struct *napi = &card->napi;
5591 switch (hdr->hdr.l2.id) {
5592 #if IS_ENABLED(CONFIG_QETH_L3)
5593 case QETH_HEADER_TYPE_LAYER3:
5594 qeth_l3_rebuild_skb(card, skb, hdr);
5595 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5598 case QETH_HEADER_TYPE_LAYER2:
5599 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5604 napi_free_frags(napi);
5606 dev_kfree_skb_any(skb);
5610 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5611 skb->ip_summed = CHECKSUM_UNNECESSARY;
5612 QETH_CARD_STAT_INC(card, rx_skb_csum);
5614 skb->ip_summed = CHECKSUM_NONE;
5617 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5618 QETH_CARD_STAT_INC(card, rx_packets);
5619 if (skb_is_nonlinear(skb)) {
5620 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5621 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5622 skb_shinfo(skb)->nr_frags);
5626 napi_gro_frags(napi);
5628 skb->protocol = eth_type_trans(skb, skb->dev);
5629 napi_gro_receive(napi, skb);
5633 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5635 struct page *page = virt_to_page(data);
5636 unsigned int next_frag;
5638 next_frag = skb_shinfo(skb)->nr_frags;
5640 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5644 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5646 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
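/*
 * Extract a single packet from an inbound QDIO buffer, starting at
 * *element_no / *__offset. Small packets are copied into a linear skb;
 * larger ones (or all packets while the CQ is enabled) are built from
 * page frags, with rx_copybreak as the cut-over point.
 */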
5649 static int qeth_extract_skb(struct qeth_card *card,
5650 struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5653 struct qeth_priv *priv = netdev_priv(card->dev);
5654 struct qdio_buffer *buffer = qethbuffer->buffer;
5655 struct napi_struct *napi = &card->napi;
5656 struct qdio_buffer_element *element;
5657 unsigned int linear_len = 0;
5658 bool uses_frags = false;
5659 int offset = *__offset;
5660 bool use_rx_sg = false;
5661 unsigned int headroom;
5662 struct qeth_hdr *hdr;
5663 struct sk_buff *skb;
5666 element = &buffer->element[*element_no];
5669 /* qeth_hdr must not cross element boundaries */
5670 while (element->length < offset + sizeof(struct qeth_hdr)) {
5671 if (qeth_is_last_sbale(element))
5677 hdr = phys_to_virt(element->addr) + offset;
5678 offset += sizeof(*hdr);
5681 switch (hdr->hdr.l2.id) {
5682 case QETH_HEADER_TYPE_LAYER2:
5683 skb_len = hdr->hdr.l2.pkt_length;
5684 linear_len = ETH_HLEN;
5687 case QETH_HEADER_TYPE_LAYER3:
5688 skb_len = hdr->hdr.l3.length;
5689 if (!IS_LAYER3(card)) {
5690 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5694 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5695 linear_len = ETH_HLEN;
5700 if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5701 linear_len = sizeof(struct ipv6hdr);
5703 linear_len = sizeof(struct iphdr);
5704 headroom = ETH_HLEN;
5707 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5708 QETH_CARD_STAT_INC(card, rx_frame_errors);
5710 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5712 /* Can't determine packet length, drop the whole buffer. */
5713 return -EPROTONOSUPPORT;
5716 if (skb_len < linear_len) {
5717 QETH_CARD_STAT_INC(card, rx_dropped_runt);
5721 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5722 (skb_len > READ_ONCE(priv->rx_copybreak) &&
5723 !atomic_read(&card->force_alloc_skb));
5726 /* QETH_CQ_ENABLED only: */
5727 if (qethbuffer->rx_skb &&
5728 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5729 skb = qethbuffer->rx_skb;
5730 qethbuffer->rx_skb = NULL;
5734 skb = napi_get_frags(napi);
5736 /* -ENOMEM, no point in falling back further. */
5737 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5741 if (skb_tailroom(skb) >= linear_len + headroom) {
5746 netdev_info_once(card->dev,
5747 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5748 linear_len + headroom, skb_tailroom(skb));
5749 /* Shouldn't happen. Don't optimize, fall back to linear skb. */
5752 linear_len = skb_len;
5753 skb = napi_alloc_skb(napi, linear_len + headroom);
5755 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5761 skb_reserve(skb, headroom);
5764 int data_len = min(skb_len, (int)(element->length - offset));
5765 char *data = phys_to_virt(element->addr) + offset;
5767 skb_len -= data_len;
5770 /* Extract data from current element: */
5771 if (skb && data_len) {
5773 unsigned int copy_len;
5775 copy_len = min_t(unsigned int, linear_len,
5778 skb_put_data(skb, data, copy_len);
5779 linear_len -= copy_len;
5780 data_len -= copy_len;
5785 qeth_create_skb_frag(skb, data, data_len);
5788 /* Step forward to next element: */
5790 if (qeth_is_last_sbale(element)) {
5791 QETH_CARD_TEXT(card, 4, "unexeob");
5792 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5795 napi_free_frags(napi);
5797 dev_kfree_skb_any(skb);
5798 QETH_CARD_STAT_INC(card,
5808 /* This packet was skipped, go get another one: */
5812 *element_no = element - &buffer->element[0];
5815 qeth_receive_skb(card, skb, hdr, uses_frags);
5819 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5820 struct qeth_qdio_buffer *buf, bool *done)
5822 unsigned int work_done = 0;
5825 if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5826 &card->rx.e_offset)) {
5838 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5840 struct qeth_rx *ctx = &card->rx;
5841 unsigned int work_done = 0;
5843 while (budget > 0) {
5844 struct qeth_qdio_buffer *buffer;
5845 unsigned int skbs_done = 0;
5848 /* Fetch completed RX buffers: */
5849 if (!card->rx.b_count) {
5850 card->rx.qdio_err = 0;
5851 card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
5854 &card->rx.qdio_err);
5855 if (card->rx.b_count <= 0) {
5856 card->rx.b_count = 0;
5861 /* Process one completed RX buffer: */
5862 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5863 if (!(card->rx.qdio_err &&
5864 qeth_check_qdio_errors(card, buffer->buffer,
5865 card->rx.qdio_err, "qinerr")))
5866 skbs_done = qeth_extract_skbs(card, budget, buffer,
5871 work_done += skbs_done;
5872 budget -= skbs_done;
5875 QETH_CARD_STAT_INC(card, rx_bufs);
5876 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5877 buffer->pool_entry = NULL;
5880 ctx->bufs_refill -= qeth_rx_refill_queue(card,
5883 /* Step forward to next buffer: */
5884 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5885 card->rx.buf_element = 0;
5886 card->rx.e_offset = 0;
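/*
 * Drain the Completion Queue (input queue 1) and feed the completed
 * entries to qeth_qdio_cq_handler().
 */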
5893 static void qeth_cq_poll(struct qeth_card *card)
5895 unsigned int work_done = 0;
5897 while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5898 unsigned int start, error;
5901 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5906 qeth_qdio_cq_handler(card, error, 1, start, completed);
5907 work_done += completed;
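/*
 * Main NAPI poll handler: processes inbound buffers, kicks the per-queue
 * TX NAPI instances when TX completion IRQs are in use, polls the CQ if
 * enabled and refills the RX queue. If the budget is not exhausted, it
 * completes NAPI and re-arms the QDIO IRQ via qdio_start_irq(),
 * re-scheduling itself if new work arrived in the meantime.
 */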
5911 int qeth_poll(struct napi_struct *napi, int budget)
5913 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5914 unsigned int work_done;
5916 work_done = qeth_rx_poll(card, budget);
5918 if (qeth_use_tx_irqs(card)) {
5919 struct qeth_qdio_out_q *queue;
5922 qeth_for_each_output_queue(card, queue, i) {
5923 if (!qeth_out_queue_is_empty(queue))
5924 napi_schedule(&queue->napi);
5928 if (card->options.cq == QETH_CQ_ENABLED)
5932 struct qeth_rx *ctx = &card->rx;
5934 /* Process any substantial refill backlog: */
5935 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5937 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5938 if (work_done >= budget)
5942 if (napi_complete_done(napi, work_done) &&
5943 qdio_start_irq(CARD_DDEV(card)))
5944 napi_schedule(napi);
5948 EXPORT_SYMBOL_GPL(qeth_poll);
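/*
 * TX completion for IQD devices: a buffer may complete asynchronously via
 * a QAOB. If the QAOB is still pending, the skbs are notified and the
 * buffer is parked on pending_bufs with a freshly allocated replacement
 * slot; otherwise the buffer is cleaned up immediately.
 */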
5950 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5951 unsigned int bidx, unsigned int qdio_error,
5954 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5955 u8 sflags = buffer->buffer->element[15].sflags;
5956 struct qeth_card *card = queue->card;
5957 bool error = !!qdio_error;
5959 if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5960 struct qaob *aob = buffer->aob;
5961 struct qeth_qaob_priv1 *priv;
5962 enum iucv_tx_notify notify;
5965 netdev_WARN_ONCE(card->dev,
5966 "Pending TX buffer %#x without QAOB on TX queue %u\n",
5967 bidx, queue->queue_no);
5968 qeth_schedule_recovery(card);
5972 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5974 priv = (struct qeth_qaob_priv1 *)&aob->user1;
5975 /* QAOB hasn't completed yet: */
5976 if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5977 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5979 /* Prepare the queue slot for immediate re-use: */
5980 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5981 if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
5982 QETH_CARD_TEXT(card, 2, "outofbuf");
5983 qeth_schedule_recovery(card);
5986 list_add(&buffer->list_entry, &queue->pending_bufs);
5987 /* Skip clearing the buffer: */
5991 /* QAOB already completed: */
5992 notify = qeth_compute_cq_notification(aob->aorc, 0);
5993 qeth_notify_skbs(queue, buffer, notify);
5994 error = !!aob->aorc;
5995 memset(aob, 0, sizeof(*aob));
5996 } else if (card->options.cq == QETH_CQ_ENABLED) {
5997 qeth_notify_skbs(queue, buffer,
5998 qeth_compute_cq_notification(sflags, 0));
6001 qeth_clear_output_buffer(queue, buffer, error, budget);
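/*
 * Per-TX-queue NAPI poll: inspects the output queue for completed
 * buffers, records send errors, accounts the transmitted packets and
 * bytes to the netdev TX queue, and wakes the txq once it has drained
 * below the full mark.
 */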
6004 static int qeth_tx_poll(struct napi_struct *napi, int budget)
6006 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
6007 unsigned int queue_no = queue->queue_no;
6008 struct qeth_card *card = queue->card;
6009 struct net_device *dev = card->dev;
6010 unsigned int work_done = 0;
6011 struct netdev_queue *txq;
6014 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
6016 txq = netdev_get_tx_queue(dev, queue_no);
6019 unsigned int start, error, i;
6020 unsigned int packets = 0;
6021 unsigned int bytes = 0;
6024 qeth_tx_complete_pending_bufs(card, queue, false, budget);
6026 if (qeth_out_queue_is_empty(queue)) {
6027 napi_complete(napi);
6031 /* Give the CPU a breather: */
6032 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
6033 QETH_TXQ_STAT_INC(queue, completion_yield);
6034 if (napi_complete_done(napi, 0))
6035 napi_schedule(napi);
6039 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
6041 if (completed <= 0) {
6042 /* Ensure we see TX completion for pending work: */
6043 if (napi_complete_done(napi, 0) &&
6044 !atomic_read(&queue->set_pci_flags_count))
6045 qeth_tx_arm_timer(queue, queue->rescan_usecs);
6049 for (i = start; i < start + completed; i++) {
6050 struct qeth_qdio_out_buffer *buffer;
6051 unsigned int bidx = QDIO_BUFNR(i);
6053 buffer = queue->bufs[bidx];
6054 packets += buffer->frames;
6055 bytes += buffer->bytes;
6057 qeth_handle_send_error(card, buffer, error);
6059 qeth_iqd_tx_complete(queue, bidx, error, budget);
6061 qeth_clear_output_buffer(queue, buffer, error,
6065 atomic_sub(completed, &queue->used_buffers);
6066 work_done += completed;
6068 netdev_tx_completed_queue(txq, packets, bytes);
6070 qeth_check_outbound_queue(queue);
6072 /* xmit may have observed the full condition, but not yet
6073 * stopped the txq. In that case the code below won't trigger.
6074 * So before returning, xmit will re-check the txq's fill level
6075 * and wake it up if needed.
6077 if (netif_tx_queue_stopped(txq) &&
6078 !qeth_out_queue_is_full(queue))
6079 netif_tx_wake_queue(txq);
6083 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
6085 if (!cmd->hdr.return_code)
6086 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6087 return cmd->hdr.return_code;
6090 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
6091 struct qeth_reply *reply,
6094 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6095 struct qeth_ipa_caps *caps = reply->param;
6097 if (qeth_setassparms_inspect_rc(cmd))
6100 caps->supported = cmd->data.setassparms.data.caps.supported;
6101 caps->enabled = cmd->data.setassparms.data.caps.enabled;
6105 int qeth_setassparms_cb(struct qeth_card *card,
6106 struct qeth_reply *reply, unsigned long data)
6108 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6110 QETH_CARD_TEXT(card, 4, "defadpcb");
6112 if (cmd->hdr.return_code)
6115 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6116 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6117 card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6118 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6119 card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6122 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6124 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
6125 enum qeth_ipa_funcs ipa_func,
6127 unsigned int data_length,
6128 enum qeth_prot_versions prot)
6130 struct qeth_ipacmd_setassparms *setassparms;
6131 struct qeth_ipacmd_setassparms_hdr *hdr;
6132 struct qeth_cmd_buffer *iob;
6134 QETH_CARD_TEXT(card, 4, "getasscm");
6135 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6137 offsetof(struct qeth_ipacmd_setassparms,
6142 setassparms = &__ipa_cmd(iob)->data.setassparms;
6143 setassparms->assist_no = ipa_func;
6145 hdr = &setassparms->hdr;
6146 hdr->length = sizeof(*hdr) + data_length;
6147 hdr->command_code = cmd_code;
6150 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6152 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6153 enum qeth_ipa_funcs ipa_func,
6154 u16 cmd_code, u32 *data,
6155 enum qeth_prot_versions prot)
6157 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6158 struct qeth_cmd_buffer *iob;
6160 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6161 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6166 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6167 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6169 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6171 static void qeth_unregister_dbf_views(void)
6175 for (x = 0; x < QETH_DBF_INFOS; x++) {
6176 debug_unregister(qeth_dbf[x].id);
6177 qeth_dbf[x].id = NULL;
6181 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6183 char dbf_txt_buf[32];
6186 if (!debug_level_enabled(id, level))
6188 va_start(args, fmt);
6189 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6191 debug_text_event(id, level, dbf_txt_buf);
6193 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6195 static int qeth_register_dbf_views(void)
6200 for (x = 0; x < QETH_DBF_INFOS; x++) {
6201 /* register the areas */
6202 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6206 if (qeth_dbf[x].id == NULL) {
6207 qeth_unregister_dbf_views();
6211 /* register a view */
6212 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6214 qeth_unregister_dbf_views();
6218 /* set a passing level */
6219 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6225 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
6227 int qeth_setup_discipline(struct qeth_card *card,
6228 enum qeth_discipline_id discipline)
6232 mutex_lock(&qeth_mod_mutex);
6233 switch (discipline) {
6234 case QETH_DISCIPLINE_LAYER3:
6235 card->discipline = try_then_request_module(
6236 symbol_get(qeth_l3_discipline), "qeth_l3");
6238 case QETH_DISCIPLINE_LAYER2:
6239 card->discipline = try_then_request_module(
6240 symbol_get(qeth_l2_discipline), "qeth_l2");
6245 mutex_unlock(&qeth_mod_mutex);
6247 if (!card->discipline) {
6248 dev_err(&card->gdev->dev, "There is no kernel module to "
6249 "support discipline %d\n", discipline);
6253 rc = card->discipline->setup(card->gdev);
6255 if (discipline == QETH_DISCIPLINE_LAYER2)
6256 symbol_put(qeth_l2_discipline);
6258 symbol_put(qeth_l3_discipline);
6259 card->discipline = NULL;
6264 card->options.layer = discipline;
6268 void qeth_remove_discipline(struct qeth_card *card)
6270 card->discipline->remove(card->gdev);
6272 if (IS_LAYER2(card))
6273 symbol_put(qeth_l2_discipline);
6275 symbol_put(qeth_l3_discipline);
6276 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6277 card->discipline = NULL;
6280 static const struct device_type qeth_generic_devtype = {
6281 .name = "qeth_generic",
6284 #define DBF_NAME_LEN 20
6286 struct qeth_dbf_entry {
6287 char dbf_name[DBF_NAME_LEN];
6288 debug_info_t *dbf_info;
6289 struct list_head dbf_list;
6292 static LIST_HEAD(qeth_dbf_list);
6293 static DEFINE_MUTEX(qeth_dbf_list_mutex);
6295 static debug_info_t *qeth_get_dbf_entry(char *name)
6297 struct qeth_dbf_entry *entry;
6298 debug_info_t *rc = NULL;
6300 mutex_lock(&qeth_dbf_list_mutex);
6301 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6302 if (strcmp(entry->dbf_name, name) == 0) {
6303 rc = entry->dbf_info;
6307 mutex_unlock(&qeth_dbf_list_mutex);
6311 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6313 struct qeth_dbf_entry *new_entry;
6315 card->debug = debug_register(name, 2, 1, 8);
6317 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6320 if (debug_register_view(card->debug, &debug_hex_ascii_view))
6322 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6325 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6326 new_entry->dbf_info = card->debug;
6327 mutex_lock(&qeth_dbf_list_mutex);
6328 list_add(&new_entry->dbf_list, &qeth_dbf_list);
6329 mutex_unlock(&qeth_dbf_list_mutex);
6334 debug_unregister(card->debug);
6339 static void qeth_clear_dbf_list(void)
6341 struct qeth_dbf_entry *entry, *tmp;
6343 mutex_lock(&qeth_dbf_list_mutex);
6344 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6345 list_del(&entry->dbf_list);
6346 debug_unregister(entry->dbf_info);
6349 mutex_unlock(&qeth_dbf_list_mutex);
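/*
 * Allocate the net_device that matches the card type: IQD cards get an
 * "hsi%d" multiqueue device, OSM a single-queue etherdev, all other
 * types a multiqueue etherdev. Common private data and feature flags
 * are set up here as well.
 */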
6352 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6354 struct net_device *dev;
6355 struct qeth_priv *priv;
6357 switch (card->info.type) {
6358 case QETH_CARD_TYPE_IQD:
6359 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6360 ether_setup, QETH_MAX_OUT_QUEUES, 1);
6362 case QETH_CARD_TYPE_OSM:
6363 dev = alloc_etherdev(sizeof(*priv));
6366 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6372 priv = netdev_priv(dev);
6373 priv->rx_copybreak = QETH_RX_COPYBREAK;
6374 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6376 dev->ml_priv = card;
6377 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6379 /* initialized when device first goes online: */
6382 SET_NETDEV_DEV(dev, &card->gdev->dev);
6383 netif_carrier_off(dev);
6385 dev->ethtool_ops = &qeth_ethtool_ops;
6386 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6387 dev->hw_features |= NETIF_F_SG;
6388 dev->vlan_features |= NETIF_F_SG;
6390 dev->features |= NETIF_F_SG;
6395 struct net_device *qeth_clone_netdev(struct net_device *orig)
6397 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6402 clone->dev_port = orig->dev_port;
6406 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6408 struct qeth_card *card;
6411 enum qeth_discipline_id enforced_disc;
6412 char dbf_name[DBF_NAME_LEN];
6414 QETH_DBF_TEXT(SETUP, 2, "probedev");
6417 if (!get_device(dev))
6420 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6422 card = qeth_alloc_card(gdev);
6424 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6429 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6430 dev_name(&gdev->dev));
6431 card->debug = qeth_get_dbf_entry(dbf_name);
6433 rc = qeth_add_dbf_entry(card, dbf_name);
6438 qeth_setup_card(card);
6439 card->dev = qeth_alloc_netdev(card);
6445 qeth_determine_capabilities(card);
6446 qeth_set_blkt_defaults(card);
6448 card->qdio.no_out_queues = card->dev->num_tx_queues;
6449 rc = qeth_update_from_chp_desc(card);
6453 gdev->dev.groups = qeth_dev_groups;
6455 enforced_disc = qeth_enforce_discipline(card);
6456 switch (enforced_disc) {
6457 case QETH_DISCIPLINE_UNDETERMINED:
6458 gdev->dev.type = &qeth_generic_devtype;
6461 card->info.layer_enforced = true;
6462 /* It's so early that we don't need the discipline_mutex yet. */
6463 rc = qeth_setup_discipline(card, enforced_disc);
6465 goto err_setup_disc;
6474 free_netdev(card->dev);
6476 qeth_core_free_card(card);
6482 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6484 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6486 QETH_CARD_TEXT(card, 2, "removedv");
6488 mutex_lock(&card->discipline_mutex);
6489 if (card->discipline)
6490 qeth_remove_discipline(card);
6491 mutex_unlock(&card->discipline_mutex);
6493 qeth_free_qdio_queues(card);
6495 free_netdev(card->dev);
6496 qeth_core_free_card(card);
6497 put_device(&gdev->dev);
6500 static int qeth_core_set_online(struct ccwgroup_device *gdev)
6502 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6504 enum qeth_discipline_id def_discipline;
6506 mutex_lock(&card->discipline_mutex);
6507 if (!card->discipline) {
6508 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6509 QETH_DISCIPLINE_LAYER2;
6510 rc = qeth_setup_discipline(card, def_discipline);
6515 rc = qeth_set_online(card, card->discipline);
6518 mutex_unlock(&card->discipline_mutex);
6522 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6524 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6527 mutex_lock(&card->discipline_mutex);
6528 rc = qeth_set_offline(card, card->discipline, false);
6529 mutex_unlock(&card->discipline_mutex);
6534 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6536 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6538 qeth_set_allowed_threads(card, 0, 1);
6539 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6540 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6541 qeth_qdio_clear_card(card, 0);
6542 qeth_drain_output_queues(card);
6543 qdio_free(CARD_DDEV(card));
6546 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6551 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6554 return err ? err : count;
6556 static DRIVER_ATTR_WO(group);
6558 static struct attribute *qeth_drv_attrs[] = {
6559 &driver_attr_group.attr,
6562 static struct attribute_group qeth_drv_attr_group = {
6563 .attrs = qeth_drv_attrs,
6565 static const struct attribute_group *qeth_drv_attr_groups[] = {
6566 &qeth_drv_attr_group,
6570 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6572 .groups = qeth_drv_attr_groups,
6573 .owner = THIS_MODULE,
6576 .ccw_driver = &qeth_ccw_driver,
6577 .setup = qeth_core_probe_device,
6578 .remove = qeth_core_remove_device,
6579 .set_online = qeth_core_set_online,
6580 .set_offline = qeth_core_set_offline,
6581 .shutdown = qeth_core_shutdown,
6584 int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6586 struct qeth_card *card = dev->ml_priv;
6590 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6591 rc = qeth_snmp_command(card, data);
6593 case SIOC_QETH_GET_CARD_TYPE:
6594 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6598 case SIOC_QETH_QUERY_OAT:
6599 rc = qeth_query_oat_command(card, data);
6602 if (card->discipline->do_ioctl)
6603 rc = card->discipline->do_ioctl(dev, rq, data, cmd);
6608 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6611 EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
6613 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6615 struct qeth_card *card = dev->ml_priv;
6616 struct mii_ioctl_data *mii_data;
6621 mii_data = if_mii(rq);
6622 mii_data->phy_id = 0;
6625 mii_data = if_mii(rq);
6626 if (mii_data->phy_id != 0)
6629 mii_data->val_out = qeth_mdio_read(dev,
6630 mii_data->phy_id, mii_data->reg_num);
6636 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6639 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6641 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6644 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6645 u32 *features = reply->param;
6647 if (qeth_setassparms_inspect_rc(cmd))
6650 *features = cmd->data.setassparms.data.flags_32bit;
6654 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6655 enum qeth_prot_versions prot)
6657 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6661 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6662 enum qeth_prot_versions prot, u8 *lp2lp)
6664 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6665 struct qeth_cmd_buffer *iob;
6666 struct qeth_ipa_caps caps;
6670 /* some L3 HW requires combined L3+L4 csum offload: */
6671 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6672 cstype == IPA_OUTBOUND_CHECKSUM)
6673 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6675 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6680 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6684 if ((required_features & features) != required_features) {
6685 qeth_set_csum_off(card, cstype, prot);
6689 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6690 SETASS_DATA_SIZEOF(flags_32bit),
6693 qeth_set_csum_off(card, cstype, prot);
6697 if (features & QETH_IPA_CHECKSUM_LP2LP)
6698 required_features |= QETH_IPA_CHECKSUM_LP2LP;
6699 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6700 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6702 qeth_set_csum_off(card, cstype, prot);
6706 if (!qeth_ipa_caps_supported(&caps, required_features) ||
6707 !qeth_ipa_caps_enabled(&caps, required_features)) {
6708 qeth_set_csum_off(card, cstype, prot);
6712 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6713 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6716 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6721 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6722 enum qeth_prot_versions prot, u8 *lp2lp)
6724 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6725 qeth_set_csum_off(card, cstype, prot);
6728 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6731 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6732 struct qeth_tso_start_data *tso_data = reply->param;
6734 if (qeth_setassparms_inspect_rc(cmd))
6737 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6738 tso_data->supported = cmd->data.setassparms.data.tso.supported;
6742 static int qeth_set_tso_off(struct qeth_card *card,
6743 enum qeth_prot_versions prot)
6745 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6746 IPA_CMD_ASS_STOP, NULL, prot);
6749 static int qeth_set_tso_on(struct qeth_card *card,
6750 enum qeth_prot_versions prot)
6752 struct qeth_tso_start_data tso_data;
6753 struct qeth_cmd_buffer *iob;
6754 struct qeth_ipa_caps caps;
6757 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6758 IPA_CMD_ASS_START, 0, prot);
6762 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6766 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6767 qeth_set_tso_off(card, prot);
6771 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6773 SETASS_DATA_SIZEOF(caps), prot);
6775 qeth_set_tso_off(card, prot);
6779 /* enable TSO capability */
6780 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6781 QETH_IPA_LARGE_SEND_TCP;
6782 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6784 qeth_set_tso_off(card, prot);
6788 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6789 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6790 qeth_set_tso_off(card, prot);
6794 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6799 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6800 enum qeth_prot_versions prot)
6802 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6805 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6807 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6810 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6811 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6812 QETH_PROT_IPV4, NULL);
6813 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6814 /* at most the IPv4 Offload Assist is available, so its rc decides */
6817 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6818 QETH_PROT_IPV6, NULL);
6821 /* enable: success if any Assist is active */
6822 return (rc_ipv6) ? rc_ipv4 : 0;
6824 /* disable: failure if any Assist is still active */
6825 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6829 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6830 * @dev: a net_device
6832 void qeth_enable_hw_features(struct net_device *dev)
6834 struct qeth_card *card = dev->ml_priv;
6835 netdev_features_t features;
6837 features = dev->features;
6838 /* force-off any feature that might need an IPA sequence.
6839 * netdev_update_features() will restart them.
6841 dev->features &= ~dev->hw_features;
6842 /* toggle VLAN filter, so that VIDs are re-programmed: */
6843 if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6844 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6845 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6847 netdev_update_features(dev);
6848 if (features != dev->features)
6849 dev_warn(&card->gdev->dev,
6850 "Device recovery failed to restore all offload features\n");
6852 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6854 static void qeth_check_restricted_features(struct qeth_card *card,
6855 netdev_features_t changed,
6856 netdev_features_t actual)
6858 netdev_features_t ipv6_features = NETIF_F_TSO6;
6859 netdev_features_t ipv4_features = NETIF_F_TSO;
6861 if (!card->info.has_lp2lp_cso_v6)
6862 ipv6_features |= NETIF_F_IPV6_CSUM;
6863 if (!card->info.has_lp2lp_cso_v4)
6864 ipv4_features |= NETIF_F_IP_CSUM;
6866 if ((changed & ipv6_features) && !(actual & ipv6_features))
6867 qeth_flush_local_addrs6(card);
6868 if ((changed & ipv4_features) && !(actual & ipv4_features))
6869 qeth_flush_local_addrs4(card);
6872 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6874 struct qeth_card *card = dev->ml_priv;
6875 netdev_features_t changed = dev->features ^ features;
6878 QETH_CARD_TEXT(card, 2, "setfeat");
6879 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6881 if ((changed & NETIF_F_IP_CSUM)) {
6882 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6883 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6884 &card->info.has_lp2lp_cso_v4);
6886 changed ^= NETIF_F_IP_CSUM;
6888 if (changed & NETIF_F_IPV6_CSUM) {
6889 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6890 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6891 &card->info.has_lp2lp_cso_v6);
6893 changed ^= NETIF_F_IPV6_CSUM;
6895 if (changed & NETIF_F_RXCSUM) {
6896 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6898 changed ^= NETIF_F_RXCSUM;
6900 if (changed & NETIF_F_TSO) {
6901 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6904 changed ^= NETIF_F_TSO;
6906 if (changed & NETIF_F_TSO6) {
6907 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6910 changed ^= NETIF_F_TSO6;
6913 qeth_check_restricted_features(card, dev->features ^ features,
6914 dev->features ^ changed);
6916 /* everything changed successfully? */
6917 if ((dev->features ^ features) == changed)
6919 /* something went wrong. save changed features and return error */
6920 dev->features ^= changed;
6923 EXPORT_SYMBOL_GPL(qeth_set_features);
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

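/* .ndo_get_stats64 hook: RX counters are tracked per card, while TX counters
 * are kept per output queue and summed up here.
 */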
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

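/* .ndo_select_queue hook for IQD devices: multicast traffic goes to the
 * dedicated mcast TX queue, unicast traffic is spread by the stack across the
 * remaining queues. Whatever the stack picks is remapped away from the mcast
 * queue index.
 */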
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

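/* .ndo_open hook: start the TX queues and (re-)add the per-queue TX NAPI
 * instances that qeth_stop() removes, then enable and schedule all NAPIs.
 */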
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");