// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}
static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}

	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}

	return "n/a";
}
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}
static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}
static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}

	return 0;
}
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}
static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}

	rc = 0;
out:
	return rc;
}
static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}

		card->qdio.no_in_queues = 2;
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	return 0;
}
static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}
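
/* Translate the SBALF 15 completion code of a CQ buffer into the TX
 * notification that gets raised towards any AF_IUCV sockets waiting on
 * the buffer's skbs. @delayed selects the TX_NOTIFY_DELAYED_* variants.
 */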
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}
static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}
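
/* Start the next READ on the control channel, reusing the card's
 * long-lived read command buffer. Must be called with the READ channel's
 * ccwdev lock held.
 */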
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}
static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}
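
/* Cache the IPv4 addresses that the adapter reported as local, so that
 * the TX path can detect next hops which terminate on this same adapter
 * (see qeth_next_hop_is_local_v4()).
 */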
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}
static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}
static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}
static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}
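
/* Check whether the skb's next-hop address is one of the cached local
 * addresses. Runs under RCU so that the TX hot path never blocks.
 */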
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}
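
/* Inspect an incoming IPA message: pass solicited replies back to the
 * caller for cmd matching, and consume unsolicited events here.
 */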
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}
static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}
static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}

	return 0;
}
static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
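
/* Completion callback of the READ channel: check the IDX header, match
 * IPA replies against the pending cmd requests, and then re-arm the
 * next read.
 */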
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}
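
/* The following helpers track which card threads (eg. the recovery
 * thread) are allowed, scheduled to start and currently running; all
 * three masks are protected by thread_mask_lock.
 */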
static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}
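
/* Inspect the channel/device status and sense data of an IRB, and
 * translate them into an errno (0 means the IRB needs no action).
 */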
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}
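
/* Common interrupt handler for all three CCW channels of a card; the
 * common I/O layer invokes it with the ccwdev lock held.
 */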
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}
static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}
static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}
static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}
static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}
static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}
static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}
static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}
static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}
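
/* Under z/VM, query the hypervisor via diag 0x26c whether the VNIC
 * backing this device is configured in layer-2 or layer-3 mode.
 */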
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}
static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}
static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}
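
/* Derive the function level that the peer should report back for the
 * function level we announced in IDX_ACTIVATE.
 */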
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}
/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 *  @cb_card:			pointer to the qeth_card structure
 *  @cb_reply:			pointer to the qeth_reply structure
 *  @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}
out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}
static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}
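
/* Apply the max MTU that the adapter reported: pick a suitable current
 * MTU, and for IQD devices resize the RX buffers to match.
 */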
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}
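
/* Translate the IQD frame size code reported by ULP ENABLE into the
 * corresponding MTU value (0 for an unknown code).
 */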
static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}
2453 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2456 struct qeth_cmd_buffer *iob;
2458 QETH_CARD_TEXT(card, 2, "ulpstpcb");
2460 iob = (struct qeth_cmd_buffer *) data;
2461 memcpy(&card->token.ulp_connection_r,
2462 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2463 QETH_MPC_TOKEN_LENGTH);
2464 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2466 QETH_CARD_TEXT(card, 2, "olmlimit");
2467 dev_err(&card->gdev->dev, "A connection could not be "
2468 "established because of an OLM limit\n");
2474 static int qeth_ulp_setup(struct qeth_card *card)
2477 struct qeth_cmd_buffer *iob;
2479 QETH_CARD_TEXT(card, 2, "ulpsetup");
2481 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2485 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2486 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2487 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2488 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2489 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2490 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2492 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2493 temp = (card->info.cula << 8) + card->info.unit_addr2;
2494 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2495 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
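/* Allocate the driver-side metadata for one TX buffer: the qeth
 * buffer object comes from a slab cache, is linked to the QDIO buffer
 * at index @bidx, and starts out in the EMPTY state.
 */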
2498 static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
2501 struct qeth_qdio_out_buffer *newbuf;
2503 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2507 newbuf->buffer = q->qdio_bufs[bidx];
2508 skb_queue_head_init(&newbuf->skb_list);
2509 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2510 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2511 q->bufs[bidx] = newbuf;
2515 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2520 qeth_drain_output_queue(q, true);
2521 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2525 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2527 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2533 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2536 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2537 if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2545 qeth_free_out_buf(q->bufs[--i]);
2546 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2552 static void qeth_tx_completion_timer(struct timer_list *timer)
2554 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2556 napi_schedule(&queue->napi);
2557 QETH_TXQ_STAT_INC(queue, completion_timer);
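/* Allocate all QDIO queues for a card: the RX queue and its buffer
 * pool, one TX queue per configured outbound queue (each with its
 * completion timer and coalescing defaults), and finally the
 * completion queue. On any failure, everything allocated so far is
 * rolled back and the qdio state returns to UNINITIALIZED.
 */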
2560 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2564 QETH_CARD_TEXT(card, 2, "allcqdbf");
2566 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2567 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2570 QETH_CARD_TEXT(card, 2, "inq");
2571 card->qdio.in_q = qeth_alloc_qdio_queue();
2572 if (!card->qdio.in_q)
2575 /* inbound buffer pool */
2576 if (qeth_alloc_buffer_pool(card))
2580 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2581 struct qeth_qdio_out_q *queue;
2583 queue = qeth_alloc_output_queue();
2586 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2587 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2588 card->qdio.out_qs[i] = queue;
2590 queue->queue_no = i;
2591 INIT_LIST_HEAD(&queue->pending_bufs);
2592 spin_lock_init(&queue->lock);
2593 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2595 queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2596 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2597 queue->rescan_usecs = QETH_TX_TIMER_USECS;
2599 queue->coalesce_usecs = USEC_PER_SEC;
2600 queue->max_coalesced_frames = 0;
2601 queue->rescan_usecs = 10 * USEC_PER_SEC;
2603 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2607 if (qeth_alloc_cq(card))
2614 qeth_free_output_queue(card->qdio.out_qs[--i]);
2615 card->qdio.out_qs[i] = NULL;
2617 qeth_free_buffer_pool(card);
2619 qeth_free_qdio_queue(card->qdio.in_q);
2620 card->qdio.in_q = NULL;
2622 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2626 static void qeth_free_qdio_queues(struct qeth_card *card)
2630 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2631 QETH_QDIO_UNINITIALIZED)
2635 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2636 if (card->qdio.in_q->bufs[j].rx_skb)
2637 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2639 qeth_free_qdio_queue(card->qdio.in_q);
2640 card->qdio.in_q = NULL;
2641 /* inbound buffer pool */
2642 qeth_free_buffer_pool(card);
2643 /* free outbound qdio_qs */
2644 for (i = 0; i < card->qdio.no_out_queues; i++) {
2645 qeth_free_output_queue(card->qdio.out_qs[i]);
2646 card->qdio.out_qs[i] = NULL;
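/* Fill the QIB parameter area that is handed to the QDIO layer at
 * establish time. Each sub-block is tagged with an EBCDIC magic
 * ("PCIT" for the PCI thresholds, "BLKT" for the blocking times,
 * "PQUE" for per-queue priorities); the PQUE block is skipped when
 * prio-queueing uses the default priorities anyway.
 */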
2650 static void qeth_fill_qib_parms(struct qeth_card *card,
2651 struct qeth_qib_parms *parms)
2653 struct qeth_qdio_out_q *queue;
2656 parms->pcit_magic[0] = 'P';
2657 parms->pcit_magic[1] = 'C';
2658 parms->pcit_magic[2] = 'I';
2659 parms->pcit_magic[3] = 'T';
2660 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2661 parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2662 parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2663 parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2665 parms->blkt_magic[0] = 'B';
2666 parms->blkt_magic[1] = 'L';
2667 parms->blkt_magic[2] = 'K';
2668 parms->blkt_magic[3] = 'T';
2669 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2670 parms->blkt_total = card->info.blkt.time_total;
2671 parms->blkt_inter_packet = card->info.blkt.inter_packet;
2672 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2674 /* Prio-queueing implicitly uses the default priorities: */
2675 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2678 parms->pque_magic[0] = 'P';
2679 parms->pque_magic[1] = 'Q';
2680 parms->pque_magic[2] = 'U';
2681 parms->pque_magic[3] = 'E';
2682 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2683 parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2684 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2686 qeth_for_each_output_queue(card, queue, i)
2687 parms->pque_priority[i] = queue->priority;
2690 static int qeth_qdio_activate(struct qeth_card *card)
2692 QETH_CARD_TEXT(card, 3, "qdioact");
2693 return qdio_activate(CARD_DDEV(card));
2696 static int qeth_dm_act(struct qeth_card *card)
2698 struct qeth_cmd_buffer *iob;
2700 QETH_CARD_TEXT(card, 2, "dmact");
2702 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2706 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2707 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2708 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2709 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2710 return qeth_send_control_data(card, iob, NULL, NULL);
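/* Run the full MPC bring-up sequence on the control channel: start
 * the reader, then CM ENABLE / CM SETUP, ULP ENABLE / ULP SETUP,
 * allocate and establish the QDIO queues, activate them, and finish
 * with DM ACT. Each step logs a numbered error tag on failure.
 */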
2713 static int qeth_mpc_initialize(struct qeth_card *card)
2717 QETH_CARD_TEXT(card, 2, "mpcinit");
2719 rc = qeth_issue_next_read(card);
2721 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2724 rc = qeth_cm_enable(card);
2726 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2729 rc = qeth_cm_setup(card);
2731 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2734 rc = qeth_ulp_enable(card);
2736 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2739 rc = qeth_ulp_setup(card);
2741 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2744 rc = qeth_alloc_qdio_queues(card);
2746 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2749 rc = qeth_qdio_establish(card);
2751 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2752 qeth_free_qdio_queues(card);
2755 rc = qeth_qdio_activate(card);
2757 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2760 rc = qeth_dm_act(card);
2762 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2769 static void qeth_print_status_message(struct qeth_card *card)
2771 switch (card->info.type) {
2772 case QETH_CARD_TYPE_OSD:
2773 case QETH_CARD_TYPE_OSM:
2774 case QETH_CARD_TYPE_OSX:
/* VM uses a non-zero first character to indicate a
 * HiperSockets-like reporting of the level;
 * OSA sets the first character to zero.
 */
2779 if (!card->info.mcl_level[0]) {
2780 sprintf(card->info.mcl_level, "%02x%02x",
2781 card->info.mcl_level[2],
2782 card->info.mcl_level[3]);
2786 case QETH_CARD_TYPE_IQD:
2787 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2788 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2789 card->info.mcl_level[0]];
2790 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2791 card->info.mcl_level[1]];
2792 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2793 card->info.mcl_level[2]];
2794 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2795 card->info.mcl_level[3]];
2796 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2800 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2802 dev_info(&card->gdev->dev,
2803 "Device is a%s card%s%s%s\nwith link type %s.\n",
2804 qeth_get_cardname(card),
2805 (card->info.mcl_level[0]) ? " (level: " : "",
2806 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2807 (card->info.mcl_level[0]) ? ")" : "",
2808 qeth_get_cardname_short(card));
2811 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2813 struct qeth_buffer_pool_entry *entry;
2815 QETH_CARD_TEXT(card, 5, "inwrklst");
2817 list_for_each_entry(entry,
2818 &card->qdio.init_pool.entry_list, init_list) {
2819 qeth_put_buffer_pool_entry(card, entry);
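/* Pick a buffer-pool entry whose pages are no longer referenced
 * elsewhere (page_count() == 1 for every element). If none is free,
 * take the first entry and replace its still-referenced pages with
 * freshly allocated ones.
 */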
2823 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2824 struct qeth_card *card)
2826 struct qeth_buffer_pool_entry *entry;
2829 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2832 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2834 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2835 if (page_count(entry->elements[i]) > 1) {
2841 list_del_init(&entry->list);
2846 /* no free buffer in pool so take first one and swap pages */
2847 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2848 struct qeth_buffer_pool_entry, list);
2849 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2850 if (page_count(entry->elements[i]) > 1) {
2851 struct page *page = dev_alloc_page();
2856 __free_page(entry->elements[i]);
2857 entry->elements[i] = page;
2858 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2861 list_del_init(&entry->list);
2865 static int qeth_init_input_buffer(struct qeth_card *card,
2866 struct qeth_qdio_buffer *buf)
2868 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2871 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2872 buf->rx_skb = netdev_alloc_skb(card->dev,
2874 sizeof(struct ipv6hdr));
2880 pool_entry = qeth_find_free_buffer_pool_entry(card);
2884 buf->pool_entry = pool_entry;
2888 * since the buffer is accessed only from the input_tasklet
2889 * there shouldn't be a need to synchronize; also, since we use
 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2893 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2894 buf->buffer->element[i].length = PAGE_SIZE;
2895 buf->buffer->element[i].addr =
2896 page_to_phys(pool_entry->elements[i]);
2897 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2898 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2900 buf->buffer->element[i].eflags = 0;
2901 buf->buffer->element[i].sflags = 0;
2906 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2907 struct qeth_qdio_out_q *queue)
2909 if (!IS_IQD(card) ||
2910 qeth_iqd_is_mcast_queue(card, queue) ||
2911 card->options.cq == QETH_CQ_ENABLED ||
2912 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2915 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
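/* (Re-)initialize all QDIO queues for a fresh start: hand the primed
 * RX buffers to the device via do_QDIO, set up the completion queue,
 * and reset each TX queue's packing/bulk state and DQL counters.
 */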
2918 static int qeth_init_qdio_queues(struct qeth_card *card)
2920 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2924 QETH_CARD_TEXT(card, 2, "initqdqs");
2927 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2928 memset(&card->rx, 0, sizeof(struct qeth_rx));
2930 qeth_initialize_working_pool_list(card);
/* give only as many buffers to hardware as we have buffer pool entries */
2932 for (i = 0; i < rx_bufs; i++) {
2933 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2938 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2939 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
2942 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2947 rc = qeth_cq_init(card);
2952 /* outbound queue */
2953 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2954 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2956 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2957 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2958 queue->next_buf_to_fill = 0;
2960 queue->prev_hdr = NULL;
2961 queue->coalesced_frames = 0;
2962 queue->bulk_start = 0;
2963 queue->bulk_count = 0;
2964 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2965 atomic_set(&queue->used_buffers, 0);
2966 atomic_set(&queue->set_pci_flags_count, 0);
2967 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2972 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2973 struct qeth_cmd_buffer *iob)
2975 qeth_mpc_finalize_cmd(card, iob);
2977 /* override with IPA-specific values: */
2978 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2981 static void qeth_prepare_ipa_cmd(struct qeth_card *card,
2982 struct qeth_cmd_buffer *iob, u16 cmd_length)
2984 u8 prot_type = qeth_mpc_select_prot_type(card);
2985 u16 total_length = iob->length;
2987 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2989 iob->finalize = qeth_ipa_finalize_cmd;
2991 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2992 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2993 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2994 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2995 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2996 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2997 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2998 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3001 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3002 struct qeth_cmd_buffer *reply)
3004 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3006 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
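/* Allocate a command buffer for an IPA command: the IPA header and
 * payload are embedded in an MPC PDU, replies are matched by IPA
 * sequence number, and the caller fills in the command-specific data
 * behind the prepared header.
 */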
3009 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3010 enum qeth_ipa_cmds cmd_code,
3011 enum qeth_prot_versions prot,
3012 unsigned int data_length)
3014 struct qeth_cmd_buffer *iob;
3015 struct qeth_ipacmd_hdr *hdr;
3017 data_length += offsetof(struct qeth_ipa_cmd, data);
3018 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3023 qeth_prepare_ipa_cmd(card, iob, data_length);
3024 iob->match = qeth_ipa_match_reply;
3026 hdr = &__ipa_cmd(iob)->hdr;
3027 hdr->command = cmd_code;
3028 hdr->initiator = IPA_CMD_INITIATOR_HOST;
3029 /* hdr->seqno is set by qeth_send_control_data() */
3030 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3031 hdr->rel_adapter_no = (u8) card->dev->dev_port;
3032 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3033 hdr->param_count = 1;
3034 hdr->prot_version = prot;
3037 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3039 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3040 struct qeth_reply *reply, unsigned long data)
3042 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3044 return (cmd->hdr.return_code) ? -EIO : 0;
3048 * qeth_send_ipa_cmd() - send an IPA command
3050 * See qeth_send_control_data() for explanation of the arguments.
3053 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3054 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3060 QETH_CARD_TEXT(card, 4, "sendipa");
3062 if (card->read_or_write_problem) {
3067 if (reply_cb == NULL)
3068 reply_cb = qeth_send_ipa_cmd_cb;
3069 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3071 qeth_clear_ipacmd_list(card);
3072 qeth_schedule_recovery(card);
3076 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3078 static int qeth_send_startlan_cb(struct qeth_card *card,
3079 struct qeth_reply *reply, unsigned long data)
3081 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3083 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3086 return (cmd->hdr.return_code) ? -EIO : 0;
3089 static int qeth_send_startlan(struct qeth_card *card)
3091 struct qeth_cmd_buffer *iob;
3093 QETH_CARD_TEXT(card, 2, "strtlan");
3095 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3098 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3101 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3103 if (!cmd->hdr.return_code)
3104 cmd->hdr.return_code =
3105 cmd->data.setadapterparms.hdr.return_code;
3106 return cmd->hdr.return_code;
3109 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3110 struct qeth_reply *reply, unsigned long data)
3112 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3113 struct qeth_query_cmds_supp *query_cmd;
3115 QETH_CARD_TEXT(card, 3, "quyadpcb");
3116 if (qeth_setadpparms_inspect_rc(cmd))
3119 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3120 if (query_cmd->lan_type & 0x7f) {
3121 if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3122 return -EPROTONOSUPPORT;
3124 card->info.link_type = query_cmd->lan_type;
3125 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3128 card->options.adp.supported = query_cmd->supported_cmds;
3132 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3133 enum qeth_ipa_setadp_cmd adp_cmd,
3134 unsigned int data_length)
3136 struct qeth_ipacmd_setadpparms_hdr *hdr;
3137 struct qeth_cmd_buffer *iob;
3139 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3141 offsetof(struct qeth_ipacmd_setadpparms,
3146 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3147 hdr->cmdlength = sizeof(*hdr) + data_length;
3148 hdr->command_code = adp_cmd;
3149 hdr->used_total = 1;
3154 static int qeth_query_setadapterparms(struct qeth_card *card)
3157 struct qeth_cmd_buffer *iob;
3159 QETH_CARD_TEXT(card, 3, "queryadp");
3160 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3161 SETADP_DATA_SIZEOF(query_cmds_supp));
3164 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3168 static int qeth_query_ipassists_cb(struct qeth_card *card,
3169 struct qeth_reply *reply, unsigned long data)
3171 struct qeth_ipa_cmd *cmd;
3173 QETH_CARD_TEXT(card, 2, "qipasscb");
3175 cmd = (struct qeth_ipa_cmd *) data;
3177 switch (cmd->hdr.return_code) {
3178 case IPA_RC_SUCCESS:
3180 case IPA_RC_NOTSUPP:
3181 case IPA_RC_L2_UNSUPPORTED_CMD:
3182 QETH_CARD_TEXT(card, 2, "ipaunsup");
3183 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3184 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3187 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3188 CARD_DEVID(card), cmd->hdr.return_code);
3192 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3193 card->options.ipa4 = cmd->hdr.assists;
3194 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3195 card->options.ipa6 = cmd->hdr.assists;
3197 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3202 static int qeth_query_ipassists(struct qeth_card *card,
3203 enum qeth_prot_versions prot)
3206 struct qeth_cmd_buffer *iob;
3208 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3209 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3212 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3216 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3217 struct qeth_reply *reply, unsigned long data)
3219 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3220 struct qeth_query_switch_attributes *attrs;
3221 struct qeth_switch_info *sw_info;
3223 QETH_CARD_TEXT(card, 2, "qswiatcb");
3224 if (qeth_setadpparms_inspect_rc(cmd))
3227 sw_info = (struct qeth_switch_info *)reply->param;
3228 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3229 sw_info->capabilities = attrs->capabilities;
3230 sw_info->settings = attrs->settings;
3231 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3236 int qeth_query_switch_attributes(struct qeth_card *card,
3237 struct qeth_switch_info *sw_info)
3239 struct qeth_cmd_buffer *iob;
3241 QETH_CARD_TEXT(card, 2, "qswiattr");
3242 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3244 if (!netif_carrier_ok(card->dev))
3246 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3249 return qeth_send_ipa_cmd(card, iob,
3250 qeth_query_switch_attributes_cb, sw_info);
3253 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3254 enum qeth_diags_cmds sub_cmd,
3255 unsigned int data_length)
3257 struct qeth_ipacmd_diagass *cmd;
3258 struct qeth_cmd_buffer *iob;
3260 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3261 DIAG_HDR_LEN + data_length);
3265 cmd = &__ipa_cmd(iob)->data.diagass;
3266 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3267 cmd->subcmd = sub_cmd;
3270 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3272 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3273 struct qeth_reply *reply, unsigned long data)
3275 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3276 u16 rc = cmd->hdr.return_code;
3279 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3283 card->info.diagass_support = cmd->data.diagass.ext;
3287 static int qeth_query_setdiagass(struct qeth_card *card)
3289 struct qeth_cmd_buffer *iob;
3291 QETH_CARD_TEXT(card, 2, "qdiagass");
3292 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3295 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3298 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3300 unsigned long info = get_zeroed_page(GFP_KERNEL);
3301 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3302 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3303 struct ccw_dev_id ccwid;
3306 tid->chpid = card->info.chpid;
3307 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3308 tid->ssid = ccwid.ssid;
3309 tid->devno = ccwid.devno;
3312 level = stsi(NULL, 0, 0, 0);
3313 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3314 tid->lparnr = info222->lpar_number;
3315 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3316 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3317 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3322 static int qeth_hw_trap_cb(struct qeth_card *card,
3323 struct qeth_reply *reply, unsigned long data)
3325 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3326 u16 rc = cmd->hdr.return_code;
3329 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3335 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3337 struct qeth_cmd_buffer *iob;
3338 struct qeth_ipa_cmd *cmd;
3340 QETH_CARD_TEXT(card, 2, "diagtrap");
3341 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3344 cmd = __ipa_cmd(iob);
3345 cmd->data.diagass.type = 1;
3346 cmd->data.diagass.action = action;
3348 case QETH_DIAGS_TRAP_ARM:
3349 cmd->data.diagass.options = 0x0003;
3350 cmd->data.diagass.ext = 0x00010000 +
3351 sizeof(struct qeth_trap_id);
3352 qeth_get_trap_id(card,
3353 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3355 case QETH_DIAGS_TRAP_DISARM:
3356 cmd->data.diagass.options = 0x0001;
3358 case QETH_DIAGS_TRAP_CAPTURE:
3361 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3364 static int qeth_check_qdio_errors(struct qeth_card *card,
3365 struct qdio_buffer *buf,
3366 unsigned int qdio_error,
3367 const char *dbftext)
3370 QETH_CARD_TEXT(card, 2, dbftext);
3371 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3372 buf->element[15].sflags);
3373 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3374 buf->element[14].sflags);
3375 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3376 if ((buf->element[15].sflags) == 0x12) {
3377 QETH_CARD_STAT_INC(card, rx_fifo_errors);
3385 static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3388 struct qeth_qdio_q *queue = card->qdio.in_q;
3389 struct list_head *lh;
3394 /* only requeue at a certain threshold to avoid SIGAs */
3395 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3396 for (i = queue->next_buf_to_init;
3397 i < queue->next_buf_to_init + count; ++i) {
3398 if (qeth_init_input_buffer(card,
3399 &queue->bufs[QDIO_BUFNR(i)])) {
3406 if (newcount < count) {
/* we are in a memory shortage, so we switch back to
 * traditional skb allocation and drop packets */
3409 atomic_set(&card->force_alloc_skb, 3);
3412 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3417 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3419 if (i == card->qdio.in_buf_pool.buf_count) {
3420 QETH_CARD_TEXT(card, 2, "qsarbw");
3421 schedule_delayed_work(
3422 &card->buffer_reclaim_work,
3423 QETH_RECLAIM_WORK_TIME);
3428 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3429 queue->next_buf_to_init, count, NULL);
3431 QETH_CARD_TEXT(card, 2, "qinberr");
3433 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3441 static void qeth_buffer_reclaim_work(struct work_struct *work)
3443 struct qeth_card *card = container_of(to_delayed_work(work),
3445 buffer_reclaim_work);
3448 napi_schedule(&card->napi);
3449 /* kick-start the NAPI softirq: */
3453 static void qeth_handle_send_error(struct qeth_card *card,
3454 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3456 int sbalf15 = buffer->buffer->element[15].sflags;
3458 QETH_CARD_TEXT(card, 6, "hdsnderr");
3459 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3464 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3467 QETH_CARD_TEXT(card, 1, "lnkfail");
3468 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3469 (u16)qdio_err, (u8)sbalf15);
3473 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3474 * @queue: queue to check for packing buffer
3476 * Returns number of buffers that were prepared for flush.
3478 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3480 struct qeth_qdio_out_buffer *buffer;
3482 buffer = queue->bufs[queue->next_buf_to_fill];
3483 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3484 (buffer->next_element_to_fill > 0)) {
3485 /* it's a packing buffer */
3486 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3487 queue->next_buf_to_fill =
3488 QDIO_BUFNR(queue->next_buf_to_fill + 1);
 * Switch to packing state if the number of used buffers on a queue
3496 * reaches a certain limit.
3498 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3500 if (!queue->do_pack) {
3501 if (atomic_read(&queue->used_buffers)
3502 >= QETH_HIGH_WATERMARK_PACK){
3503 /* switch non-PACKING -> PACKING */
3504 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3505 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue, it is prepared for flushing and 1 is returned
 * to inform the caller. If no buffer has to be flushed, zero is
 * returned.
3517 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3519 if (queue->do_pack) {
3520 if (atomic_read(&queue->used_buffers)
3521 <= QETH_LOW_WATERMARK_PACK) {
3522 /* switch PACKING -> non-PACKING */
3523 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3524 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3526 return qeth_prep_flush_pack_buffer(queue);
3532 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3535 struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3536 struct qeth_card *card = queue->card;
3537 unsigned int frames, usecs;
3538 struct qaob *aob = NULL;
3542 for (i = index; i < index + count; ++i) {
3543 unsigned int bidx = QDIO_BUFNR(i);
3544 struct sk_buff *skb;
3546 buf = queue->bufs[bidx];
3547 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3548 SBAL_EFLAGS_LAST_ENTRY;
3549 queue->coalesced_frames += buf->frames;
3552 skb_queue_walk(&buf->skb_list, skb)
3553 skb_tx_timestamp(skb);
3558 if (card->options.cq == QETH_CQ_ENABLED &&
3559 !qeth_iqd_is_mcast_queue(card, queue) &&
3562 buf->aob = qdio_allocate_aob();
3564 struct qeth_qaob_priv1 *priv;
3567 priv = (struct qeth_qaob_priv1 *)&aob->user1;
3568 priv->state = QETH_QAOB_ISSUED;
3569 priv->queue_no = queue->queue_no;
3573 if (!queue->do_pack) {
3574 if ((atomic_read(&queue->used_buffers) >=
3575 (QETH_HIGH_WATERMARK_PACK -
3576 QETH_WATERMARK_PACK_FUZZ)) &&
3577 !atomic_read(&queue->set_pci_flags_count)) {
/* it's likely that we'll go to packing
 * mode soon */
3580 atomic_inc(&queue->set_pci_flags_count);
3581 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3584 if (!atomic_read(&queue->set_pci_flags_count)) {
 * there's no outstanding PCI any more, so we
 * have to request a PCI to be sure that the PCI
 * will wake at some time in the future; then we
 * can flush packed buffers that might still be
 * hanging around, which can happen if no
 * further send was requested by the stack
3593 atomic_inc(&queue->set_pci_flags_count);
3594 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3599 QETH_TXQ_STAT_INC(queue, doorbell);
3600 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
3606 /* ignore temporary SIGA errors without busy condition */
3608 /* Fake the TX completion interrupt: */
3609 frames = READ_ONCE(queue->max_coalesced_frames);
3610 usecs = READ_ONCE(queue->coalesce_usecs);
3612 if (frames && queue->coalesced_frames >= frames) {
3613 napi_schedule(&queue->napi);
3614 queue->coalesced_frames = 0;
3615 QETH_TXQ_STAT_INC(queue, coal_frames);
3616 } else if (qeth_use_tx_irqs(card) &&
3617 atomic_read(&queue->used_buffers) >= 32) {
3618 /* Old behaviour carried over from the qdio layer: */
3619 napi_schedule(&queue->napi);
3620 QETH_TXQ_STAT_INC(queue, coal_frames);
3622 qeth_tx_arm_timer(queue, usecs);
3627 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3628 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3629 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3630 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3631 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
/* this must not happen under normal circumstances. If it
 * happens, something is really wrong -> recover */
3635 qeth_schedule_recovery(queue->card);
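/* Flush the currently bulked buffers and reset the bulk window, so
 * that the next skb starts a new batch.
 */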
3639 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3641 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3643 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3644 queue->prev_hdr = NULL;
3645 queue->bulk_count = 0;
3648 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 * check if we have to switch to non-packing mode or if
 * we have to get a PCI flag out on the queue
3654 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3655 !atomic_read(&queue->set_pci_flags_count)) {
3656 unsigned int index, flush_cnt;
3659 spin_lock(&queue->lock);
3661 index = queue->next_buf_to_fill;
3662 q_was_packing = queue->do_pack;
3664 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3665 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3666 flush_cnt = qeth_prep_flush_pack_buffer(queue);
3669 qeth_flush_buffers(queue, index, flush_cnt);
3671 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3674 spin_unlock(&queue->lock);
3678 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3680 struct qeth_card *card = (struct qeth_card *)card_ptr;
3682 napi_schedule_irqoff(&card->napi);
3685 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3689 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3693 if (card->options.cq == cq) {
3698 qeth_free_qdio_queues(card);
3699 card->options.cq = cq;
3706 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3708 static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
3710 struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
3711 unsigned int queue_no = priv->queue_no;
3713 BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
3715 if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
3716 queue_no < card->qdio.no_out_queues)
3717 napi_schedule(&card->qdio.out_qs[queue_no]->napi);
3720 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3721 unsigned int queue, int first_element,
3724 struct qeth_qdio_q *cq = card->qdio.c_q;
3728 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3729 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3730 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3733 netif_tx_stop_all_queues(card->dev);
3734 qeth_schedule_recovery(card);
3738 for (i = first_element; i < first_element + count; ++i) {
3739 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3742 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3743 buffer->element[e].addr) {
3744 unsigned long phys_aob_addr = buffer->element[e].addr;
3746 qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3749 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3751 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3752 cq->next_buf_to_init, count, NULL);
3754 dev_warn(&card->gdev->dev,
3755 "QDIO reported an error, rc=%i\n", rc);
3756 QETH_CARD_TEXT(card, 2, "qcqherr");
3759 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3762 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3763 unsigned int qdio_err, int queue,
3764 int first_elem, int count,
3765 unsigned long card_ptr)
3767 struct qeth_card *card = (struct qeth_card *)card_ptr;
3769 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3770 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3773 qeth_schedule_recovery(card);
3776 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3777 unsigned int qdio_error, int __queue,
3778 int first_element, int count,
3779 unsigned long card_ptr)
3781 struct qeth_card *card = (struct qeth_card *) card_ptr;
3783 QETH_CARD_TEXT(card, 2, "achkcond");
3784 netif_tx_stop_all_queues(card->dev);
3785 qeth_schedule_recovery(card);
3789 * Note: Function assumes that we have 4 outbound queues.
3791 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3793 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3796 switch (card->qdio.do_prio_queueing) {
3797 case QETH_PRIO_Q_ING_TOS:
3798 case QETH_PRIO_Q_ING_PREC:
3799 switch (vlan_get_protocol(skb)) {
3800 case htons(ETH_P_IP):
3801 tos = ipv4_get_dsfield(ip_hdr(skb));
3803 case htons(ETH_P_IPV6):
3804 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3807 return card->qdio.default_out_queue;
3809 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3810 return ~tos >> 6 & 3;
3811 if (tos & IPTOS_MINCOST)
3813 if (tos & IPTOS_RELIABILITY)
3815 if (tos & IPTOS_THROUGHPUT)
3817 if (tos & IPTOS_LOWDELAY)
3820 case QETH_PRIO_Q_ING_SKB:
3821 if (skb->priority > 5)
3823 return ~skb->priority >> 1 & 3;
3824 case QETH_PRIO_Q_ING_VLAN:
3825 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3826 return ~ntohs(veth->h_vlan_TCI) >>
3827 (VLAN_PRIO_SHIFT + 1) & 3;
3829 case QETH_PRIO_Q_ING_FIXED:
3830 return card->qdio.default_out_queue;
3834 return card->qdio.default_out_queue;
3836 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3839 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3842 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the fragmented part of the SKB. Returns zero for a linear SKB.
3845 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3847 int cnt, elements = 0;
3849 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3850 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3852 elements += qeth_get_elements_for_range(
3853 (addr_t)skb_frag_address(frag),
3854 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3860 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3861 * to transmit an skb.
3862 * @skb: the skb to operate on.
3863 * @data_offset: skip this part of the skb's linear data
3865 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3866 * skb's data (both its linear part and paged fragments).
3868 static unsigned int qeth_count_elements(struct sk_buff *skb,
3869 unsigned int data_offset)
3871 unsigned int elements = qeth_get_elements_for_frags(skb);
3872 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3873 addr_t start = (addr_t)skb->data + data_offset;
3876 elements += qeth_get_elements_for_range(start, end);
3880 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3884 * qeth_add_hw_header() - add a HW header to an skb.
3885 * @skb: skb that the HW header should be added to.
3886 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3887 * it contains a valid pointer to a qeth_hdr.
3888 * @hdr_len: length of the HW header.
3889 * @proto_len: length of protocol headers that need to be in same page as the
 * Returns the pushed length. If the header can't be pushed on
 * (e.g. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * A failure to create the header is indicated by returning < 0.
3898 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3899 struct sk_buff *skb, struct qeth_hdr **hdr,
3900 unsigned int hdr_len, unsigned int proto_len,
3901 unsigned int *elements)
3903 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3904 const unsigned int contiguous = proto_len ? proto_len : 1;
3905 const unsigned int max_elements = queue->max_elements;
3906 unsigned int __elements;
3912 start = (addr_t)skb->data - hdr_len;
3913 end = (addr_t)skb->data;
3915 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3916 /* Push HW header into same page as first protocol header. */
3918 /* ... but TSO always needs a separate element for headers: */
3919 if (skb_is_gso(skb))
3920 __elements = 1 + qeth_count_elements(skb, proto_len);
3922 __elements = qeth_count_elements(skb, 0);
3923 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3924 /* Push HW header into preceding page, flush with skb->data. */
3926 __elements = 1 + qeth_count_elements(skb, 0);
3928 /* Use header cache, copy protocol headers up. */
3930 __elements = 1 + qeth_count_elements(skb, proto_len);
3933 /* Compress skb to fit into one IO buffer: */
3934 if (__elements > max_elements) {
3935 if (!skb_is_nonlinear(skb)) {
3936 /* Drop it, no easy way of shrinking it further. */
3937 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3938 max_elements, __elements, skb->len);
3942 rc = skb_linearize(skb);
3944 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3948 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3949 /* Linearization changed the layout, re-evaluate: */
3953 *elements = __elements;
3954 /* Add the header: */
3956 *hdr = skb_push(skb, hdr_len);
3960 /* Fall back to cache element with known-good alignment: */
3961 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3963 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
3966 /* Copy protocol headers behind HW header: */
3967 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3971 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3972 struct sk_buff *curr_skb,
3973 struct qeth_hdr *curr_hdr)
3975 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3976 struct qeth_hdr *prev_hdr = queue->prev_hdr;
3981 /* All packets must have the same target: */
3982 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3983 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3985 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3986 eth_hdr(curr_skb)->h_dest) &&
3987 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3990 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3991 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3995 * qeth_fill_buffer() - map skb into an output buffer
3996 * @buf: buffer to transport the skb
3997 * @skb: skb to map into the buffer
3998 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3999 * from qeth_core_header_cache.
4000 * @offset: when mapping the skb, start at skb->data + offset
4001 * @hd_len: if > 0, build a dedicated header element of this size
4003 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4004 struct sk_buff *skb, struct qeth_hdr *hdr,
4005 unsigned int offset, unsigned int hd_len)
4007 struct qdio_buffer *buffer = buf->buffer;
4008 int element = buf->next_element_to_fill;
4009 int length = skb_headlen(skb) - offset;
4010 char *data = skb->data + offset;
4011 unsigned int elem_length, cnt;
4012 bool is_first_elem = true;
4014 __skb_queue_tail(&buf->skb_list, skb);
4016 /* build dedicated element for HW Header */
4018 is_first_elem = false;
4020 buffer->element[element].addr = virt_to_phys(hdr);
4021 buffer->element[element].length = hd_len;
4022 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4024 /* HW header is allocated from cache: */
4025 if ((void *)hdr != skb->data)
4026 __set_bit(element, buf->from_kmem_cache);
4027 /* HW header was pushed and is contiguous with linear part: */
4028 else if (length > 0 && !PAGE_ALIGNED(data) &&
4029 (data == (char *)hdr + hd_len))
4030 buffer->element[element].eflags |=
4031 SBAL_EFLAGS_CONTIGUOUS;
4036 /* map linear part into buffer element(s) */
4037 while (length > 0) {
4038 elem_length = min_t(unsigned int, length,
4039 PAGE_SIZE - offset_in_page(data));
4041 buffer->element[element].addr = virt_to_phys(data);
4042 buffer->element[element].length = elem_length;
4043 length -= elem_length;
4044 if (is_first_elem) {
4045 is_first_elem = false;
4046 if (length || skb_is_nonlinear(skb))
4047 /* skb needs additional elements */
4048 buffer->element[element].eflags =
4049 SBAL_EFLAGS_FIRST_FRAG;
4051 buffer->element[element].eflags = 0;
4053 buffer->element[element].eflags =
4054 SBAL_EFLAGS_MIDDLE_FRAG;
4057 data += elem_length;
4061 /* map page frags into buffer element(s) */
4062 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4063 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4065 data = skb_frag_address(frag);
4066 length = skb_frag_size(frag);
4067 while (length > 0) {
4068 elem_length = min_t(unsigned int, length,
4069 PAGE_SIZE - offset_in_page(data));
4071 buffer->element[element].addr = virt_to_phys(data);
4072 buffer->element[element].length = elem_length;
4073 buffer->element[element].eflags =
4074 SBAL_EFLAGS_MIDDLE_FRAG;
4076 length -= elem_length;
4077 data += elem_length;
4082 if (buffer->element[element - 1].eflags)
4083 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4084 buf->next_element_to_fill = element;
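/* Bulking TX path (used for IQD devices): append the skb to the
 * current bulk buffer if qeth_iqd_may_bulk() allows it, otherwise
 * close the buffer and start a new one, flushing whenever the bulk
 * window is full or the stack signals no further xmits. Stops the
 * txq when all QDIO buffers are in use.
 */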
4088 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4089 struct sk_buff *skb, unsigned int elements,
4090 struct qeth_hdr *hdr, unsigned int offset,
4091 unsigned int hd_len)
4093 unsigned int bytes = qdisc_pkt_len(skb);
4094 struct qeth_qdio_out_buffer *buffer;
4095 unsigned int next_element;
4096 struct netdev_queue *txq;
4097 bool stopped = false;
4100 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4101 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4103 /* Just a sanity check, the wake/stop logic should ensure that we always
4104 * get a free buffer.
4106 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4109 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4112 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4113 if (buffer->next_element_to_fill > 0) {
4114 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4115 queue->bulk_count++;
4118 if (queue->bulk_count >= queue->bulk_max)
4122 qeth_flush_queue(queue);
4124 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4125 queue->bulk_count)];
4127 /* Sanity-check again: */
4128 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4132 if (buffer->next_element_to_fill == 0 &&
4133 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
/* If a TX completion happens right _here_ and fails to wake
 * the txq, then our re-check below will catch the race.
4137 QETH_TXQ_STAT_INC(queue, stopped);
4138 netif_tx_stop_queue(txq);
4142 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4143 buffer->bytes += bytes;
4144 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4145 queue->prev_hdr = hdr;
4147 flush = __netdev_tx_sent_queue(txq, bytes,
4148 !stopped && netdev_xmit_more());
4150 if (flush || next_element >= queue->max_elements) {
4151 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4152 queue->bulk_count++;
4154 if (queue->bulk_count >= queue->bulk_max)
4158 qeth_flush_queue(queue);
4161 if (stopped && !qeth_out_queue_is_full(queue))
4162 netif_tx_start_queue(txq);
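/* TX path with packing support (used for non-IQD devices): under
 * queue->lock, switch the queue into packing mode when it fills up,
 * pack the skb into the current buffer if it fits, and flush primed
 * buffers starting at start_index. Stops the txq when all QDIO
 * buffers are in use.
 */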
4166 static int qeth_do_send_packet(struct qeth_card *card,
4167 struct qeth_qdio_out_q *queue,
4168 struct sk_buff *skb, struct qeth_hdr *hdr,
4169 unsigned int offset, unsigned int hd_len,
4170 unsigned int elements_needed)
4172 unsigned int start_index = queue->next_buf_to_fill;
4173 struct qeth_qdio_out_buffer *buffer;
4174 unsigned int next_element;
4175 struct netdev_queue *txq;
4176 bool stopped = false;
4177 int flush_count = 0;
4181 buffer = queue->bufs[queue->next_buf_to_fill];
4183 /* Just a sanity check, the wake/stop logic should ensure that we always
4184 * get a free buffer.
4186 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4189 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4191 /* check if we need to switch packing state of this queue */
4192 qeth_switch_to_packing_if_needed(queue);
4193 if (queue->do_pack) {
4195 /* does packet fit in current buffer? */
4196 if (buffer->next_element_to_fill + elements_needed >
4197 queue->max_elements) {
4198 /* ... no -> set state PRIMED */
4199 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4201 queue->next_buf_to_fill =
4202 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4203 buffer = queue->bufs[queue->next_buf_to_fill];
4205 /* We stepped forward, so sanity-check again: */
4206 if (atomic_read(&buffer->state) !=
4207 QETH_QDIO_BUF_EMPTY) {
4208 qeth_flush_buffers(queue, start_index,
4216 if (buffer->next_element_to_fill == 0 &&
4217 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
/* If a TX completion happens right _here_ and fails to wake
 * the txq, then our re-check below will catch the race.
4221 QETH_TXQ_STAT_INC(queue, stopped);
4222 netif_tx_stop_queue(txq);
4226 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4227 buffer->bytes += qdisc_pkt_len(skb);
4228 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4231 QETH_TXQ_STAT_INC(queue, skbs_pack);
4232 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4234 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4235 queue->next_buf_to_fill =
4236 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4240 qeth_flush_buffers(queue, start_index, flush_count);
4244 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4246 if (stopped && !qeth_out_queue_is_full(queue))
4247 netif_tx_start_queue(txq);
4251 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4252 unsigned int payload_len, struct sk_buff *skb,
4253 unsigned int proto_len)
4255 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4257 ext->hdr_tot_len = sizeof(*ext);
4258 ext->imb_hdr_no = 1;
4260 ext->hdr_version = 1;
4262 ext->payload_len = payload_len;
4263 ext->mss = skb_shinfo(skb)->gso_size;
4264 ext->dg_hdr_len = proto_len;
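/* Common TX entry point: pick the TSO vs. plain HW header size, make
 * the headroom writable via skb_cow_head(), add the HW header (pushed
 * onto the skb or taken from the header cache), fill it through
 * @fill_header, and dispatch to the bulking or packing send path.
 * A cache-allocated header is freed again if the send fails.
 */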
4267 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4268 struct qeth_qdio_out_q *queue, __be16 proto,
4269 void (*fill_header)(struct qeth_qdio_out_q *queue,
4270 struct qeth_hdr *hdr, struct sk_buff *skb,
4271 __be16 proto, unsigned int data_len))
4273 unsigned int proto_len, hw_hdr_len;
4274 unsigned int frame_len = skb->len;
4275 bool is_tso = skb_is_gso(skb);
4276 unsigned int data_offset = 0;
4277 struct qeth_hdr *hdr = NULL;
4278 unsigned int hd_len = 0;
4279 unsigned int elements;
4283 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4284 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4286 hw_hdr_len = sizeof(struct qeth_hdr);
4287 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4290 rc = skb_cow_head(skb, hw_hdr_len);
4294 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4298 if (is_tso || !push_len) {
4299 /* HW header needs its own buffer element. */
4300 hd_len = hw_hdr_len + proto_len;
4301 data_offset = push_len + proto_len;
4303 memset(hdr, 0, hw_hdr_len);
4304 fill_header(queue, hdr, skb, proto, frame_len);
4306 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4307 frame_len - proto_len, skb, proto_len);
4310 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4313 /* TODO: drop skb_orphan() once TX completion is fast enough */
4315 spin_lock(&queue->lock);
4316 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4318 spin_unlock(&queue->lock);
4321 if (rc && !push_len)
4322 kmem_cache_free(qeth_core_header_cache, hdr);
4326 EXPORT_SYMBOL_GPL(qeth_xmit);
4328 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4329 struct qeth_reply *reply, unsigned long data)
4331 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4332 struct qeth_ipacmd_setadpparms *setparms;
4334 QETH_CARD_TEXT(card, 4, "prmadpcb");
4336 setparms = &(cmd->data.setadapterparms);
4337 if (qeth_setadpparms_inspect_rc(cmd)) {
4338 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4339 setparms->data.mode = SET_PROMISC_MODE_OFF;
4341 card->info.promisc_mode = setparms->data.mode;
4342 return (cmd->hdr.return_code) ? -EIO : 0;
4345 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4347 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4348 SET_PROMISC_MODE_OFF;
4349 struct qeth_cmd_buffer *iob;
4350 struct qeth_ipa_cmd *cmd;
4352 QETH_CARD_TEXT(card, 4, "setprom");
4353 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4355 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4356 SETADP_DATA_SIZEOF(mode));
4359 cmd = __ipa_cmd(iob);
4360 cmd->data.setadapterparms.data.mode = mode;
4361 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4363 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4365 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4366 struct qeth_reply *reply, unsigned long data)
4368 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4369 struct qeth_ipacmd_setadpparms *adp_cmd;
4371 QETH_CARD_TEXT(card, 4, "chgmaccb");
4372 if (qeth_setadpparms_inspect_rc(cmd))
4375 adp_cmd = &cmd->data.setadapterparms;
4376 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4377 return -EADDRNOTAVAIL;
4379 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4380 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4381 return -EADDRNOTAVAIL;
4383 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4387 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4390 struct qeth_cmd_buffer *iob;
4391 struct qeth_ipa_cmd *cmd;
4393 QETH_CARD_TEXT(card, 4, "chgmac");
4395 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4396 SETADP_DATA_SIZEOF(change_addr));
4399 cmd = __ipa_cmd(iob);
4400 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4401 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4402 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4403 card->dev->dev_addr);
4404 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4408 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4410 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4411 struct qeth_reply *reply, unsigned long data)
4413 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4414 struct qeth_set_access_ctrl *access_ctrl_req;
4416 QETH_CARD_TEXT(card, 4, "setaccb");
4418 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4419 QETH_CARD_TEXT_(card, 2, "rc=%d",
4420 cmd->data.setadapterparms.hdr.return_code);
4421 if (cmd->data.setadapterparms.hdr.return_code !=
4422 SET_ACCESS_CTRL_RC_SUCCESS)
4423 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4424 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4425 cmd->data.setadapterparms.hdr.return_code);
4426 switch (qeth_setadpparms_inspect_rc(cmd)) {
4427 case SET_ACCESS_CTRL_RC_SUCCESS:
4428 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4429 dev_info(&card->gdev->dev,
4430 "QDIO data connection isolation is deactivated\n");
4432 dev_info(&card->gdev->dev,
4433 "QDIO data connection isolation is activated\n");
4435 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4436 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4439 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4440 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4443 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4444 dev_err(&card->gdev->dev, "Adapter does not "
4445 "support QDIO data connection isolation\n");
4447 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4448 dev_err(&card->gdev->dev,
4449 "Adapter is dedicated. "
4450 "QDIO data connection isolation not supported\n");
4452 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4453 dev_err(&card->gdev->dev,
4454 "TSO does not permit QDIO data connection isolation\n");
4456 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4457 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4458 "support reflective relay mode\n");
4460 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4461 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4462 "enabled at the adjacent switch port");
4464 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4465 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4466 "at the adjacent switch failed\n");
4467 /* benign error while disabling ISOLATION_MODE_FWD */
4474 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4475 enum qeth_ipa_isolation_modes mode)
4478 struct qeth_cmd_buffer *iob;
4479 struct qeth_ipa_cmd *cmd;
4480 struct qeth_set_access_ctrl *access_ctrl_req;
4482 QETH_CARD_TEXT(card, 4, "setacctl");
4484 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4485 dev_err(&card->gdev->dev,
4486 "Adapter does not support QDIO data connection isolation\n");
4490 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4491 SETADP_DATA_SIZEOF(set_access_ctrl));
4494 cmd = __ipa_cmd(iob);
4495 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4496 access_ctrl_req->subcmd_code = mode;
4498 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4501 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: send failed\n",
4503 rc, CARD_DEVID(card));
4509 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4511 struct qeth_card *card;
4513 card = dev->ml_priv;
4514 QETH_CARD_TEXT(card, 4, "txtimeo");
4515 qeth_schedule_recovery(card);
4517 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
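/* Emulate MII PHY registers for ioctl users: qeth devices have no
 * real MDIO bus, so plausible register values are synthesized from
 * the card's link type, MAC address and error counters.
 */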
4519 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4521 struct qeth_card *card = dev->ml_priv;
4525 case MII_BMCR: /* Basic mode control register */
4527 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4528 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4529 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4530 rc |= BMCR_SPEED100;
4532 case MII_BMSR: /* Basic mode status register */
4533 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4534 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4537 case MII_PHYSID1: /* PHYS ID 1 */
4538 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4540 rc = (rc >> 5) & 0xFFFF;
4542 case MII_PHYSID2: /* PHYS ID 2 */
4543 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4545 case MII_ADVERTISE: /* Advertisement control reg */
4548 case MII_LPA: /* Link partner ability reg */
4549 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4550 LPA_100BASE4 | LPA_LPACK;
4552 case MII_EXPANSION: /* Expansion register */
4554 case MII_DCOUNTER: /* disconnect counter */
4556 case MII_FCSCOUNTER: /* false carrier counter */
4558 case MII_NWAYTEST: /* N-way auto-neg test register */
4560 case MII_RERRCOUNTER: /* rx error counter */
4561 rc = card->stats.rx_length_errors +
4562 card->stats.rx_frame_errors +
4563 card->stats.rx_fifo_errors;
4565 case MII_SREVISION: /* silicon revision */
4567 case MII_RESV1: /* reserved 1 */
4569 case MII_LBRERROR: /* loopback, rx, bypass error */
4571 case MII_PHYADDR: /* physical address */
4573 case MII_RESV2: /* reserved 2 */
4575 case MII_TPISTATUS: /* TPI status for 10mbps */
4577 case MII_NCONFIG: /* network interface config */
4585 static int qeth_snmp_command_cb(struct qeth_card *card,
4586 struct qeth_reply *reply, unsigned long data)
4588 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4589 struct qeth_arp_query_info *qinfo = reply->param;
4590 struct qeth_ipacmd_setadpparms *adp_cmd;
4591 unsigned int data_len;
4594 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4596 if (cmd->hdr.return_code) {
4597 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4600 if (cmd->data.setadapterparms.hdr.return_code) {
4601 cmd->hdr.return_code =
4602 cmd->data.setadapterparms.hdr.return_code;
4603 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4607 adp_cmd = &cmd->data.setadapterparms;
4608 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4609 if (adp_cmd->hdr.seq_no == 1) {
4610 snmp_data = &adp_cmd->data.snmp;
4612 snmp_data = &adp_cmd->data.snmp.request;
4613 data_len -= offsetof(struct qeth_snmp_cmd, request);
4616 /* check if there is enough room in userspace */
4617 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4618 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4621 QETH_CARD_TEXT_(card, 4, "snore%i",
4622 cmd->data.setadapterparms.hdr.used_total);
4623 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4624 cmd->data.setadapterparms.hdr.seq_no);
4625 /* copy entries to user buffer */
4626 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4627 qinfo->udata_offset += data_len;
4629 if (cmd->data.setadapterparms.hdr.seq_no <
4630 cmd->data.setadapterparms.hdr.used_total)
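/* SETADP replies can arrive in several parts. The callback above keeps
 * the command pending by returning non-zero until the last fragment has
 * been copied, i.e. until hdr.seq_no has caught up with hdr.used_total.
 */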
4635 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4637 struct qeth_snmp_ureq __user *ureq;
4638 struct qeth_cmd_buffer *iob;
4639 unsigned int req_len;
4640 struct qeth_arp_query_info qinfo = {0, };
4643 QETH_CARD_TEXT(card, 3, "snmpcmd");
4645 if (IS_VM_NIC(card))
4648 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4652 ureq = (struct qeth_snmp_ureq __user *) udata;
4653 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4654 get_user(req_len, &ureq->hdr.req_len))
4657 /* Sanitize user input, to avoid overflows in iob size calculation: */
4658 if (req_len > QETH_BUFSIZE)
4661 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4665 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4666 &ureq->cmd, req_len)) {
4671 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4676 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4678 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4680 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4681 CARD_DEVID(card), rc);
4683 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
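/* Layout of the user buffer that qeth_snmp_command() above parses and
 * fills (see struct qeth_snmp_ureq in qeth_core_mpc.h):
 *
 *	struct qeth_snmp_ureq {
 *		struct qeth_snmp_ureq_hdr hdr;	(data_len, req_len)
 *		struct qeth_snmp_cmd cmd;	(the request itself)
 *	};
 *
 * Replies are appended behind the header, which is why udata_offset
 * starts at sizeof(struct qeth_snmp_ureq_hdr).
 */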
4691 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4692 struct qeth_reply *reply,
4695 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4696 struct qeth_qoat_priv *priv = reply->param;
4699 QETH_CARD_TEXT(card, 3, "qoatcb");
4700 if (qeth_setadpparms_inspect_rc(cmd))
4703 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4705 if (resdatalen > (priv->buffer_len - priv->response_len))
4708 memcpy(priv->buffer + priv->response_len,
4709 &cmd->data.setadapterparms.hdr, resdatalen);
4710 priv->response_len += resdatalen;
4712 if (cmd->data.setadapterparms.hdr.seq_no <
4713 cmd->data.setadapterparms.hdr.used_total)
4718 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4721 struct qeth_cmd_buffer *iob;
4722 struct qeth_ipa_cmd *cmd;
4723 struct qeth_query_oat *oat_req;
4724 struct qeth_query_oat_data oat_data;
4725 struct qeth_qoat_priv priv;
4728 QETH_CARD_TEXT(card, 3, "qoatcmd");
4730 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4733 if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4736 priv.buffer_len = oat_data.buffer_len;
4737 priv.response_len = 0;
4738 priv.buffer = vzalloc(oat_data.buffer_len);
4742 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4743 SETADP_DATA_SIZEOF(query_oat));
4748 cmd = __ipa_cmd(iob);
4749 oat_req = &cmd->data.setadapterparms.data.query_oat;
4750 oat_req->subcmd_code = oat_data.command;
4752 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4754 tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4755 u64_to_user_ptr(oat_data.ptr);
4756 oat_data.response_len = priv.response_len;
4758 if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4759 copy_to_user(udata, &oat_data, sizeof(oat_data)))
4768 static int qeth_query_card_info_cb(struct qeth_card *card,
4769 struct qeth_reply *reply, unsigned long data)
4771 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4772 struct qeth_link_info *link_info = reply->param;
4773 struct qeth_query_card_info *card_info;
4775 QETH_CARD_TEXT(card, 2, "qcrdincb");
4776 if (qeth_setadpparms_inspect_rc(cmd))
4779 card_info = &cmd->data.setadapterparms.data.card_info;
4780 netdev_dbg(card->dev,
4781 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
4782 card_info->card_type, card_info->port_mode,
4783 card_info->port_speed);
4785 switch (card_info->port_mode) {
4786 case CARD_INFO_PORTM_FULLDUPLEX:
4787 link_info->duplex = DUPLEX_FULL;
4789 case CARD_INFO_PORTM_HALFDUPLEX:
4790 link_info->duplex = DUPLEX_HALF;
4793 link_info->duplex = DUPLEX_UNKNOWN;
4796 switch (card_info->card_type) {
4797 case CARD_INFO_TYPE_1G_COPPER_A:
4798 case CARD_INFO_TYPE_1G_COPPER_B:
4799 link_info->speed = SPEED_1000;
4800 link_info->port = PORT_TP;
4802 case CARD_INFO_TYPE_1G_FIBRE_A:
4803 case CARD_INFO_TYPE_1G_FIBRE_B:
4804 link_info->speed = SPEED_1000;
4805 link_info->port = PORT_FIBRE;
4807 case CARD_INFO_TYPE_10G_FIBRE_A:
4808 case CARD_INFO_TYPE_10G_FIBRE_B:
4809 link_info->speed = SPEED_10000;
4810 link_info->port = PORT_FIBRE;
4813 switch (card_info->port_speed) {
4814 case CARD_INFO_PORTS_10M:
4815 link_info->speed = SPEED_10;
4817 case CARD_INFO_PORTS_100M:
4818 link_info->speed = SPEED_100;
4820 case CARD_INFO_PORTS_1G:
4821 link_info->speed = SPEED_1000;
4823 case CARD_INFO_PORTS_10G:
4824 link_info->speed = SPEED_10000;
4826 case CARD_INFO_PORTS_25G:
4827 link_info->speed = SPEED_25000;
4830 link_info->speed = SPEED_UNKNOWN;
4833 link_info->port = PORT_OTHER;
4839 int qeth_query_card_info(struct qeth_card *card,
4840 struct qeth_link_info *link_info)
4842 struct qeth_cmd_buffer *iob;
4844 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4845 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4847 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4851 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
4854 static int qeth_init_link_info_oat_cb(struct qeth_card *card,
4855 struct qeth_reply *reply_priv,
4858 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4859 struct qeth_link_info *link_info = reply_priv->param;
4860 struct qeth_query_oat_physical_if *phys_if;
4861 struct qeth_query_oat_reply *reply;
4863 if (qeth_setadpparms_inspect_rc(cmd))
4866 /* Multi-part reply is unexpected, don't bother: */
4867 if (cmd->data.setadapterparms.hdr.used_total > 1)
4870 /* Expect the reply to start with phys_if data: */
4871 reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
4872 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
4873 reply->length < sizeof(*reply))
4876 phys_if = &reply->phys_if;
4878 switch (phys_if->speed_duplex) {
4879 case QETH_QOAT_PHYS_SPEED_10M_HALF:
4880 link_info->speed = SPEED_10;
4881 link_info->duplex = DUPLEX_HALF;
4883 case QETH_QOAT_PHYS_SPEED_10M_FULL:
4884 link_info->speed = SPEED_10;
4885 link_info->duplex = DUPLEX_FULL;
4887 case QETH_QOAT_PHYS_SPEED_100M_HALF:
4888 link_info->speed = SPEED_100;
4889 link_info->duplex = DUPLEX_HALF;
4891 case QETH_QOAT_PHYS_SPEED_100M_FULL:
4892 link_info->speed = SPEED_100;
4893 link_info->duplex = DUPLEX_FULL;
4895 case QETH_QOAT_PHYS_SPEED_1000M_HALF:
4896 link_info->speed = SPEED_1000;
4897 link_info->duplex = DUPLEX_HALF;
4899 case QETH_QOAT_PHYS_SPEED_1000M_FULL:
4900 link_info->speed = SPEED_1000;
4901 link_info->duplex = DUPLEX_FULL;
4903 case QETH_QOAT_PHYS_SPEED_10G_FULL:
4904 link_info->speed = SPEED_10000;
4905 link_info->duplex = DUPLEX_FULL;
4907 case QETH_QOAT_PHYS_SPEED_25G_FULL:
4908 link_info->speed = SPEED_25000;
4909 link_info->duplex = DUPLEX_FULL;
4911 case QETH_QOAT_PHYS_SPEED_UNKNOWN:
4913 link_info->speed = SPEED_UNKNOWN;
4914 link_info->duplex = DUPLEX_UNKNOWN;
4918 switch (phys_if->media_type) {
4919 case QETH_QOAT_PHYS_MEDIA_COPPER:
4920 link_info->port = PORT_TP;
4921 link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4923 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4924 link_info->port = PORT_FIBRE;
4925 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4927 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
4928 link_info->port = PORT_FIBRE;
4929 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4932 link_info->port = PORT_OTHER;
4933 link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
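/* qeth_init_link_info() below first derives conservative defaults from
 * card->info.link_type, and then, if the adapter supports QUERY OAT,
 * overrides speed/duplex/port/link_mode with whatever
 * qeth_init_link_info_oat_cb() above managed to extract.
 */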
4940 static void qeth_init_link_info(struct qeth_card *card)
4942 card->info.link_info.duplex = DUPLEX_FULL;
4944 if (IS_IQD(card) || IS_VM_NIC(card)) {
4945 card->info.link_info.speed = SPEED_10000;
4946 card->info.link_info.port = PORT_FIBRE;
4947 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4949 switch (card->info.link_type) {
4950 case QETH_LINK_TYPE_FAST_ETH:
4951 case QETH_LINK_TYPE_LANE_ETH100:
4952 card->info.link_info.speed = SPEED_100;
4953 card->info.link_info.port = PORT_TP;
4955 case QETH_LINK_TYPE_GBIT_ETH:
4956 case QETH_LINK_TYPE_LANE_ETH1000:
4957 card->info.link_info.speed = SPEED_1000;
4958 card->info.link_info.port = PORT_FIBRE;
4960 case QETH_LINK_TYPE_10GBIT_ETH:
4961 card->info.link_info.speed = SPEED_10000;
4962 card->info.link_info.port = PORT_FIBRE;
4964 case QETH_LINK_TYPE_25GBIT_ETH:
4965 card->info.link_info.speed = SPEED_25000;
4966 card->info.link_info.port = PORT_FIBRE;
4969 dev_info(&card->gdev->dev, "Unknown link type %x\n",
4970 card->info.link_type);
4971 card->info.link_info.speed = SPEED_UNKNOWN;
4972 card->info.link_info.port = PORT_OTHER;
4975 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
4978 /* Get more accurate data via QUERY OAT: */
4979 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4980 struct qeth_link_info link_info;
4981 struct qeth_cmd_buffer *iob;
4983 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4984 SETADP_DATA_SIZEOF(query_oat));
4986 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
4987 struct qeth_query_oat *oat_req;
4989 oat_req = &cmd->data.setadapterparms.data.query_oat;
4990 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
4992 if (!qeth_send_ipa_cmd(card, iob,
4993 qeth_init_link_info_oat_cb,
4995 if (link_info.speed != SPEED_UNKNOWN)
4996 card->info.link_info.speed = link_info.speed;
4997 if (link_info.duplex != DUPLEX_UNKNOWN)
4998 card->info.link_info.duplex = link_info.duplex;
4999 if (link_info.port != PORT_OTHER)
5000 card->info.link_info.port = link_info.port;
5001 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
5002 card->info.link_info.link_mode = link_info.link_mode;
5009 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
5010 * @card: pointer to a qeth_card
5013 * 0, if a MAC address has been set for the card's netdevice
5014 * a non-zero error code, for the various error conditions
5016 int qeth_vm_request_mac(struct qeth_card *card)
5018 struct diag26c_mac_resp *response;
5019 struct diag26c_mac_req *request;
5022 QETH_CARD_TEXT(card, 2, "vmreqmac");
5024 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
5025 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
5026 if (!request || !response) {
5031 request->resp_buf_len = sizeof(*response);
5032 request->resp_version = DIAG26C_VERSION2;
5033 request->op_code = DIAG26C_GET_MAC;
5034 request->devno = card->info.ddev_devno;
5036 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5037 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5038 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5041 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5043 if (request->resp_buf_len < sizeof(*response) ||
5044 response->version != request->resp_version) {
5046 QETH_CARD_TEXT(card, 2, "badresp");
5047 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
5048 sizeof(request->resp_buf_len));
5049 } else if (!is_valid_ether_addr(response->mac)) {
5051 QETH_CARD_TEXT(card, 2, "badmac");
5052 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5054 ether_addr_copy(card->dev->dev_addr, response->mac);
5062 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
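/* The DIAG 0x26C handshake above is deliberately defensive: the reply
 * is only trusted if the hypervisor echoed the expected buffer size and
 * version, and the returned MAC must pass is_valid_ether_addr() before
 * it is copied into the netdevice. Request and response are allocated
 * with GFP_DMA, presumably because the diagnose requires 31-bit
 * addressable storage.
 */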
5064 static void qeth_determine_capabilities(struct qeth_card *card)
5066 struct qeth_channel *channel = &card->data;
5067 struct ccw_device *ddev = channel->ccwdev;
5069 int ddev_offline = 0;
5071 QETH_CARD_TEXT(card, 2, "detcapab");
5072 if (!ddev->online) {
5074 rc = qeth_start_channel(channel);
5076 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5081 rc = qeth_read_conf_data(card);
5083 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5084 CARD_DEVID(card), rc);
5085 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5089 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5091 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5093 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5094 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5095 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5096 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5097 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5098 if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
5099 (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
5100 (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
5101 dev_info(&card->gdev->dev,
5102 "Completion Queueing supported\n");
5104 card->options.cq = QETH_CQ_NOTAVAILABLE;
5108 if (ddev_offline == 1)
5109 qeth_stop_channel(channel);
5114 static void qeth_read_ccw_conf_data(struct qeth_card *card)
5116 struct qeth_card_info *info = &card->info;
5117 struct ccw_device *cdev = CARD_DDEV(card);
5118 struct ccw_dev_id dev_id;
5120 QETH_CARD_TEXT(card, 2, "ccwconfd");
5121 ccw_device_get_id(cdev, &dev_id);
5123 info->ddev_devno = dev_id.devno;
5124 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5125 !ccw_device_get_iid(cdev, &info->iid) &&
5126 !ccw_device_get_chid(cdev, 0, &info->chid);
5127 info->ssid = dev_id.ssid;
5129 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5130 info->chid, info->chpid);
5132 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5133 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5134 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5135 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5136 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5137 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5138 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5141 static int qeth_qdio_establish(struct qeth_card *card)
5143 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5144 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5145 struct qeth_qib_parms *qib_parms = NULL;
5146 struct qdio_initialize init_data;
5150 QETH_CARD_TEXT(card, 2, "qdioest");
5152 if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5153 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5157 qeth_fill_qib_parms(card, qib_parms);
5160 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5161 if (card->options.cq == QETH_CQ_ENABLED)
5162 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5164 for (i = 0; i < card->qdio.no_out_queues; i++)
5165 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5167 memset(&init_data, 0, sizeof(struct qdio_initialize));
5168 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5170 init_data.qib_param_field_format = 0;
5171 init_data.qib_param_field = (void *)qib_parms;
5172 init_data.no_input_qs = card->qdio.no_in_queues;
5173 init_data.no_output_qs = card->qdio.no_out_queues;
5174 init_data.input_handler = qeth_qdio_input_handler;
5175 init_data.output_handler = qeth_qdio_output_handler;
5176 init_data.irq_poll = qeth_qdio_poll;
5177 init_data.int_parm = (unsigned long) card;
5178 init_data.input_sbal_addr_array = in_sbal_ptrs;
5179 init_data.output_sbal_addr_array = out_sbal_ptrs;
5181 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5182 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5183 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5184 init_data.no_output_qs);
5186 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5189 rc = qdio_establish(CARD_DDEV(card), &init_data);
5191 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5192 qdio_free(CARD_DDEV(card));
5196 switch (card->options.cq) {
5197 case QETH_CQ_ENABLED:
5198 dev_info(&card->gdev->dev, "Completion Queue support enabled");
5200 case QETH_CQ_DISABLED:
5201 dev_info(&card->gdev->dev, "Completion Queue support disabled");
5212 static void qeth_core_free_card(struct qeth_card *card)
5214 QETH_CARD_TEXT(card, 2, "freecrd");
5216 unregister_service_level(&card->qeth_service_level);
5217 debugfs_remove_recursive(card->debugfs);
5218 qeth_put_cmd(card->read_cmd);
5219 destroy_workqueue(card->event_wq);
5220 dev_set_drvdata(&card->gdev->dev, NULL);
5224 static void qeth_trace_features(struct qeth_card *card)
5226 QETH_CARD_TEXT(card, 2, "features");
5227 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5228 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5229 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5230 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5231 sizeof(card->info.diagass_support));
5234 static struct ccw_device_id qeth_ids[] = {
5235 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5236 .driver_info = QETH_CARD_TYPE_OSD},
5237 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5238 .driver_info = QETH_CARD_TYPE_IQD},
5239 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5240 .driver_info = QETH_CARD_TYPE_OSM},
5241 #ifdef CONFIG_QETH_OSX
5242 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5243 .driver_info = QETH_CARD_TYPE_OSX},
5247 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5249 static struct ccw_driver qeth_ccw_driver = {
5251 .owner = THIS_MODULE,
5255 .probe = ccwgroup_probe_ccwdev,
5256 .remove = ccwgroup_remove_ccwdev,
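/* qeth_hardsetup_card() below performs the cold-start sequence for the
 * card: start the read/write/data channels, run the IDX activation
 * handshake, initialize the MPC connection, then issue STARTLAN and the
 * IPA capability queries. Transient failures tear the channels down and
 * restart the sequence (the "Retrying to do IDX activates" path), while
 * -ERESTARTSYS aborts the bring-up (the "break1".."break3" traces).
 */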
5259 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5264 QETH_CARD_TEXT(card, 2, "hrdsetup");
5265 atomic_set(&card->force_alloc_skb, 0);
5266 rc = qeth_update_from_chp_desc(card);
5271 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5273 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5274 qeth_stop_channel(&card->data);
5275 qeth_stop_channel(&card->write);
5276 qeth_stop_channel(&card->read);
5277 qdio_free(CARD_DDEV(card));
5279 rc = qeth_start_channel(&card->read);
5282 rc = qeth_start_channel(&card->write);
5285 rc = qeth_start_channel(&card->data);
5289 if (rc == -ERESTARTSYS) {
5290 QETH_CARD_TEXT(card, 2, "break1");
5293 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5300 qeth_determine_capabilities(card);
5301 qeth_read_ccw_conf_data(card);
5302 qeth_idx_init(card);
5304 rc = qeth_idx_activate_read_channel(card);
5306 QETH_CARD_TEXT(card, 2, "break2");
5309 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5316 rc = qeth_idx_activate_write_channel(card);
5318 QETH_CARD_TEXT(card, 2, "break3");
5321 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5327 card->read_or_write_problem = 0;
5328 rc = qeth_mpc_initialize(card);
5330 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5334 rc = qeth_send_startlan(card);
5336 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5337 if (rc == -ENETDOWN) {
5338 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5339 *carrier_ok = false;
5347 card->options.ipa4.supported = 0;
5348 card->options.ipa6.supported = 0;
5349 card->options.adp.supported = 0;
5350 card->options.sbp.supported_funcs = 0;
5351 card->info.diagass_support = 0;
5352 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5355 if (qeth_is_supported(card, IPA_IPV6)) {
5356 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5360 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5361 rc = qeth_query_setadapterparms(card);
5363 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5367 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5368 rc = qeth_query_setdiagass(card);
5370 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5373 qeth_trace_features(card);
5375 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5376 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5377 card->info.hwtrap = 0;
5379 if (card->options.isolation != ISOLATION_MODE_NONE) {
5380 rc = qeth_setadpparms_set_access_ctrl(card,
5381 card->options.isolation);
5386 qeth_init_link_info(card);
5388 rc = qeth_init_qdio_queues(card);
5390 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5396 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5397 "an error on the device\n");
5398 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5399 CARD_DEVID(card), rc);
5403 static int qeth_set_online(struct qeth_card *card,
5404 const struct qeth_discipline *disc)
5409 mutex_lock(&card->conf_mutex);
5410 QETH_CARD_TEXT(card, 2, "setonlin");
5412 rc = qeth_hardsetup_card(card, &carrier_ok);
5414 QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5419 qeth_print_status_message(card);
5421 if (card->dev->reg_state != NETREG_REGISTERED)
5422 /* no need for locking / error handling at this early stage: */
5423 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5425 rc = disc->set_online(card, carrier_ok);
5429 /* let user space know that the device is online */
5430 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5432 mutex_unlock(&card->conf_mutex);
5437 qeth_qdio_clear_card(card, 0);
5438 qeth_clear_working_pool_list(card);
5439 qeth_flush_local_addrs(card);
5441 qeth_stop_channel(&card->data);
5442 qeth_stop_channel(&card->write);
5443 qeth_stop_channel(&card->read);
5444 qdio_free(CARD_DDEV(card));
5446 mutex_unlock(&card->conf_mutex);
5450 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5455 mutex_lock(&card->conf_mutex);
5456 QETH_CARD_TEXT(card, 3, "setoffl");
5458 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5459 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5460 card->info.hwtrap = 1;
5463 /* cancel any stalled cmd that might block the rtnl: */
5464 qeth_clear_ipacmd_list(card);
5467 card->info.open_when_online = card->dev->flags & IFF_UP;
5468 dev_close(card->dev);
5469 netif_device_detach(card->dev);
5470 netif_carrier_off(card->dev);
5473 cancel_work_sync(&card->rx_mode_work);
5475 disc->set_offline(card);
5477 qeth_qdio_clear_card(card, 0);
5478 qeth_drain_output_queues(card);
5479 qeth_clear_working_pool_list(card);
5480 qeth_flush_local_addrs(card);
5481 card->info.promisc_mode = 0;
5483 rc = qeth_stop_channel(&card->data);
5484 rc2 = qeth_stop_channel(&card->write);
5485 rc3 = qeth_stop_channel(&card->read);
5487 rc = (rc2) ? rc2 : rc3;
5489 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5490 qdio_free(CARD_DDEV(card));
5492 /* let user space know that the device is offline */
5493 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5495 mutex_unlock(&card->conf_mutex);
5498 EXPORT_SYMBOL_GPL(qeth_set_offline);
5500 static int qeth_do_reset(void *data)
5502 const struct qeth_discipline *disc;
5503 struct qeth_card *card = data;
5506 /* Lock-free, other users will block until we are done. */
5507 disc = card->discipline;
5509 QETH_CARD_TEXT(card, 2, "recover1");
5510 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5512 QETH_CARD_TEXT(card, 2, "recover2");
5513 dev_warn(&card->gdev->dev,
5514 "A recovery process has been started for the device\n");
5516 qeth_set_offline(card, disc, true);
5517 rc = qeth_set_online(card, disc);
5519 dev_info(&card->gdev->dev,
5520 "Device successfully recovered!\n");
5522 ccwgroup_set_offline(card->gdev);
5523 dev_warn(&card->gdev->dev,
5524 "The qeth device driver failed to recover an error on the device\n");
5526 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5527 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5531 #if IS_ENABLED(CONFIG_QETH_L3)
5532 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5533 struct qeth_hdr *hdr)
5535 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5536 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5537 struct net_device *dev = skb->dev;
5539 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5540 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5541 "FAKELL", skb->len);
5545 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5546 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5548 unsigned char tg_addr[ETH_ALEN];
5550 skb_reset_network_header(skb);
5551 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5552 case QETH_CAST_MULTICAST:
5553 if (prot == ETH_P_IP)
5554 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5556 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5557 QETH_CARD_STAT_INC(card, rx_multicast);
5559 case QETH_CAST_BROADCAST:
5560 ether_addr_copy(tg_addr, dev->broadcast);
5561 QETH_CARD_STAT_INC(card, rx_multicast);
5564 if (card->options.sniffer)
5565 skb->pkt_type = PACKET_OTHERHOST;
5566 ether_addr_copy(tg_addr, dev->dev_addr);
5569 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5570 dev_hard_header(skb, dev, prot, tg_addr,
5571 &l3_hdr->next_hop.rx.src_mac, skb->len);
5573 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5577 /* copy VLAN tag from hdr into skb */
5578 if (!card->options.sniffer &&
5579 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5580 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5581 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5583 l3_hdr->next_hop.rx.vlan_id;
5585 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5590 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5591 struct qeth_hdr *hdr, bool uses_frags)
5593 struct napi_struct *napi = &card->napi;
5596 switch (hdr->hdr.l2.id) {
5597 #if IS_ENABLED(CONFIG_QETH_L3)
5598 case QETH_HEADER_TYPE_LAYER3:
5599 qeth_l3_rebuild_skb(card, skb, hdr);
5600 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5603 case QETH_HEADER_TYPE_LAYER2:
5604 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5609 napi_free_frags(napi);
5611 dev_kfree_skb_any(skb);
5615 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5616 skb->ip_summed = CHECKSUM_UNNECESSARY;
5617 QETH_CARD_STAT_INC(card, rx_skb_csum);
5619 skb->ip_summed = CHECKSUM_NONE;
5622 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5623 QETH_CARD_STAT_INC(card, rx_packets);
5624 if (skb_is_nonlinear(skb)) {
5625 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5626 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5627 skb_shinfo(skb)->nr_frags);
5631 napi_gro_frags(napi);
5633 skb->protocol = eth_type_trans(skb, skb->dev);
5634 napi_gro_receive(napi, skb);
5638 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5640 struct page *page = virt_to_page(data);
5641 unsigned int next_frag;
5643 next_frag = skb_shinfo(skb)->nr_frags;
5645 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5649 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5651 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
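/* qeth_extract_skb() below walks the SBAL elements of one RX buffer and
 * builds the next packet's skb. Packets up to priv->rx_copybreak bytes
 * (the ethtool rx-copybreak tunable) are copied into a fresh linear
 * skb; larger packets are attached as page frags via napi_get_frags(),
 * unless card->force_alloc_skb temporarily forces linear allocation.
 */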
5654 static int qeth_extract_skb(struct qeth_card *card,
5655 struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5658 struct qeth_priv *priv = netdev_priv(card->dev);
5659 struct qdio_buffer *buffer = qethbuffer->buffer;
5660 struct napi_struct *napi = &card->napi;
5661 struct qdio_buffer_element *element;
5662 unsigned int linear_len = 0;
5663 bool uses_frags = false;
5664 int offset = *__offset;
5665 bool use_rx_sg = false;
5666 unsigned int headroom;
5667 struct qeth_hdr *hdr;
5668 struct sk_buff *skb;
5671 element = &buffer->element[*element_no];
5674 /* qeth_hdr must not cross element boundaries */
5675 while (element->length < offset + sizeof(struct qeth_hdr)) {
5676 if (qeth_is_last_sbale(element))
5682 hdr = phys_to_virt(element->addr) + offset;
5683 offset += sizeof(*hdr);
5686 switch (hdr->hdr.l2.id) {
5687 case QETH_HEADER_TYPE_LAYER2:
5688 skb_len = hdr->hdr.l2.pkt_length;
5689 linear_len = ETH_HLEN;
5692 case QETH_HEADER_TYPE_LAYER3:
5693 skb_len = hdr->hdr.l3.length;
5694 if (!IS_LAYER3(card)) {
5695 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5699 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5700 linear_len = ETH_HLEN;
5705 if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5706 linear_len = sizeof(struct ipv6hdr);
5708 linear_len = sizeof(struct iphdr);
5709 headroom = ETH_HLEN;
5712 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5713 QETH_CARD_STAT_INC(card, rx_frame_errors);
5715 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5717 /* Can't determine packet length, drop the whole buffer. */
5718 return -EPROTONOSUPPORT;
5721 if (skb_len < linear_len) {
5722 QETH_CARD_STAT_INC(card, rx_dropped_runt);
5726 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5727 (skb_len > READ_ONCE(priv->rx_copybreak) &&
5728 !atomic_read(&card->force_alloc_skb));
5731 /* QETH_CQ_ENABLED only: */
5732 if (qethbuffer->rx_skb &&
5733 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5734 skb = qethbuffer->rx_skb;
5735 qethbuffer->rx_skb = NULL;
5739 skb = napi_get_frags(napi);
5741 /* -ENOMEM, no point in falling back further. */
5742 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5746 if (skb_tailroom(skb) >= linear_len + headroom) {
5751 netdev_info_once(card->dev,
5752 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5753 linear_len + headroom, skb_tailroom(skb));
5754 /* Shouldn't happen. Don't optimize, fall back to linear skb. */
5757 linear_len = skb_len;
5758 skb = napi_alloc_skb(napi, linear_len + headroom);
5760 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5766 skb_reserve(skb, headroom);
5769 int data_len = min(skb_len, (int)(element->length - offset));
5770 char *data = phys_to_virt(element->addr) + offset;
5772 skb_len -= data_len;
5775 /* Extract data from current element: */
5776 if (skb && data_len) {
5778 unsigned int copy_len;
5780 copy_len = min_t(unsigned int, linear_len,
5783 skb_put_data(skb, data, copy_len);
5784 linear_len -= copy_len;
5785 data_len -= copy_len;
5790 qeth_create_skb_frag(skb, data, data_len);
5793 /* Step forward to next element: */
5795 if (qeth_is_last_sbale(element)) {
5796 QETH_CARD_TEXT(card, 4, "unexeob");
5797 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5800 napi_free_frags(napi);
5802 dev_kfree_skb_any(skb);
5803 QETH_CARD_STAT_INC(card,
5813 /* This packet was skipped, go get another one: */
5817 *element_no = element - &buffer->element[0];
5820 qeth_receive_skb(card, skb, hdr, uses_frags);
5824 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5825 struct qeth_qdio_buffer *buf, bool *done)
5827 unsigned int work_done = 0;
5830 if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5831 &card->rx.e_offset)) {
5843 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5845 struct qeth_rx *ctx = &card->rx;
5846 unsigned int work_done = 0;
5848 while (budget > 0) {
5849 struct qeth_qdio_buffer *buffer;
5850 unsigned int skbs_done = 0;
5853 /* Fetch completed RX buffers: */
5854 if (!card->rx.b_count) {
5855 card->rx.qdio_err = 0;
5856 card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
5859 &card->rx.qdio_err);
5860 if (card->rx.b_count <= 0) {
5861 card->rx.b_count = 0;
5866 /* Process one completed RX buffer: */
5867 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5868 if (!(card->rx.qdio_err &&
5869 qeth_check_qdio_errors(card, buffer->buffer,
5870 card->rx.qdio_err, "qinerr")))
5871 skbs_done = qeth_extract_skbs(card, budget, buffer,
5876 work_done += skbs_done;
5877 budget -= skbs_done;
5880 QETH_CARD_STAT_INC(card, rx_bufs);
5881 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5882 buffer->pool_entry = NULL;
5885 ctx->bufs_refill -= qeth_rx_refill_queue(card,
5888 /* Step forward to next buffer: */
5889 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5890 card->rx.buf_element = 0;
5891 card->rx.e_offset = 0;
5898 static void qeth_cq_poll(struct qeth_card *card)
5900 unsigned int work_done = 0;
5902 while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5903 unsigned int start, error;
5906 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5911 qeth_qdio_cq_handler(card, error, 1, start, completed);
5912 work_done += completed;
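/* qeth_poll() below is the driver's main NAPI handler: it drains
 * completed RX buffers, optionally kicks the per-queue TX NAPIs and the
 * completion queue, and only re-enables the QDIO interrupt once
 * napi_complete_done() succeeds. If qdio_start_irq() then reports work
 * that raced with re-enabling the interrupt, the NAPI is rescheduled
 * right away.
 */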
5916 int qeth_poll(struct napi_struct *napi, int budget)
5918 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5919 unsigned int work_done;
5921 work_done = qeth_rx_poll(card, budget);
5923 if (qeth_use_tx_irqs(card)) {
5924 struct qeth_qdio_out_q *queue;
5927 qeth_for_each_output_queue(card, queue, i) {
5928 if (!qeth_out_queue_is_empty(queue))
5929 napi_schedule(&queue->napi);
5933 if (card->options.cq == QETH_CQ_ENABLED)
5937 struct qeth_rx *ctx = &card->rx;
5939 /* Process any substantial refill backlog: */
5940 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5942 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5943 if (work_done >= budget)
5947 if (napi_complete_done(napi, work_done) &&
5948 qdio_start_irq(CARD_DDEV(card)))
5949 napi_schedule(napi);
5953 EXPORT_SYMBOL_GPL(qeth_poll);
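/* On IQD devices a TX buffer may complete asynchronously: QDIO reports
 * QDIO_ERROR_SLSB_PENDING, and the final status arrives later through a
 * QAOB on the completion queue. qeth_iqd_tx_complete() below then
 * either parks the buffer on queue->pending_bufs (QAOB still
 * outstanding; the queue slot is re-armed with a freshly allocated
 * buffer) or finishes it immediately using the QAOB's aorc result.
 */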
5955 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5956 unsigned int bidx, unsigned int qdio_error,
5959 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5960 u8 sflags = buffer->buffer->element[15].sflags;
5961 struct qeth_card *card = queue->card;
5962 bool error = !!qdio_error;
5964 if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5965 struct qaob *aob = buffer->aob;
5966 struct qeth_qaob_priv1 *priv;
5967 enum iucv_tx_notify notify;
5970 netdev_WARN_ONCE(card->dev,
5971 "Pending TX buffer %#x without QAOB on TX queue %u\n",
5972 bidx, queue->queue_no);
5973 qeth_schedule_recovery(card);
5977 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5979 priv = (struct qeth_qaob_priv1 *)&aob->user1;
5980 /* QAOB hasn't completed yet: */
5981 if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5982 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5984 /* Prepare the queue slot for immediate re-use: */
5985 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5986 if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
5987 QETH_CARD_TEXT(card, 2, "outofbuf");
5988 qeth_schedule_recovery(card);
5991 list_add(&buffer->list_entry, &queue->pending_bufs);
5992 /* Skip clearing the buffer: */
5996 /* QAOB already completed: */
5997 notify = qeth_compute_cq_notification(aob->aorc, 0);
5998 qeth_notify_skbs(queue, buffer, notify);
5999 error = !!aob->aorc;
6000 memset(aob, 0, sizeof(*aob));
6001 } else if (card->options.cq == QETH_CQ_ENABLED) {
6002 qeth_notify_skbs(queue, buffer,
6003 qeth_compute_cq_notification(sflags, 0));
6006 qeth_clear_output_buffer(queue, buffer, error, budget);
6009 static int qeth_tx_poll(struct napi_struct *napi, int budget)
6011 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
6012 unsigned int queue_no = queue->queue_no;
6013 struct qeth_card *card = queue->card;
6014 struct net_device *dev = card->dev;
6015 unsigned int work_done = 0;
6016 struct netdev_queue *txq;
6019 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
6021 txq = netdev_get_tx_queue(dev, queue_no);
6024 unsigned int start, error, i;
6025 unsigned int packets = 0;
6026 unsigned int bytes = 0;
6029 qeth_tx_complete_pending_bufs(card, queue, false, budget);
6031 if (qeth_out_queue_is_empty(queue)) {
6032 napi_complete(napi);
6036 /* Give the CPU a breather: */
6037 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
6038 QETH_TXQ_STAT_INC(queue, completion_yield);
6039 if (napi_complete_done(napi, 0))
6040 napi_schedule(napi);
6044 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
6046 if (completed <= 0) {
6047 /* Ensure we see TX completion for pending work: */
6048 if (napi_complete_done(napi, 0) &&
6049 !atomic_read(&queue->set_pci_flags_count))
6050 qeth_tx_arm_timer(queue, queue->rescan_usecs);
6054 for (i = start; i < start + completed; i++) {
6055 struct qeth_qdio_out_buffer *buffer;
6056 unsigned int bidx = QDIO_BUFNR(i);
6058 buffer = queue->bufs[bidx];
6059 packets += buffer->frames;
6060 bytes += buffer->bytes;
6062 qeth_handle_send_error(card, buffer, error);
6064 qeth_iqd_tx_complete(queue, bidx, error, budget);
6066 qeth_clear_output_buffer(queue, buffer, error,
6070 atomic_sub(completed, &queue->used_buffers);
6071 work_done += completed;
6073 netdev_tx_completed_queue(txq, packets, bytes);
6075 qeth_check_outbound_queue(queue);
6077 /* xmit may have observed the full condition, but not yet
6078 * stopped the txq. In that case the code below won't trigger.
6079 * So before returning, xmit re-checks the txq's fill level
6080 * and wakes the queue if needed.
6082 if (netif_tx_queue_stopped(txq) &&
6083 !qeth_out_queue_is_full(queue))
6084 netif_tx_wake_queue(txq);
6088 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
6090 if (!cmd->hdr.return_code)
6091 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6092 return cmd->hdr.return_code;
6095 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
6096 struct qeth_reply *reply,
6099 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6100 struct qeth_ipa_caps *caps = reply->param;
6102 if (qeth_setassparms_inspect_rc(cmd))
6105 caps->supported = cmd->data.setassparms.data.caps.supported;
6106 caps->enabled = cmd->data.setassparms.data.caps.enabled;
6110 int qeth_setassparms_cb(struct qeth_card *card,
6111 struct qeth_reply *reply, unsigned long data)
6113 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6115 QETH_CARD_TEXT(card, 4, "defadpcb");
6117 if (cmd->hdr.return_code)
6120 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6121 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6122 card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6123 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6124 card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6127 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6129 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
6130 enum qeth_ipa_funcs ipa_func,
6132 unsigned int data_length,
6133 enum qeth_prot_versions prot)
6135 struct qeth_ipacmd_setassparms *setassparms;
6136 struct qeth_ipacmd_setassparms_hdr *hdr;
6137 struct qeth_cmd_buffer *iob;
6139 QETH_CARD_TEXT(card, 4, "getasscm");
6140 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6142 offsetof(struct qeth_ipacmd_setassparms,
6147 setassparms = &__ipa_cmd(iob)->data.setassparms;
6148 setassparms->assist_no = ipa_func;
6150 hdr = &setassparms->hdr;
6151 hdr->length = sizeof(*hdr) + data_length;
6152 hdr->command_code = cmd_code;
6155 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6157 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6158 enum qeth_ipa_funcs ipa_func,
6159 u16 cmd_code, u32 *data,
6160 enum qeth_prot_versions prot)
6162 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6163 struct qeth_cmd_buffer *iob;
6165 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6166 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6171 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6172 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6174 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6176 static void qeth_unregister_dbf_views(void)
6180 for (x = 0; x < QETH_DBF_INFOS; x++) {
6181 debug_unregister(qeth_dbf[x].id);
6182 qeth_dbf[x].id = NULL;
6186 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6188 char dbf_txt_buf[32];
6191 if (!debug_level_enabled(id, level))
6193 va_start(args, fmt);
6194 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6196 debug_text_event(id, level, dbf_txt_buf);
6198 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6200 static int qeth_register_dbf_views(void)
6205 for (x = 0; x < QETH_DBF_INFOS; x++) {
6206 /* register the areas */
6207 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6211 if (qeth_dbf[x].id == NULL) {
6212 qeth_unregister_dbf_views();
6216 /* register a view */
6217 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6219 qeth_unregister_dbf_views();
6223 /* set a passing level */
6224 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6230 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
6232 int qeth_setup_discipline(struct qeth_card *card,
6233 enum qeth_discipline_id discipline)
6237 mutex_lock(&qeth_mod_mutex);
6238 switch (discipline) {
6239 case QETH_DISCIPLINE_LAYER3:
6240 card->discipline = try_then_request_module(
6241 symbol_get(qeth_l3_discipline), "qeth_l3");
6243 case QETH_DISCIPLINE_LAYER2:
6244 card->discipline = try_then_request_module(
6245 symbol_get(qeth_l2_discipline), "qeth_l2");
6250 mutex_unlock(&qeth_mod_mutex);
6252 if (!card->discipline) {
6253 dev_err(&card->gdev->dev, "There is no kernel module to "
6254 "support discipline %d\n", discipline);
6258 rc = card->discipline->setup(card->gdev);
6260 if (discipline == QETH_DISCIPLINE_LAYER2)
6261 symbol_put(qeth_l2_discipline);
6263 symbol_put(qeth_l3_discipline);
6264 card->discipline = NULL;
6269 card->options.layer = discipline;
6273 void qeth_remove_discipline(struct qeth_card *card)
6275 card->discipline->remove(card->gdev);
6277 if (IS_LAYER2(card))
6278 symbol_put(qeth_l2_discipline);
6280 symbol_put(qeth_l3_discipline);
6281 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6282 card->discipline = NULL;
6285 static const struct device_type qeth_generic_devtype = {
6286 .name = "qeth_generic",
6289 #define DBF_NAME_LEN 20
6291 struct qeth_dbf_entry {
6292 char dbf_name[DBF_NAME_LEN];
6293 debug_info_t *dbf_info;
6294 struct list_head dbf_list;
6297 static LIST_HEAD(qeth_dbf_list);
6298 static DEFINE_MUTEX(qeth_dbf_list_mutex);
6300 static debug_info_t *qeth_get_dbf_entry(char *name)
6302 struct qeth_dbf_entry *entry;
6303 debug_info_t *rc = NULL;
6305 mutex_lock(&qeth_dbf_list_mutex);
6306 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6307 if (strcmp(entry->dbf_name, name) == 0) {
6308 rc = entry->dbf_info;
6312 mutex_unlock(&qeth_dbf_list_mutex);
6316 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6318 struct qeth_dbf_entry *new_entry;
6320 card->debug = debug_register(name, 2, 1, 8);
6322 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6325 if (debug_register_view(card->debug, &debug_hex_ascii_view))
6327 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6330 strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
6331 new_entry->dbf_info = card->debug;
6332 mutex_lock(&qeth_dbf_list_mutex);
6333 list_add(&new_entry->dbf_list, &qeth_dbf_list);
6334 mutex_unlock(&qeth_dbf_list_mutex);
6339 debug_unregister(card->debug);
6344 static void qeth_clear_dbf_list(void)
6346 struct qeth_dbf_entry *entry, *tmp;
6348 mutex_lock(&qeth_dbf_list_mutex);
6349 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6350 list_del(&entry->dbf_list);
6351 debug_unregister(entry->dbf_info);
6354 mutex_unlock(&qeth_dbf_list_mutex);
6357 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6359 struct net_device *dev;
6360 struct qeth_priv *priv;
6362 switch (card->info.type) {
6363 case QETH_CARD_TYPE_IQD:
6364 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6365 ether_setup, QETH_MAX_OUT_QUEUES, 1);
6367 case QETH_CARD_TYPE_OSM:
6368 dev = alloc_etherdev(sizeof(*priv));
6371 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6377 priv = netdev_priv(dev);
6378 priv->rx_copybreak = QETH_RX_COPYBREAK;
6379 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6381 dev->ml_priv = card;
6382 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6384 /* initialized when device first goes online: */
6387 SET_NETDEV_DEV(dev, &card->gdev->dev);
6388 netif_carrier_off(dev);
6390 dev->ethtool_ops = &qeth_ethtool_ops;
6391 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6392 dev->hw_features |= NETIF_F_SG;
6393 dev->vlan_features |= NETIF_F_SG;
6395 dev->features |= NETIF_F_SG;
6400 struct net_device *qeth_clone_netdev(struct net_device *orig)
6402 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6407 clone->dev_port = orig->dev_port;
6411 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6413 struct qeth_card *card;
6416 enum qeth_discipline_id enforced_disc;
6417 char dbf_name[DBF_NAME_LEN];
6419 QETH_DBF_TEXT(SETUP, 2, "probedev");
6422 if (!get_device(dev))
6425 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6427 card = qeth_alloc_card(gdev);
6429 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6434 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6435 dev_name(&gdev->dev));
6436 card->debug = qeth_get_dbf_entry(dbf_name);
6438 rc = qeth_add_dbf_entry(card, dbf_name);
6443 qeth_setup_card(card);
6444 card->dev = qeth_alloc_netdev(card);
6450 qeth_determine_capabilities(card);
6451 qeth_set_blkt_defaults(card);
6453 card->qdio.no_out_queues = card->dev->num_tx_queues;
6454 rc = qeth_update_from_chp_desc(card);
6458 gdev->dev.groups = qeth_dev_groups;
6460 enforced_disc = qeth_enforce_discipline(card);
6461 switch (enforced_disc) {
6462 case QETH_DISCIPLINE_UNDETERMINED:
6463 gdev->dev.type = &qeth_generic_devtype;
6466 card->info.layer_enforced = true;
6467 /* It's so early that we don't need the discipline_mutex yet. */
6468 rc = qeth_setup_discipline(card, enforced_disc);
6470 goto err_setup_disc;
6479 free_netdev(card->dev);
6481 qeth_core_free_card(card);
6487 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6489 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6491 QETH_CARD_TEXT(card, 2, "removedv");
6493 mutex_lock(&card->discipline_mutex);
6494 if (card->discipline)
6495 qeth_remove_discipline(card);
6496 mutex_unlock(&card->discipline_mutex);
6498 qeth_free_qdio_queues(card);
6500 free_netdev(card->dev);
6501 qeth_core_free_card(card);
6502 put_device(&gdev->dev);
6505 static int qeth_core_set_online(struct ccwgroup_device *gdev)
6507 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6509 enum qeth_discipline_id def_discipline;
6511 mutex_lock(&card->discipline_mutex);
6512 if (!card->discipline) {
6513 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6514 QETH_DISCIPLINE_LAYER2;
6515 rc = qeth_setup_discipline(card, def_discipline);
6520 rc = qeth_set_online(card, card->discipline);
6523 mutex_unlock(&card->discipline_mutex);
6527 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6529 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6532 mutex_lock(&card->discipline_mutex);
6533 rc = qeth_set_offline(card, card->discipline, false);
6534 mutex_unlock(&card->discipline_mutex);
6539 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6541 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6543 qeth_set_allowed_threads(card, 0, 1);
6544 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6545 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6546 qeth_qdio_clear_card(card, 0);
6547 qeth_drain_output_queues(card);
6548 qdio_free(CARD_DDEV(card));
6551 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6556 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6559 return err ? err : count;
6561 static DRIVER_ATTR_WO(group);
6563 static struct attribute *qeth_drv_attrs[] = {
6564 &driver_attr_group.attr,
6567 static struct attribute_group qeth_drv_attr_group = {
6568 .attrs = qeth_drv_attrs,
6570 static const struct attribute_group *qeth_drv_attr_groups[] = {
6571 &qeth_drv_attr_group,
6575 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6577 .groups = qeth_drv_attr_groups,
6578 .owner = THIS_MODULE,
6581 .ccw_driver = &qeth_ccw_driver,
6582 .setup = qeth_core_probe_device,
6583 .remove = qeth_core_remove_device,
6584 .set_online = qeth_core_set_online,
6585 .set_offline = qeth_core_set_offline,
6586 .shutdown = qeth_core_shutdown,
6589 int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6591 struct qeth_card *card = dev->ml_priv;
6595 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6596 rc = qeth_snmp_command(card, data);
6598 case SIOC_QETH_GET_CARD_TYPE:
6599 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6603 case SIOC_QETH_QUERY_OAT:
6604 rc = qeth_query_oat_command(card, data);
6607 if (card->discipline->do_ioctl)
6608 rc = card->discipline->do_ioctl(dev, rq, data, cmd);
6613 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6616 EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
6618 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6620 struct qeth_card *card = dev->ml_priv;
6621 struct mii_ioctl_data *mii_data;
6626 mii_data = if_mii(rq);
6627 mii_data->phy_id = 0;
6630 mii_data = if_mii(rq);
6631 if (mii_data->phy_id != 0)
6634 mii_data->val_out = qeth_mdio_read(dev,
6635 mii_data->phy_id, mii_data->reg_num);
6641 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6644 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6646 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6649 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6650 u32 *features = reply->param;
6652 if (qeth_setassparms_inspect_rc(cmd))
6655 *features = cmd->data.setassparms.data.flags_32bit;
6659 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6660 enum qeth_prot_versions prot)
6662 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6666 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6667 enum qeth_prot_versions prot, u8 *lp2lp)
6669 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6670 struct qeth_cmd_buffer *iob;
6671 struct qeth_ipa_caps caps;
6675 /* some L3 HW requires combined L3+L4 csum offload: */
6676 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6677 cstype == IPA_OUTBOUND_CHECKSUM)
6678 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6680 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6685 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6689 if ((required_features & features) != required_features) {
6690 qeth_set_csum_off(card, cstype, prot);
6694 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6695 SETASS_DATA_SIZEOF(flags_32bit),
6698 qeth_set_csum_off(card, cstype, prot);
6702 if (features & QETH_IPA_CHECKSUM_LP2LP)
6703 required_features |= QETH_IPA_CHECKSUM_LP2LP;
6704 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6705 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6707 qeth_set_csum_off(card, cstype, prot);
6711 if (!qeth_ipa_caps_supported(&caps, required_features) ||
6712 !qeth_ipa_caps_enabled(&caps, required_features)) {
6713 qeth_set_csum_off(card, cstype, prot);
6717 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6718 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6721 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6726 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6727 enum qeth_prot_versions prot, u8 *lp2lp)
6729 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6730 qeth_set_csum_off(card, cstype, prot);
6733 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6736 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6737 struct qeth_tso_start_data *tso_data = reply->param;
6739 if (qeth_setassparms_inspect_rc(cmd))
6742 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6743 tso_data->supported = cmd->data.setassparms.data.tso.supported;
6747 static int qeth_set_tso_off(struct qeth_card *card,
6748 enum qeth_prot_versions prot)
6750 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6751 IPA_CMD_ASS_STOP, NULL, prot);
6754 static int qeth_set_tso_on(struct qeth_card *card,
6755 enum qeth_prot_versions prot)
6757 struct qeth_tso_start_data tso_data;
6758 struct qeth_cmd_buffer *iob;
6759 struct qeth_ipa_caps caps;
6762 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6763 IPA_CMD_ASS_START, 0, prot);
6767 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6771 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6772 qeth_set_tso_off(card, prot);
6776 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6778 SETASS_DATA_SIZEOF(caps), prot);
6780 qeth_set_tso_off(card, prot);
6784 /* enable TSO capability */
6785 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6786 QETH_IPA_LARGE_SEND_TCP;
6787 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6789 qeth_set_tso_off(card, prot);
6793 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6794 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6795 qeth_set_tso_off(card, prot);
6799 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6804 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6805 enum qeth_prot_versions prot)
6807 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6810 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6812 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6815 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6816 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6817 QETH_PROT_IPV4, NULL);
6818 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6819 /* at most the IPv4 Offload Assist is available, so its rc decides */
6822 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6823 QETH_PROT_IPV6, NULL);
6826 /* enable: success if any Assist is active */
6827 return (rc_ipv6) ? rc_ipv4 : 0;
6829 /* disable: failure if any Assist is still active */
6830 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
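/* The rc handling in qeth_set_ipa_rx_csum() above is asymmetric on
 * purpose: when enabling, one working Offload Assist (IPv4 or IPv6) is
 * enough, so a single success wins; when disabling, both assists must
 * actually stop, so the first remaining failure is reported.
 */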
6834 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6835 * @dev: a net_device
6837 void qeth_enable_hw_features(struct net_device *dev)
6839 struct qeth_card *card = dev->ml_priv;
6840 netdev_features_t features;
6842 features = dev->features;
6843 /* force-off any feature that might need an IPA sequence.
6844 * netdev_update_features() will restart them.
6846 dev->features &= ~dev->hw_features;
6847 /* toggle VLAN filter, so that VIDs are re-programmed: */
6848 if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6849 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6850 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6852 netdev_update_features(dev);
6853 if (features != dev->features)
6854 dev_warn(&card->gdev->dev,
6855 "Device recovery failed to restore all offload features\n");
6857 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6859 static void qeth_check_restricted_features(struct qeth_card *card,
6860 netdev_features_t changed,
6861 netdev_features_t actual)
6863 netdev_features_t ipv6_features = NETIF_F_TSO6;
6864 netdev_features_t ipv4_features = NETIF_F_TSO;
6866 if (!card->info.has_lp2lp_cso_v6)
6867 ipv6_features |= NETIF_F_IPV6_CSUM;
6868 if (!card->info.has_lp2lp_cso_v4)
6869 ipv4_features |= NETIF_F_IP_CSUM;
6871 if ((changed & ipv6_features) && !(actual & ipv6_features))
6872 qeth_flush_local_addrs6(card);
6873 if ((changed & ipv4_features) && !(actual & ipv4_features))
6874 qeth_flush_local_addrs4(card);
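/* qeth_set_features() below tracks partial failure with XOR
 * bookkeeping: 'changed' starts out as the set of requested toggles,
 * and every toggle whose IPA command fails is removed from it again.
 * A hedged worked example, assuming NETIF_F_RXCSUM and NETIF_F_TSO were
 * requested and only the TSO command failed:
 *
 *	changed = RXCSUM | TSO		(requested toggles)
 *	TSO fails -> changed ^= TSO	(changed = RXCSUM)
 *	(dev->features ^ features) != changed, so the function applies
 *	dev->features ^= changed	(RXCSUM flips, TSO keeps its old
 *	state) and returns an error.
 */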
6877 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6879 struct qeth_card *card = dev->ml_priv;
6880 netdev_features_t changed = dev->features ^ features;
6883 QETH_CARD_TEXT(card, 2, "setfeat");
6884 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6886 if ((changed & NETIF_F_IP_CSUM)) {
6887 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6888 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6889 &card->info.has_lp2lp_cso_v4);
6891 changed ^= NETIF_F_IP_CSUM;
6893 if (changed & NETIF_F_IPV6_CSUM) {
6894 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6895 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6896 &card->info.has_lp2lp_cso_v6);
6898 changed ^= NETIF_F_IPV6_CSUM;
6900 if (changed & NETIF_F_RXCSUM) {
6901 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6903 changed ^= NETIF_F_RXCSUM;
6905 if (changed & NETIF_F_TSO) {
6906 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6909 changed ^= NETIF_F_TSO;
6911 if (changed & NETIF_F_TSO6) {
6912 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6915 changed ^= NETIF_F_TSO6;
6918 qeth_check_restricted_features(card, dev->features ^ features,
6919 dev->features ^ changed);
6921 /* everything changed successfully? */
6922 if ((dev->features ^ features) == changed)
6924 /* something went wrong. save changed features and return error */
6925 dev->features ^= changed;
6928 EXPORT_SYMBOL_GPL(qeth_set_features);
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
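/* Usage sketch (editorial; abridged, see the sub-drivers for the real
 * tables): both hooks are wired into the disciplines' net_device_ops, e.g.:
 *
 *	static const struct net_device_ops qeth_l2_netdev_ops = {
 *		...
 *		.ndo_fix_features	= qeth_fix_features,
 *		.ndo_set_features	= qeth_set_features,
 *	};
 *
 * netdev_update_features() first calls .ndo_fix_features to mask out bits
 * the card cannot support, then .ndo_set_features to issue the IPA commands.
 */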
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 * a (small) linear part for the headers, and
	 * page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
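/* Worked example (editorial): for a TSO skb with gso_size == 1400 and
 * roughly a hundred bytes of headers plus headroom, SKB_DATA_ALIGN(hroom +
 * doffset + hsize) stays well below SKB_MAX_HEAD(0) (about 3.5 KiB with
 * 4 KiB pages), so NETIF_F_SG is cleared and each segment becomes a single
 * linear buffer element. With gso_size near 9000 (jumbo frames), the sum
 * exceeds the limit and scatter-gather stays enabled, keeping each
 * allocation order-0.
 */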
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
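/* Worked example (editorial): with 4 active TX queues on an IQD device,
 * queue 0 is the mcast queue and ucast_txqs == 3, so this amounts to:
 *
 *	netdev_set_num_tc(dev, 1);
 *	netdev_set_tc_queue(dev, 0, 3, QETH_IQD_MIN_UCAST_TXQ);
 *
 * i.e. one traffic class spanning queues 1..3, with all TC_BITMASK + 1
 * priorities mapped to it, steering the stack's queue hashing away from the
 * mcast queue.
 */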
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
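/* Editorial note: netdev_pick_tx() can still return queue 0 despite the
 * prio-tc map (e.g. through a cached socket TX queue or an XPS
 * configuration), hence the final remap of QETH_IQD_MCAST_TXQ onto
 * QETH_IQD_MIN_UCAST_TXQ above.
 */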
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
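/* Editorial note: napi_schedule() merely raises NET_RX_SOFTIRQ. Because the
 * scheduling loop runs with bottom halves disabled, the softirq stays
 * pending until local_bh_enable(), which then processes it immediately --
 * so the NAPI instances start polling before qeth_open() returns.
 */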
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
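/* Editorial note: the labels above form the usual goto-unwind ladder -- each
 * entry point releases only what was set up before the failing step and then
 * falls through into the older teardown stages. A ccwgroup registration
 * failure, for instance, enters at ccwgroup_err:, unregisters the CCW
 * driver, destroys both caches, and ends with the debugfs cleanup.
 */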
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");