// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

121 const char *qeth_get_cardname_short(struct qeth_card *card)
123 if (IS_VM_NIC(card)) {
124 switch (card->info.type) {
125 case QETH_CARD_TYPE_OSD:
126 return "Virt.NIC QDIO";
127 case QETH_CARD_TYPE_IQD:
128 return "Virt.NIC Hiper";
129 case QETH_CARD_TYPE_OSM:
130 return "Virt.NIC OSM";
131 case QETH_CARD_TYPE_OSX:
132 return "Virt.NIC OSX";
137 switch (card->info.type) {
138 case QETH_CARD_TYPE_OSD:
139 switch (card->info.link_type) {
140 case QETH_LINK_TYPE_FAST_ETH:
142 case QETH_LINK_TYPE_HSTR:
144 case QETH_LINK_TYPE_GBIT_ETH:
146 case QETH_LINK_TYPE_10GBIT_ETH:
148 case QETH_LINK_TYPE_25GBIT_ETH:
150 case QETH_LINK_TYPE_LANE_ETH100:
151 return "OSD_FE_LANE";
152 case QETH_LINK_TYPE_LANE_TR:
153 return "OSD_TR_LANE";
154 case QETH_LINK_TYPE_LANE_ETH1000:
155 return "OSD_GbE_LANE";
156 case QETH_LINK_TYPE_LANE:
157 return "OSD_ATM_LANE";
159 return "OSD_Express";
161 case QETH_CARD_TYPE_IQD:
162 return "HiperSockets";
163 case QETH_CARD_TYPE_OSN:
165 case QETH_CARD_TYPE_OSM:
167 case QETH_CARD_TYPE_OSX:
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}

	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

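/* A CQ completion (QAOB) can race with the regular TX completion path. The
 * buffer state records which side finished first: if the TX path already
 * marked the buffer as NEED_QAOB, notification and cleanup happen below;
 * otherwise the TX completion code takes care of them later.
 */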
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (aob->aorc) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		new_state = QETH_QDIO_BUF_QAOB_ERROR;
	}

	switch (atomic_xchg(&buffer->state, new_state)) {
	case QETH_QDIO_BUF_PRIMED:
		/* Faster than TX completion code, let it handle the async
		 * completion for us.
		 */
		break;
	case QETH_QDIO_BUF_PENDING:
		/* TX completion code is active and will handle the async
		 * completion for us.
		 */
		break;
	case QETH_QDIO_BUF_NEED_QAOB:
		/* TX completion code is already finished. */
		notification = qeth_compute_cq_notification(aob->aorc, 1);
		qeth_notify_skbs(buffer->q, buffer, notification);

		/* Free dangling allocations. The attached skbs are handled by
		 * qeth_tx_complete_pending_bufs().
		 */
		for (i = 0;
		     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
		     i++) {
			void *data = phys_to_virt(aob->sba[i]);

			if (data && buffer->is_header[i])
				kmem_cache_free(qeth_core_header_cache, data);
			buffer->is_header[i] = 0;
		}

		queue = buffer->q;
		atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
		napi_schedule(&queue->napi);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	qdio_release_aob(aob);
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

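/* Distinguish solicited IPA replies, which are handed back to the command
 * matching code, from unsolicited events (STOPLAN, STARTLAN, bridgeport and
 * address change notifications), which are consumed here and return NULL.
 */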
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
						       (unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;
	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (data && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			if (drain)
				qeth_notify_skbs(queue, buf,
						 TX_NOTIFY_GENERALERROR);
			qeth_tx_complete_buf(buf, drain, 0);

			list_del(&buf->list_entry);
			kmem_cache_free(qeth_qdio_outbuf_cache, buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

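/* Derive the function level that the peer is expected to report back in its
 * IDX_ACTIVATE reply, based on the level that was sent.
 */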
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	if (!IS_IQD(card))
		port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

2351 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2354 struct qeth_cmd_buffer *iob;
2356 QETH_CARD_TEXT(card, 2, "cmenblcb");
2358 iob = (struct qeth_cmd_buffer *) data;
2359 memcpy(&card->token.cm_filter_r,
2360 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2361 QETH_MPC_TOKEN_LENGTH);
2365 static int qeth_cm_enable(struct qeth_card *card)
2367 struct qeth_cmd_buffer *iob;
2369 QETH_CARD_TEXT(card, 2, "cmenable");
2371 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2375 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2376 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2377 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2378 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2380 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2383 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2386 struct qeth_cmd_buffer *iob;
2388 QETH_CARD_TEXT(card, 2, "cmsetpcb");
2390 iob = (struct qeth_cmd_buffer *) data;
2391 memcpy(&card->token.cm_connection_r,
2392 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2393 QETH_MPC_TOKEN_LENGTH);
2397 static int qeth_cm_setup(struct qeth_card *card)
2399 struct qeth_cmd_buffer *iob;
2401 QETH_CARD_TEXT(card, 2, "cmsetup");
2403 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2407 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2408 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2409 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2410 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2411 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2412 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2413 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
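/* Descriptive comment (added): reject Token Ring link types, which are no
 * longer supported, so that setup fails with a clear error message.
 */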
2416 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2418 if (link_type == QETH_LINK_TYPE_LANE_TR ||
2419 link_type == QETH_LINK_TYPE_HSTR) {
2420 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2427 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2429 struct net_device *dev = card->dev;
2430 unsigned int new_mtu;
2433 /* IQD needs accurate max MTU to set up its RX buffers: */
2436 /* tolerate quirky HW: */
2437 max_mtu = ETH_MAX_MTU;
2442 /* move any device with default MTU to new max MTU: */
2443 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2445 /* adjust RX buffer size to new max MTU: */
2446 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2447 if (dev->max_mtu && dev->max_mtu != max_mtu)
2448 qeth_free_qdio_queues(card);
2452 /* default MTUs for first setup: */
2453 else if (IS_LAYER2(card))
2454 new_mtu = ETH_DATA_LEN;
2456 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2459 dev->max_mtu = max_mtu;
2460 dev->mtu = min(new_mtu, max_mtu);
2465 static int qeth_get_mtu_outof_framesize(int framesize)
2467 switch (framesize) {
2481 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2484 __u16 mtu, framesize;
2486 struct qeth_cmd_buffer *iob;
2489 QETH_CARD_TEXT(card, 2, "ulpenacb");
2491 iob = (struct qeth_cmd_buffer *) data;
2492 memcpy(&card->token.ulp_filter_r,
2493 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2494 QETH_MPC_TOKEN_LENGTH);
2496 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2497 mtu = qeth_get_mtu_outof_framesize(framesize);
2499 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2501 *(u16 *)reply->param = mtu;
2503 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2504 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2506 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2507 if (!qeth_is_supported_link_type(card, link_type))
2508 return -EPROTONOSUPPORT;
2511 card->info.link_type = link_type;
2512 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
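/* Descriptive comment (added): select the MPC protocol identifier - OSN uses
 * its own identifier, all other devices report layer 2 or TCP/IP depending on
 * the configured discipline.
 */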
2516 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2519 return QETH_PROT_OSN2;
2520 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2523 static int qeth_ulp_enable(struct qeth_card *card)
2525 u8 prot_type = qeth_mpc_select_prot_type(card);
2526 struct qeth_cmd_buffer *iob;
2530 QETH_CARD_TEXT(card, 2, "ulpenabl");
2532 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2536 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2537 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2538 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2539 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2540 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2541 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2542 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2545 return qeth_update_max_mtu(card, max_mtu);
2548 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2551 struct qeth_cmd_buffer *iob;
2553 QETH_CARD_TEXT(card, 2, "ulpstpcb");
2555 iob = (struct qeth_cmd_buffer *) data;
2556 memcpy(&card->token.ulp_connection_r,
2557 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2558 QETH_MPC_TOKEN_LENGTH);
2559 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2561 QETH_CARD_TEXT(card, 2, "olmlimit");
2562 dev_err(&card->gdev->dev, "A connection could not be "
2563 "established because of an OLM limit\n");
2569 static int qeth_ulp_setup(struct qeth_card *card)
2572 struct qeth_cmd_buffer *iob;
2574 QETH_CARD_TEXT(card, 2, "ulpsetup");
2576 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2580 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2581 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2582 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2583 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2584 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2585 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2587 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2588 temp = (card->info.cula << 8) + card->info.unit_addr2;
2589 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2590 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2593 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2595 struct qeth_qdio_out_buffer *newbuf;
2597 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2601 newbuf->buffer = q->qdio_bufs[bidx];
2602 skb_queue_head_init(&newbuf->skb_list);
2603 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2605 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2606 q->bufs[bidx] = newbuf;
2610 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2615 qeth_drain_output_queue(q, true);
2616 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2620 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2622 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2628 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2631 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2632 if (qeth_init_qdio_out_buf(q, i))
2640 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
2641 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
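/* Descriptive comment (added): TX completion timer - schedule the queue's
 * NAPI instance so that pending TX completions are processed even when no
 * further traffic arrives.
 */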
2647 static void qeth_tx_completion_timer(struct timer_list *timer)
2649 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2651 napi_schedule(&queue->napi);
2652 QETH_TXQ_STAT_INC(queue, completion_timer);
2655 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2659 QETH_CARD_TEXT(card, 2, "allcqdbf");
2661 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2662 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2665 QETH_CARD_TEXT(card, 2, "inq");
2666 card->qdio.in_q = qeth_alloc_qdio_queue();
2667 if (!card->qdio.in_q)
2670 /* inbound buffer pool */
2671 if (qeth_alloc_buffer_pool(card))
2675 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2676 struct qeth_qdio_out_q *queue;
2678 queue = qeth_alloc_output_queue();
2681 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2682 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2683 card->qdio.out_qs[i] = queue;
2685 queue->queue_no = i;
2686 INIT_LIST_HEAD(&queue->pending_bufs);
2687 spin_lock_init(&queue->lock);
2688 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2689 queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2690 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2691 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2695 if (qeth_alloc_cq(card))
2702 qeth_free_output_queue(card->qdio.out_qs[--i]);
2703 card->qdio.out_qs[i] = NULL;
2705 qeth_free_buffer_pool(card);
2707 qeth_free_qdio_queue(card->qdio.in_q);
2708 card->qdio.in_q = NULL;
2710 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2714 static void qeth_free_qdio_queues(struct qeth_card *card)
2718 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2719 QETH_QDIO_UNINITIALIZED)
2723 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2724 if (card->qdio.in_q->bufs[j].rx_skb)
2725 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2727 qeth_free_qdio_queue(card->qdio.in_q);
2728 card->qdio.in_q = NULL;
2729 /* inbound buffer pool */
2730 qeth_free_buffer_pool(card);
2731 /* free outbound qdio_qs */
2732 for (i = 0; i < card->qdio.no_out_queues; i++) {
2733 qeth_free_output_queue(card->qdio.out_qs[i]);
2734 card->qdio.out_qs[i] = NULL;
2738 static void qeth_fill_qib_parms(struct qeth_card *card,
2739 struct qeth_qib_parms *parms)
2741 struct qeth_qdio_out_q *queue;
2744 parms->pcit_magic[0] = 'P';
2745 parms->pcit_magic[1] = 'C';
2746 parms->pcit_magic[2] = 'I';
2747 parms->pcit_magic[3] = 'T';
2748 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2749 parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2750 parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2751 parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2753 parms->blkt_magic[0] = 'B';
2754 parms->blkt_magic[1] = 'L';
2755 parms->blkt_magic[2] = 'K';
2756 parms->blkt_magic[3] = 'T';
2757 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2758 parms->blkt_total = card->info.blkt.time_total;
2759 parms->blkt_inter_packet = card->info.blkt.inter_packet;
2760 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2762 /* Prio-queueing implicitly uses the default priorities: */
2763 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2766 parms->pque_magic[0] = 'P';
2767 parms->pque_magic[1] = 'Q';
2768 parms->pque_magic[2] = 'U';
2769 parms->pque_magic[3] = 'E';
2770 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2771 parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2772 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2774 qeth_for_each_output_queue(card, queue, i)
2775 parms->pque_priority[i] = queue->priority;
2778 static int qeth_qdio_activate(struct qeth_card *card)
2780 QETH_CARD_TEXT(card, 3, "qdioact");
2781 return qdio_activate(CARD_DDEV(card));
2784 static int qeth_dm_act(struct qeth_card *card)
2786 struct qeth_cmd_buffer *iob;
2788 QETH_CARD_TEXT(card, 2, "dmact");
2790 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2794 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2795 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2796 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2797 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2798 return qeth_send_control_data(card, iob, NULL, NULL);
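/* Descriptive comment (added): run the MPC handshake - start the reader,
 * enable and set up the control connection (CM), enable and set up the data
 * connection (ULP), then allocate, establish and activate the QDIO queues
 * and finally activate data transfer (DM_ACT).
 */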
2801 static int qeth_mpc_initialize(struct qeth_card *card)
2805 QETH_CARD_TEXT(card, 2, "mpcinit");
2807 rc = qeth_issue_next_read(card);
2809 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2812 rc = qeth_cm_enable(card);
2814 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2817 rc = qeth_cm_setup(card);
2819 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2822 rc = qeth_ulp_enable(card);
2824 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2827 rc = qeth_ulp_setup(card);
2829 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2832 rc = qeth_alloc_qdio_queues(card);
2834 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2837 rc = qeth_qdio_establish(card);
2839 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2840 qeth_free_qdio_queues(card);
2843 rc = qeth_qdio_activate(card);
2845 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2848 rc = qeth_dm_act(card);
2850 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2857 static void qeth_print_status_message(struct qeth_card *card)
2859 switch (card->info.type) {
2860 case QETH_CARD_TYPE_OSD:
2861 case QETH_CARD_TYPE_OSM:
2862 case QETH_CARD_TYPE_OSX:
2863 /* VM will use a non-zero first character
2864 * to indicate a HiperSockets-like reporting
2865 * of the level; OSA sets the first character to zero
2867 if (!card->info.mcl_level[0]) {
2868 sprintf(card->info.mcl_level, "%02x%02x",
2869 card->info.mcl_level[2],
2870 card->info.mcl_level[3]);
2874 case QETH_CARD_TYPE_IQD:
2875 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2876 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2877 card->info.mcl_level[0]];
2878 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2879 card->info.mcl_level[1]];
2880 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2881 card->info.mcl_level[2]];
2882 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2883 card->info.mcl_level[3]];
2884 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2888 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2890 dev_info(&card->gdev->dev,
2891 "Device is a%s card%s%s%s\nwith link type %s.\n",
2892 qeth_get_cardname(card),
2893 (card->info.mcl_level[0]) ? " (level: " : "",
2894 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2895 (card->info.mcl_level[0]) ? ")" : "",
2896 qeth_get_cardname_short(card));
2899 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2901 struct qeth_buffer_pool_entry *entry;
2903 QETH_CARD_TEXT(card, 5, "inwrklst");
2905 list_for_each_entry(entry,
2906 &card->qdio.init_pool.entry_list, init_list) {
2907 qeth_put_buffer_pool_entry(card, entry);
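/* Descriptive comment (added): pick a pool entry whose pages are no longer
 * referenced elsewhere. If none is available, recycle the first entry and
 * replace any still-referenced pages with freshly allocated ones.
 */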
2911 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2912 struct qeth_card *card)
2914 struct qeth_buffer_pool_entry *entry;
2917 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2920 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2922 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2923 if (page_count(entry->elements[i]) > 1) {
2929 list_del_init(&entry->list);
2934 /* no free buffer in pool so take first one and swap pages */
2935 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2936 struct qeth_buffer_pool_entry, list);
2937 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2938 if (page_count(entry->elements[i]) > 1) {
2939 struct page *page = dev_alloc_page();
2944 __free_page(entry->elements[i]);
2945 entry->elements[i] = page;
2946 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2949 list_del_init(&entry->list);
2953 static int qeth_init_input_buffer(struct qeth_card *card,
2954 struct qeth_qdio_buffer *buf)
2956 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2959 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2960 buf->rx_skb = netdev_alloc_skb(card->dev,
2962 sizeof(struct ipv6hdr));
2968 pool_entry = qeth_find_free_buffer_pool_entry(card);
2972 buf->pool_entry = pool_entry;
2976 * since the buffer is accessed only from the input_tasklet
2977 * there shouldn't be a need to synchronize; also, since we use
2978 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2981 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2982 buf->buffer->element[i].length = PAGE_SIZE;
2983 buf->buffer->element[i].addr =
2984 page_to_phys(pool_entry->elements[i]);
2985 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2986 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2988 buf->buffer->element[i].eflags = 0;
2989 buf->buffer->element[i].sflags = 0;
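/* Descriptive comment (added): TX bulking is only used for IQD unicast
 * queues without a completion queue; the bulk limit is taken from the SSQD
 * descriptor (mmwc), defaulting to 1.
 */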
2994 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2995 struct qeth_qdio_out_q *queue)
2997 if (!IS_IQD(card) ||
2998 qeth_iqd_is_mcast_queue(card, queue) ||
2999 card->options.cq == QETH_CQ_ENABLED ||
3000 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
3003 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
3006 static int qeth_init_qdio_queues(struct qeth_card *card)
3008 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
3012 QETH_CARD_TEXT(card, 2, "initqdqs");
3015 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3016 memset(&card->rx, 0, sizeof(struct qeth_rx));
3018 qeth_initialize_working_pool_list(card);
3019 /* give only as many buffers to hardware as we have buffer pool entries */
3020 for (i = 0; i < rx_bufs; i++) {
3021 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3026 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3027 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
3029 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3034 rc = qeth_cq_init(card);
3039 /* outbound queue */
3040 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3041 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3043 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3044 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3045 queue->next_buf_to_fill = 0;
3047 queue->prev_hdr = NULL;
3048 queue->coalesced_frames = 0;
3049 queue->bulk_start = 0;
3050 queue->bulk_count = 0;
3051 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3052 atomic_set(&queue->used_buffers, 0);
3053 atomic_set(&queue->set_pci_flags_count, 0);
3054 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3059 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3060 struct qeth_cmd_buffer *iob)
3062 qeth_mpc_finalize_cmd(card, iob);
3064 /* override with IPA-specific values: */
3065 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3068 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3070 bool (*match)(struct qeth_cmd_buffer *iob,
3071 struct qeth_cmd_buffer *reply))
3073 u8 prot_type = qeth_mpc_select_prot_type(card);
3074 u16 total_length = iob->length;
3076 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3078 iob->finalize = qeth_ipa_finalize_cmd;
3081 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3082 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3083 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3084 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3085 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3086 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3087 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3088 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3090 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
3092 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3093 struct qeth_cmd_buffer *reply)
3095 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3097 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3100 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3101 enum qeth_ipa_cmds cmd_code,
3102 enum qeth_prot_versions prot,
3103 unsigned int data_length)
3105 struct qeth_cmd_buffer *iob;
3106 struct qeth_ipacmd_hdr *hdr;
3108 data_length += offsetof(struct qeth_ipa_cmd, data);
3109 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3114 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3116 hdr = &__ipa_cmd(iob)->hdr;
3117 hdr->command = cmd_code;
3118 hdr->initiator = IPA_CMD_INITIATOR_HOST;
3119 /* hdr->seqno is set by qeth_send_control_data() */
3120 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3121 hdr->rel_adapter_no = (u8) card->dev->dev_port;
3122 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3123 hdr->param_count = 1;
3124 hdr->prot_version = prot;
3127 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3129 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3130 struct qeth_reply *reply, unsigned long data)
3132 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3134 return (cmd->hdr.return_code) ? -EIO : 0;
3138 * qeth_send_ipa_cmd() - send an IPA command
3140 * See qeth_send_control_data() for explanation of the arguments.
3143 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3144 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3150 QETH_CARD_TEXT(card, 4, "sendipa");
3152 if (card->read_or_write_problem) {
3157 if (reply_cb == NULL)
3158 reply_cb = qeth_send_ipa_cmd_cb;
3159 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3161 qeth_clear_ipacmd_list(card);
3162 qeth_schedule_recovery(card);
3166 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3168 static int qeth_send_startlan_cb(struct qeth_card *card,
3169 struct qeth_reply *reply, unsigned long data)
3171 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3173 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3176 return (cmd->hdr.return_code) ? -EIO : 0;
3179 static int qeth_send_startlan(struct qeth_card *card)
3181 struct qeth_cmd_buffer *iob;
3183 QETH_CARD_TEXT(card, 2, "strtlan");
3185 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3188 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
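/* Descriptive comment (added): fold the SETADAPTERPARMS sub-header return
 * code into the main IPA return code, so callers only check a single field.
 */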
3191 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3193 if (!cmd->hdr.return_code)
3194 cmd->hdr.return_code =
3195 cmd->data.setadapterparms.hdr.return_code;
3196 return cmd->hdr.return_code;
3199 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3200 struct qeth_reply *reply, unsigned long data)
3202 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3203 struct qeth_query_cmds_supp *query_cmd;
3205 QETH_CARD_TEXT(card, 3, "quyadpcb");
3206 if (qeth_setadpparms_inspect_rc(cmd))
3209 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3210 if (query_cmd->lan_type & 0x7f) {
3211 if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3212 return -EPROTONOSUPPORT;
3214 card->info.link_type = query_cmd->lan_type;
3215 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3218 card->options.adp.supported = query_cmd->supported_cmds;
3222 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3223 enum qeth_ipa_setadp_cmd adp_cmd,
3224 unsigned int data_length)
3226 struct qeth_ipacmd_setadpparms_hdr *hdr;
3227 struct qeth_cmd_buffer *iob;
3229 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3231 offsetof(struct qeth_ipacmd_setadpparms,
3236 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3237 hdr->cmdlength = sizeof(*hdr) + data_length;
3238 hdr->command_code = adp_cmd;
3239 hdr->used_total = 1;
3244 static int qeth_query_setadapterparms(struct qeth_card *card)
3247 struct qeth_cmd_buffer *iob;
3249 QETH_CARD_TEXT(card, 3, "queryadp");
3250 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3251 SETADP_DATA_SIZEOF(query_cmds_supp));
3254 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3258 static int qeth_query_ipassists_cb(struct qeth_card *card,
3259 struct qeth_reply *reply, unsigned long data)
3261 struct qeth_ipa_cmd *cmd;
3263 QETH_CARD_TEXT(card, 2, "qipasscb");
3265 cmd = (struct qeth_ipa_cmd *) data;
3267 switch (cmd->hdr.return_code) {
3268 case IPA_RC_SUCCESS:
3270 case IPA_RC_NOTSUPP:
3271 case IPA_RC_L2_UNSUPPORTED_CMD:
3272 QETH_CARD_TEXT(card, 2, "ipaunsup");
3273 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3274 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3277 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3278 CARD_DEVID(card), cmd->hdr.return_code);
3282 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3283 card->options.ipa4 = cmd->hdr.assists;
3284 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3285 card->options.ipa6 = cmd->hdr.assists;
3287 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3292 static int qeth_query_ipassists(struct qeth_card *card,
3293 enum qeth_prot_versions prot)
3296 struct qeth_cmd_buffer *iob;
3298 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3299 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3302 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3306 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3307 struct qeth_reply *reply, unsigned long data)
3309 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3310 struct qeth_query_switch_attributes *attrs;
3311 struct qeth_switch_info *sw_info;
3313 QETH_CARD_TEXT(card, 2, "qswiatcb");
3314 if (qeth_setadpparms_inspect_rc(cmd))
3317 sw_info = (struct qeth_switch_info *)reply->param;
3318 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3319 sw_info->capabilities = attrs->capabilities;
3320 sw_info->settings = attrs->settings;
3321 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3326 int qeth_query_switch_attributes(struct qeth_card *card,
3327 struct qeth_switch_info *sw_info)
3329 struct qeth_cmd_buffer *iob;
3331 QETH_CARD_TEXT(card, 2, "qswiattr");
3332 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3334 if (!netif_carrier_ok(card->dev))
3336 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3339 return qeth_send_ipa_cmd(card, iob,
3340 qeth_query_switch_attributes_cb, sw_info);
3343 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3344 enum qeth_diags_cmds sub_cmd,
3345 unsigned int data_length)
3347 struct qeth_ipacmd_diagass *cmd;
3348 struct qeth_cmd_buffer *iob;
3350 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3351 DIAG_HDR_LEN + data_length);
3355 cmd = &__ipa_cmd(iob)->data.diagass;
3356 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3357 cmd->subcmd = sub_cmd;
3360 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3362 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3363 struct qeth_reply *reply, unsigned long data)
3365 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3366 u16 rc = cmd->hdr.return_code;
3369 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3373 card->info.diagass_support = cmd->data.diagass.ext;
3377 static int qeth_query_setdiagass(struct qeth_card *card)
3379 struct qeth_cmd_buffer *iob;
3381 QETH_CARD_TEXT(card, 2, "qdiagass");
3382 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3385 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
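/* Descriptive comment (added): collect the identifying data for a hardware
 * trap record - CHPID, subchannel set and device number, plus LPAR number
 * and VM guest name where available.
 */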
3388 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3390 unsigned long info = get_zeroed_page(GFP_KERNEL);
3391 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3392 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3393 struct ccw_dev_id ccwid;
3396 tid->chpid = card->info.chpid;
3397 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3398 tid->ssid = ccwid.ssid;
3399 tid->devno = ccwid.devno;
3402 level = stsi(NULL, 0, 0, 0);
3403 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3404 tid->lparnr = info222->lpar_number;
3405 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3406 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3407 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3412 static int qeth_hw_trap_cb(struct qeth_card *card,
3413 struct qeth_reply *reply, unsigned long data)
3415 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3416 u16 rc = cmd->hdr.return_code;
3419 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3425 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3427 struct qeth_cmd_buffer *iob;
3428 struct qeth_ipa_cmd *cmd;
3430 QETH_CARD_TEXT(card, 2, "diagtrap");
3431 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3434 cmd = __ipa_cmd(iob);
3435 cmd->data.diagass.type = 1;
3436 cmd->data.diagass.action = action;
3438 case QETH_DIAGS_TRAP_ARM:
3439 cmd->data.diagass.options = 0x0003;
3440 cmd->data.diagass.ext = 0x00010000 +
3441 sizeof(struct qeth_trap_id);
3442 qeth_get_trap_id(card,
3443 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3445 case QETH_DIAGS_TRAP_DISARM:
3446 cmd->data.diagass.options = 0x0001;
3448 case QETH_DIAGS_TRAP_CAPTURE:
3451 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3454 static int qeth_check_qdio_errors(struct qeth_card *card,
3455 struct qdio_buffer *buf,
3456 unsigned int qdio_error,
3457 const char *dbftext)
3460 QETH_CARD_TEXT(card, 2, dbftext);
3461 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3462 buf->element[15].sflags);
3463 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3464 buf->element[14].sflags);
3465 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3466 if ((buf->element[15].sflags) == 0x12) {
3467 QETH_CARD_STAT_INC(card, rx_fifo_errors);
3475 static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3478 struct qeth_qdio_q *queue = card->qdio.in_q;
3479 struct list_head *lh;
3484 /* only requeue at a certain threshold to avoid SIGAs */
3485 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3486 for (i = queue->next_buf_to_init;
3487 i < queue->next_buf_to_init + count; ++i) {
3488 if (qeth_init_input_buffer(card,
3489 &queue->bufs[QDIO_BUFNR(i)])) {
3496 if (newcount < count) {
3497 /* we are short on memory, so we switch back to
3498 traditional skb allocation and drop packets */
3499 atomic_set(&card->force_alloc_skb, 3);
3502 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3507 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3509 if (i == card->qdio.in_buf_pool.buf_count) {
3510 QETH_CARD_TEXT(card, 2, "qsarbw");
3511 schedule_delayed_work(
3512 &card->buffer_reclaim_work,
3513 QETH_RECLAIM_WORK_TIME);
3518 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3519 queue->next_buf_to_init, count);
3521 QETH_CARD_TEXT(card, 2, "qinberr");
3523 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3531 static void qeth_buffer_reclaim_work(struct work_struct *work)
3533 struct qeth_card *card = container_of(to_delayed_work(work),
3535 buffer_reclaim_work);
3538 napi_schedule(&card->napi);
3539 /* kick-start the NAPI softirq: */
3543 static void qeth_handle_send_error(struct qeth_card *card,
3544 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3546 int sbalf15 = buffer->buffer->element[15].sflags;
3548 QETH_CARD_TEXT(card, 6, "hdsnderr");
3549 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3554 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3557 QETH_CARD_TEXT(card, 1, "lnkfail");
3558 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3559 (u16)qdio_err, (u8)sbalf15);
3563 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3564 * @queue: queue to check for packing buffer
3566 * Returns number of buffers that were prepared for flush.
3568 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3570 struct qeth_qdio_out_buffer *buffer;
3572 buffer = queue->bufs[queue->next_buf_to_fill];
3573 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3574 (buffer->next_element_to_fill > 0)) {
3575 /* it's a packing buffer */
3576 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3577 queue->next_buf_to_fill =
3578 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3585 * Switches to packing state if the number of used buffers on a queue
3586 * reaches a certain limit.
3588 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3590 if (!queue->do_pack) {
3591 if (atomic_read(&queue->used_buffers)
3592 >= QETH_HIGH_WATERMARK_PACK){
3593 /* switch non-PACKING -> PACKING */
3594 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3595 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3602 * Switches from packing to non-packing mode. If there is a packing
3603 * buffer on the queue this buffer will be prepared to be flushed.
3604 * In that case 1 is returned to inform the caller. If no buffer
3605 * has to be flushed, zero is returned.
3607 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3609 if (queue->do_pack) {
3610 if (atomic_read(&queue->used_buffers)
3611 <= QETH_LOW_WATERMARK_PACK) {
3612 /* switch PACKING -> non-PACKING */
3613 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3614 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3616 return qeth_prep_flush_pack_buffer(queue);
3622 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3625 struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3626 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3627 struct qeth_card *card = queue->card;
3631 for (i = index; i < index + count; ++i) {
3632 unsigned int bidx = QDIO_BUFNR(i);
3633 struct sk_buff *skb;
3635 buf = queue->bufs[bidx];
3636 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3637 SBAL_EFLAGS_LAST_ENTRY;
3638 queue->coalesced_frames += buf->frames;
3640 if (queue->bufstates)
3641 queue->bufstates[bidx].user = buf;
3644 skb_queue_walk(&buf->skb_list, skb)
3645 skb_tx_timestamp(skb);
3649 if (!IS_IQD(card)) {
3650 if (!queue->do_pack) {
3651 if ((atomic_read(&queue->used_buffers) >=
3652 (QETH_HIGH_WATERMARK_PACK -
3653 QETH_WATERMARK_PACK_FUZZ)) &&
3654 !atomic_read(&queue->set_pci_flags_count)) {
3655 /* it's likely that we'll go to packing mode soon */
3657 atomic_inc(&queue->set_pci_flags_count);
3658 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3661 if (!atomic_read(&queue->set_pci_flags_count)) {
3663 * there's no outstanding PCI any more, so we
3664 * have to request a PCI to be sure that the PCI
3665 * will wake at some time in the future; then we
3666 * can flush packed buffers that might still be
3667 * hanging around, which can happen if no
3668 * further send was requested by the stack
3670 atomic_inc(&queue->set_pci_flags_count);
3671 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3675 if (atomic_read(&queue->set_pci_flags_count))
3676 qdio_flags |= QDIO_FLAG_PCI_OUT;
3679 QETH_TXQ_STAT_INC(queue, doorbell);
3680 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3681 queue->queue_no, index, count);
3686 /* ignore temporary SIGA errors without busy condition */
3688 /* Fake the TX completion interrupt: */
3690 unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3691 unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3693 if (frames && queue->coalesced_frames >= frames) {
3694 napi_schedule(&queue->napi);
3695 queue->coalesced_frames = 0;
3696 QETH_TXQ_STAT_INC(queue, coal_frames);
3698 qeth_tx_arm_timer(queue, usecs);
3704 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3705 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3706 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3707 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3708 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3710 /* this must not happen under normal circumstances. If it
3711 * happens, something is really wrong -> recover */
3712 qeth_schedule_recovery(queue->card);
3716 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3718 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3720 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3721 queue->prev_hdr = NULL;
3722 queue->bulk_count = 0;
3725 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3728 * check if we have to switch to non-packing mode or if
3729 * we have to get a PCI flag out on the queue
3731 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3732 !atomic_read(&queue->set_pci_flags_count)) {
3733 unsigned int index, flush_cnt;
3736 spin_lock(&queue->lock);
3738 index = queue->next_buf_to_fill;
3739 q_was_packing = queue->do_pack;
3741 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3742 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3743 flush_cnt = qeth_prep_flush_pack_buffer(queue);
3746 qeth_flush_buffers(queue, index, flush_cnt);
3748 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3751 spin_unlock(&queue->lock);
3755 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3757 struct qeth_card *card = (struct qeth_card *)card_ptr;
3759 napi_schedule_irqoff(&card->napi);
3762 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3766 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3770 if (card->options.cq == cq) {
3775 qeth_free_qdio_queues(card);
3776 card->options.cq = cq;
3783 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3785 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3786 unsigned int queue, int first_element,
3789 struct qeth_qdio_q *cq = card->qdio.c_q;
3793 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3794 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3795 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3798 netif_tx_stop_all_queues(card->dev);
3799 qeth_schedule_recovery(card);
3803 for (i = first_element; i < first_element + count; ++i) {
3804 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3807 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3808 buffer->element[e].addr) {
3809 unsigned long phys_aob_addr = buffer->element[e].addr;
3811 qeth_qdio_handle_aob(card, phys_aob_addr);
3814 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3816 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3817 card->qdio.c_q->next_buf_to_init,
3820 dev_warn(&card->gdev->dev,
3821 "QDIO reported an error, rc=%i\n", rc);
3822 QETH_CARD_TEXT(card, 2, "qcqherr");
3825 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3828 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3829 unsigned int qdio_err, int queue,
3830 int first_elem, int count,
3831 unsigned long card_ptr)
3833 struct qeth_card *card = (struct qeth_card *)card_ptr;
3835 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3836 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3839 qeth_schedule_recovery(card);
3842 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3843 unsigned int qdio_error, int __queue,
3844 int first_element, int count,
3845 unsigned long card_ptr)
3847 struct qeth_card *card = (struct qeth_card *) card_ptr;
3848 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3849 struct net_device *dev = card->dev;
3850 struct netdev_queue *txq;
3853 QETH_CARD_TEXT(card, 6, "qdouhdl");
3854 if (qdio_error & QDIO_ERROR_FATAL) {
3855 QETH_CARD_TEXT(card, 2, "achkcond");
3856 netif_tx_stop_all_queues(dev);
3857 qeth_schedule_recovery(card);
3861 for (i = first_element; i < (first_element + count); ++i) {
3862 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3864 qeth_handle_send_error(card, buf, qdio_error);
3865 qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3868 atomic_sub(count, &queue->used_buffers);
3869 qeth_check_outbound_queue(queue);
3871 txq = netdev_get_tx_queue(dev, __queue);
3872 /* xmit may have observed the full-condition, but not yet stopped the
3873 * txq. In that case the code below won't trigger. So before returning,
3874 * xmit will re-check the txq's fill level and wake it up if needed.
3876 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3877 netif_tx_wake_queue(txq);
3881 * Note: Function assumes that we have 4 outbound queues.
3883 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3885 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3888 switch (card->qdio.do_prio_queueing) {
3889 case QETH_PRIO_Q_ING_TOS:
3890 case QETH_PRIO_Q_ING_PREC:
3891 switch (vlan_get_protocol(skb)) {
3892 case htons(ETH_P_IP):
3893 tos = ipv4_get_dsfield(ip_hdr(skb));
3895 case htons(ETH_P_IPV6):
3896 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3899 return card->qdio.default_out_queue;
3901 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3902 return ~tos >> 6 & 3;
3903 if (tos & IPTOS_MINCOST)
3905 if (tos & IPTOS_RELIABILITY)
3907 if (tos & IPTOS_THROUGHPUT)
3909 if (tos & IPTOS_LOWDELAY)
3912 case QETH_PRIO_Q_ING_SKB:
3913 if (skb->priority > 5)
3915 return ~skb->priority >> 1 & 3;
3916 case QETH_PRIO_Q_ING_VLAN:
3917 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3918 return ~ntohs(veth->h_vlan_TCI) >>
3919 (VLAN_PRIO_SHIFT + 1) & 3;
3921 case QETH_PRIO_Q_ING_FIXED:
3922 return card->qdio.default_out_queue;
3926 return card->qdio.default_out_queue;
3928 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3931 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3934 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3935 * the fragmented part of the SKB. Returns zero for a linear SKB.
3937 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3939 int cnt, elements = 0;
3941 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3942 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3944 elements += qeth_get_elements_for_range(
3945 (addr_t)skb_frag_address(frag),
3946 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3952 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3953 * to transmit an skb.
3954 * @skb: the skb to operate on.
3955 * @data_offset: skip this part of the skb's linear data
3957 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3958 * skb's data (both its linear part and paged fragments).
3960 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3962 unsigned int elements = qeth_get_elements_for_frags(skb);
3963 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3964 addr_t start = (addr_t)skb->data + data_offset;
3967 elements += qeth_get_elements_for_range(start, end);
3970 EXPORT_SYMBOL_GPL(qeth_count_elements);
3972 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3976 * qeth_add_hw_header() - add a HW header to an skb.
3977 * @skb: skb that the HW header should be added to.
3978 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3979 * it contains a valid pointer to a qeth_hdr.
3980 * @hdr_len: length of the HW header.
3981 * @proto_len: length of protocol headers that need to be in same page as the
3984 * Returns the pushed length. If the header can't be pushed on
3985 * (e.g. because it would cross a page boundary), it is allocated from
3986 * the cache instead and 0 is returned.
3987 * The number of needed buffer elements is returned in @elements.
3988 * Failure to create the hdr is indicated by a return value < 0.
3990 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3991 struct sk_buff *skb, struct qeth_hdr **hdr,
3992 unsigned int hdr_len, unsigned int proto_len,
3993 unsigned int *elements)
3995 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3996 const unsigned int contiguous = proto_len ? proto_len : 1;
3997 const unsigned int max_elements = queue->max_elements;
3998 unsigned int __elements;
4004 start = (addr_t)skb->data - hdr_len;
4005 end = (addr_t)skb->data;
4007 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
4008 /* Push HW header into same page as first protocol header. */
4010 /* ... but TSO always needs a separate element for headers: */
4011 if (skb_is_gso(skb))
4012 __elements = 1 + qeth_count_elements(skb, proto_len);
4014 __elements = qeth_count_elements(skb, 0);
4015 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4016 /* Push HW header into preceding page, flush with skb->data. */
4018 __elements = 1 + qeth_count_elements(skb, 0);
4020 /* Use header cache, copy protocol headers up. */
4022 __elements = 1 + qeth_count_elements(skb, proto_len);
4025 /* Compress skb to fit into one IO buffer: */
4026 if (__elements > max_elements) {
4027 if (!skb_is_nonlinear(skb)) {
4028 /* Drop it, no easy way of shrinking it further. */
4029 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
4030 max_elements, __elements, skb->len);
4034 rc = skb_linearize(skb);
4036 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
4040 QETH_TXQ_STAT_INC(queue, skbs_linearized);
4041 /* Linearization changed the layout, re-evaluate: */
4045 *elements = __elements;
4046 /* Add the header: */
4048 *hdr = skb_push(skb, hdr_len);
4052 /* Fall back to cache element with known-good alignment: */
4053 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4055 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4058 /* Copy protocol headers behind HW header: */
4059 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
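/* Descriptive comment (added): decide whether an skb may be bulked into the
 * current buffer - for layer 2 it must share destination MAC and VLAN with
 * the previous skb, for layer 3 it must share the next hop and VLAN.
 */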
4063 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4064 struct sk_buff *curr_skb,
4065 struct qeth_hdr *curr_hdr)
4067 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4068 struct qeth_hdr *prev_hdr = queue->prev_hdr;
4073 /* All packets must have the same target: */
4074 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4075 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4077 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4078 eth_hdr(curr_skb)->h_dest) &&
4079 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4082 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4083 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4087 * qeth_fill_buffer() - map skb into an output buffer
4088 * @buf: buffer to transport the skb
4089 * @skb: skb to map into the buffer
4090 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
4091 * from qeth_core_header_cache.
4092 * @offset: when mapping the skb, start at skb->data + offset
4093 * @hd_len: if > 0, build a dedicated header element of this size
4095 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4096 struct sk_buff *skb, struct qeth_hdr *hdr,
4097 unsigned int offset, unsigned int hd_len)
4099 struct qdio_buffer *buffer = buf->buffer;
4100 int element = buf->next_element_to_fill;
4101 int length = skb_headlen(skb) - offset;
4102 char *data = skb->data + offset;
4103 unsigned int elem_length, cnt;
4104 bool is_first_elem = true;
4106 __skb_queue_tail(&buf->skb_list, skb);
4108 /* build dedicated element for HW Header */
4110 is_first_elem = false;
4112 buffer->element[element].addr = virt_to_phys(hdr);
4113 buffer->element[element].length = hd_len;
4114 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4116 /* HW header is allocated from cache: */
4117 if ((void *)hdr != skb->data)
4118 buf->is_header[element] = 1;
4119 /* HW header was pushed and is contiguous with linear part: */
4120 else if (length > 0 && !PAGE_ALIGNED(data) &&
4121 (data == (char *)hdr + hd_len))
4122 buffer->element[element].eflags |=
4123 SBAL_EFLAGS_CONTIGUOUS;
4128 /* map linear part into buffer element(s) */
4129 while (length > 0) {
4130 elem_length = min_t(unsigned int, length,
4131 PAGE_SIZE - offset_in_page(data));
4133 buffer->element[element].addr = virt_to_phys(data);
4134 buffer->element[element].length = elem_length;
4135 length -= elem_length;
4136 if (is_first_elem) {
4137 is_first_elem = false;
4138 if (length || skb_is_nonlinear(skb))
4139 /* skb needs additional elements */
4140 buffer->element[element].eflags =
4141 SBAL_EFLAGS_FIRST_FRAG;
4143 buffer->element[element].eflags = 0;
4145 buffer->element[element].eflags =
4146 SBAL_EFLAGS_MIDDLE_FRAG;
4149 data += elem_length;
4153 /* map page frags into buffer element(s) */
4154 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4155 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4157 data = skb_frag_address(frag);
4158 length = skb_frag_size(frag);
4159 while (length > 0) {
4160 elem_length = min_t(unsigned int, length,
4161 PAGE_SIZE - offset_in_page(data));
4163 buffer->element[element].addr = virt_to_phys(data);
4164 buffer->element[element].length = elem_length;
4165 buffer->element[element].eflags =
4166 SBAL_EFLAGS_MIDDLE_FRAG;
4168 length -= elem_length;
4169 data += elem_length;
4174 if (buffer->element[element - 1].eflags)
4175 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4176 buf->next_element_to_fill = element;
4180 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4181 struct sk_buff *skb, unsigned int elements,
4182 struct qeth_hdr *hdr, unsigned int offset,
4183 unsigned int hd_len)
4185 unsigned int bytes = qdisc_pkt_len(skb);
4186 struct qeth_qdio_out_buffer *buffer;
4187 unsigned int next_element;
4188 struct netdev_queue *txq;
4189 bool stopped = false;
4192 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4193 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4195 /* Just a sanity check, the wake/stop logic should ensure that we always
4196 * get a free buffer.
4198 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4201 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4204 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4205 if (buffer->next_element_to_fill > 0) {
4206 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4207 queue->bulk_count++;
4210 if (queue->bulk_count >= queue->bulk_max)
4214 qeth_flush_queue(queue);
4216 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4217 queue->bulk_count)];
4219 /* Sanity-check again: */
4220 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4224 if (buffer->next_element_to_fill == 0 &&
4225 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4226 /* If a TX completion happens right _here_ and fails to wake
4227 * the txq, then our re-check below will catch the race.
4229 QETH_TXQ_STAT_INC(queue, stopped);
4230 netif_tx_stop_queue(txq);
4234 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4235 buffer->bytes += bytes;
4236 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4237 queue->prev_hdr = hdr;
4239 flush = __netdev_tx_sent_queue(txq, bytes,
4240 !stopped && netdev_xmit_more());
4242 if (flush || next_element >= queue->max_elements) {
4243 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4244 queue->bulk_count++;
4246 if (queue->bulk_count >= queue->bulk_max)
4250 qeth_flush_queue(queue);
4253 if (stopped && !qeth_out_queue_is_full(queue))
4254 netif_tx_start_queue(txq);
4258 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4259 struct sk_buff *skb, struct qeth_hdr *hdr,
4260 unsigned int offset, unsigned int hd_len,
4261 int elements_needed)
4263 unsigned int start_index = queue->next_buf_to_fill;
4264 struct qeth_qdio_out_buffer *buffer;
4265 unsigned int next_element;
4266 struct netdev_queue *txq;
4267 bool stopped = false;
4268 int flush_count = 0;
4272 buffer = queue->bufs[queue->next_buf_to_fill];
4274 /* Just a sanity check, the wake/stop logic should ensure that we always
4275 * get a free buffer.
4277 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4280 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4282 /* check if we need to switch packing state of this queue */
4283 qeth_switch_to_packing_if_needed(queue);
4284 if (queue->do_pack) {
4286 /* does packet fit in current buffer? */
4287 if (buffer->next_element_to_fill + elements_needed >
4288 queue->max_elements) {
4289 /* ... no -> set state PRIMED */
4290 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4292 queue->next_buf_to_fill =
4293 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4294 buffer = queue->bufs[queue->next_buf_to_fill];
4296 /* We stepped forward, so sanity-check again: */
4297 if (atomic_read(&buffer->state) !=
4298 QETH_QDIO_BUF_EMPTY) {
4299 qeth_flush_buffers(queue, start_index,
4307 if (buffer->next_element_to_fill == 0 &&
4308 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4309 /* If a TX completion happens right _here_ and fails to wake
4310 * the txq, then our re-check below will catch the race.
4312 QETH_TXQ_STAT_INC(queue, stopped);
4313 netif_tx_stop_queue(txq);
4317 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4318 buffer->bytes += qdisc_pkt_len(skb);
4319 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4322 QETH_TXQ_STAT_INC(queue, skbs_pack);
4323 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4325 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4326 queue->next_buf_to_fill =
4327 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4331 qeth_flush_buffers(queue, start_index, flush_count);
4335 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4337 if (stopped && !qeth_out_queue_is_full(queue))
4338 netif_tx_start_queue(txq);
4341 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4343 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4344 unsigned int payload_len, struct sk_buff *skb,
4345 unsigned int proto_len)
4347 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4349 ext->hdr_tot_len = sizeof(*ext);
4350 ext->imb_hdr_no = 1;
4352 ext->hdr_version = 1;
4354 ext->payload_len = payload_len;
4355 ext->mss = skb_shinfo(skb)->gso_size;
4356 ext->dg_hdr_len = proto_len;
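/* Descriptive comment (added): common transmit path for the layer 2/3
 * disciplines - build the HW header (pushed into the skb or taken from the
 * header cache), add the TSO extension if needed, and hand the skb to the
 * IQD fast path or to the packing path under the queue lock.
 */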
4359 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4360 struct qeth_qdio_out_q *queue, __be16 proto,
4361 void (*fill_header)(struct qeth_qdio_out_q *queue,
4362 struct qeth_hdr *hdr, struct sk_buff *skb,
4363 __be16 proto, unsigned int data_len))
4365 unsigned int proto_len, hw_hdr_len;
4366 unsigned int frame_len = skb->len;
4367 bool is_tso = skb_is_gso(skb);
4368 unsigned int data_offset = 0;
4369 struct qeth_hdr *hdr = NULL;
4370 unsigned int hd_len = 0;
4371 unsigned int elements;
4375 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4376 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4378 hw_hdr_len = sizeof(struct qeth_hdr);
4379 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4382 rc = skb_cow_head(skb, hw_hdr_len);
4386 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4390 if (is_tso || !push_len) {
4391 /* HW header needs its own buffer element. */
4392 hd_len = hw_hdr_len + proto_len;
4393 data_offset = push_len + proto_len;
4395 memset(hdr, 0, hw_hdr_len);
4396 fill_header(queue, hdr, skb, proto, frame_len);
4398 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4399 frame_len - proto_len, skb, proto_len);
4402 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4405 /* TODO: drop skb_orphan() once TX completion is fast enough */
4407 spin_lock(&queue->lock);
4408 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4410 spin_unlock(&queue->lock);
4413 if (rc && !push_len)
4414 kmem_cache_free(qeth_core_header_cache, hdr);
4418 EXPORT_SYMBOL_GPL(qeth_xmit);
4420 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4421 struct qeth_reply *reply, unsigned long data)
4423 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4424 struct qeth_ipacmd_setadpparms *setparms;
4426 QETH_CARD_TEXT(card, 4, "prmadpcb");
4428 setparms = &(cmd->data.setadapterparms);
4429 if (qeth_setadpparms_inspect_rc(cmd)) {
4430 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4431 setparms->data.mode = SET_PROMISC_MODE_OFF;
4433 card->info.promisc_mode = setparms->data.mode;
4434 return (cmd->hdr.return_code) ? -EIO : 0;
4437 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4439 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4440 SET_PROMISC_MODE_OFF;
4441 struct qeth_cmd_buffer *iob;
4442 struct qeth_ipa_cmd *cmd;
4444 QETH_CARD_TEXT(card, 4, "setprom");
4445 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4447 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4448 SETADP_DATA_SIZEOF(mode));
4451 cmd = __ipa_cmd(iob);
4452 cmd->data.setadapterparms.data.mode = mode;
4453 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4455 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4457 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4458 struct qeth_reply *reply, unsigned long data)
4460 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4461 struct qeth_ipacmd_setadpparms *adp_cmd;
4463 QETH_CARD_TEXT(card, 4, "chgmaccb");
4464 if (qeth_setadpparms_inspect_rc(cmd))
4467 adp_cmd = &cmd->data.setadapterparms;
4468 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4469 return -EADDRNOTAVAIL;
4471 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4472 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4473 return -EADDRNOTAVAIL;
4475 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4479 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4482 struct qeth_cmd_buffer *iob;
4483 struct qeth_ipa_cmd *cmd;
4485 QETH_CARD_TEXT(card, 4, "chgmac");
4487 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4488 SETADP_DATA_SIZEOF(change_addr));
4491 cmd = __ipa_cmd(iob);
4492 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4493 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4494 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4495 card->dev->dev_addr);
4496 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4500 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4502 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4503 struct qeth_reply *reply, unsigned long data)
4505 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4506 struct qeth_set_access_ctrl *access_ctrl_req;
4508 QETH_CARD_TEXT(card, 4, "setaccb");
4510 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4511 QETH_CARD_TEXT_(card, 2, "rc=%d",
4512 cmd->data.setadapterparms.hdr.return_code);
4513 if (cmd->data.setadapterparms.hdr.return_code !=
4514 SET_ACCESS_CTRL_RC_SUCCESS)
4515 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4516 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4517 cmd->data.setadapterparms.hdr.return_code);
4518 switch (qeth_setadpparms_inspect_rc(cmd)) {
4519 case SET_ACCESS_CTRL_RC_SUCCESS:
4520 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4521 dev_info(&card->gdev->dev,
4522 "QDIO data connection isolation is deactivated\n");
4524 dev_info(&card->gdev->dev,
4525 "QDIO data connection isolation is activated\n");
4527 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4528 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4531 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4532 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4535 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4536 dev_err(&card->gdev->dev, "Adapter does not "
4537 "support QDIO data connection isolation\n");
4539 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4540 dev_err(&card->gdev->dev,
4541 "Adapter is dedicated. "
4542 "QDIO data connection isolation not supported\n");
4544 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4545 dev_err(&card->gdev->dev,
4546 "TSO does not permit QDIO data connection isolation\n");
4548 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4549 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4550 "support reflective relay mode\n");
4552 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4553 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4554 "enabled at the adjacent switch port");
4556 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4557 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4558 "at the adjacent switch failed\n");
4559 /* benign error while disabling ISOLATION_MODE_FWD */
4566 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4567 enum qeth_ipa_isolation_modes mode)
4570 struct qeth_cmd_buffer *iob;
4571 struct qeth_ipa_cmd *cmd;
4572 struct qeth_set_access_ctrl *access_ctrl_req;
4574 QETH_CARD_TEXT(card, 4, "setacctl");
4576 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4577 dev_err(&card->gdev->dev,
4578 "Adapter does not support QDIO data connection isolation\n");
4582 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4583 SETADP_DATA_SIZEOF(set_access_ctrl));
4586 cmd = __ipa_cmd(iob);
4587 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4588 access_ctrl_req->subcmd_code = mode;
4590 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4593 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4594 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4595 rc, CARD_DEVID(card));
void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
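/* qeth devices do not expose a real MDIO bus; emulate a minimal set of MII
 * registers so that generic SIOCGMIIREG queries return plausible link
 * information.
 */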
4611 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4613 struct qeth_card *card = dev->ml_priv;
4617 case MII_BMCR: /* Basic mode control register */
4619 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4620 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4621 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4622 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4623 rc |= BMCR_SPEED100;
4625 case MII_BMSR: /* Basic mode status register */
4626 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4627 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4630 case MII_PHYSID1: /* PHYS ID 1 */
4631 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4633 rc = (rc >> 5) & 0xFFFF;
4635 case MII_PHYSID2: /* PHYS ID 2 */
4636 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4638 case MII_ADVERTISE: /* Advertisement control reg */
4641 case MII_LPA: /* Link partner ability reg */
4642 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4643 LPA_100BASE4 | LPA_LPACK;
4645 case MII_EXPANSION: /* Expansion register */
4647 case MII_DCOUNTER: /* disconnect counter */
4649 case MII_FCSCOUNTER: /* false carrier counter */
4651 case MII_NWAYTEST: /* N-way auto-neg test register */
4653 case MII_RERRCOUNTER: /* rx error counter */
4654 rc = card->stats.rx_length_errors +
4655 card->stats.rx_frame_errors +
4656 card->stats.rx_fifo_errors;
4658 case MII_SREVISION: /* silicon revision */
4660 case MII_RESV1: /* reserved 1 */
4662 case MII_LBRERROR: /* loopback, rx, bypass error */
4664 case MII_PHYADDR: /* physical address */
4666 case MII_RESV2: /* reserved 2 */
4668 case MII_TPISTATUS: /* TPI status for 10mbps */
4670 case MII_NCONFIG: /* network interface config */
4678 static int qeth_snmp_command_cb(struct qeth_card *card,
4679 struct qeth_reply *reply, unsigned long data)
4681 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4682 struct qeth_arp_query_info *qinfo = reply->param;
4683 struct qeth_ipacmd_setadpparms *adp_cmd;
4684 unsigned int data_len;
4687 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4689 if (cmd->hdr.return_code) {
4690 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4693 if (cmd->data.setadapterparms.hdr.return_code) {
4694 cmd->hdr.return_code =
4695 cmd->data.setadapterparms.hdr.return_code;
4696 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4700 adp_cmd = &cmd->data.setadapterparms;
4701 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4702 if (adp_cmd->hdr.seq_no == 1) {
4703 snmp_data = &adp_cmd->data.snmp;
4705 snmp_data = &adp_cmd->data.snmp.request;
4706 data_len -= offsetof(struct qeth_snmp_cmd, request);
4709 /* check if there is enough room in userspace */
4710 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4711 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4714 QETH_CARD_TEXT_(card, 4, "snore%i",
4715 cmd->data.setadapterparms.hdr.used_total);
4716 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4717 cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
4719 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4720 qinfo->udata_offset += data_len;
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
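/* Handle the SIOC_QETH_ADP_SET_SNMP_CONTROL ioctl: forward an SNMP request to
 * the adapter via IPA SETADAPTERPARMS and copy the (possibly multi-part)
 * reply back into the user-supplied buffer.
 */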
4728 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4730 struct qeth_snmp_ureq __user *ureq;
4731 struct qeth_cmd_buffer *iob;
4732 unsigned int req_len;
4733 struct qeth_arp_query_info qinfo = {0, };
4736 QETH_CARD_TEXT(card, 3, "snmpcmd");
4738 if (IS_VM_NIC(card))
4741 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4745 ureq = (struct qeth_snmp_ureq __user *) udata;
4746 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4747 get_user(req_len, &ureq->hdr.req_len))
4750 /* Sanitize user input, to avoid overflows in iob size calculation: */
4751 if (req_len > QETH_BUFSIZE)
4754 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4758 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4759 &ureq->cmd, req_len)) {
4764 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4769 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4771 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4773 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4774 CARD_DEVID(card), rc);
4776 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4784 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4785 struct qeth_reply *reply,
4788 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4789 struct qeth_qoat_priv *priv = reply->param;
4792 QETH_CARD_TEXT(card, 3, "qoatcb");
4793 if (qeth_setadpparms_inspect_rc(cmd))
4796 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4798 if (resdatalen > (priv->buffer_len - priv->response_len))
4801 memcpy(priv->buffer + priv->response_len,
4802 &cmd->data.setadapterparms.hdr, resdatalen);
4803 priv->response_len += resdatalen;
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
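/* Handle the SIOC_QETH_QUERY_OAT ioctl: collect the OSA Address Table reply
 * in a kernel buffer and copy it back to the user-supplied buffer.
 */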
4811 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4814 struct qeth_cmd_buffer *iob;
4815 struct qeth_ipa_cmd *cmd;
4816 struct qeth_query_oat *oat_req;
4817 struct qeth_query_oat_data oat_data;
4818 struct qeth_qoat_priv priv;
4821 QETH_CARD_TEXT(card, 3, "qoatcmd");
4823 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4826 if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4829 priv.buffer_len = oat_data.buffer_len;
4830 priv.response_len = 0;
4831 priv.buffer = vzalloc(oat_data.buffer_len);
4835 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4836 SETADP_DATA_SIZEOF(query_oat));
4841 cmd = __ipa_cmd(iob);
4842 oat_req = &cmd->data.setadapterparms.data.query_oat;
4843 oat_req->subcmd_code = oat_data.command;
4845 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4847 tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4848 u64_to_user_ptr(oat_data.ptr);
4849 oat_data.response_len = priv.response_len;
4851 if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4852 copy_to_user(udata, &oat_data, sizeof(oat_data)))
4861 static int qeth_query_card_info_cb(struct qeth_card *card,
4862 struct qeth_reply *reply, unsigned long data)
4864 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4865 struct qeth_link_info *link_info = reply->param;
4866 struct qeth_query_card_info *card_info;
4868 QETH_CARD_TEXT(card, 2, "qcrdincb");
4869 if (qeth_setadpparms_inspect_rc(cmd))
4872 card_info = &cmd->data.setadapterparms.data.card_info;
4873 netdev_dbg(card->dev,
4874 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
4875 card_info->card_type, card_info->port_mode,
4876 card_info->port_speed);
4878 switch (card_info->port_mode) {
4879 case CARD_INFO_PORTM_FULLDUPLEX:
4880 link_info->duplex = DUPLEX_FULL;
4882 case CARD_INFO_PORTM_HALFDUPLEX:
4883 link_info->duplex = DUPLEX_HALF;
4886 link_info->duplex = DUPLEX_UNKNOWN;
4889 switch (card_info->card_type) {
4890 case CARD_INFO_TYPE_1G_COPPER_A:
4891 case CARD_INFO_TYPE_1G_COPPER_B:
4892 link_info->speed = SPEED_1000;
4893 link_info->port = PORT_TP;
4895 case CARD_INFO_TYPE_1G_FIBRE_A:
4896 case CARD_INFO_TYPE_1G_FIBRE_B:
4897 link_info->speed = SPEED_1000;
4898 link_info->port = PORT_FIBRE;
4900 case CARD_INFO_TYPE_10G_FIBRE_A:
4901 case CARD_INFO_TYPE_10G_FIBRE_B:
4902 link_info->speed = SPEED_10000;
4903 link_info->port = PORT_FIBRE;
4906 switch (card_info->port_speed) {
4907 case CARD_INFO_PORTS_10M:
4908 link_info->speed = SPEED_10;
4910 case CARD_INFO_PORTS_100M:
4911 link_info->speed = SPEED_100;
4913 case CARD_INFO_PORTS_1G:
4914 link_info->speed = SPEED_1000;
4916 case CARD_INFO_PORTS_10G:
4917 link_info->speed = SPEED_10000;
4919 case CARD_INFO_PORTS_25G:
4920 link_info->speed = SPEED_25000;
4923 link_info->speed = SPEED_UNKNOWN;
4926 link_info->port = PORT_OTHER;
4932 int qeth_query_card_info(struct qeth_card *card,
4933 struct qeth_link_info *link_info)
4935 struct qeth_cmd_buffer *iob;
4937 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4938 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4940 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4944 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
4947 static int qeth_init_link_info_oat_cb(struct qeth_card *card,
4948 struct qeth_reply *reply_priv,
4951 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4952 struct qeth_link_info *link_info = reply_priv->param;
4953 struct qeth_query_oat_physical_if *phys_if;
4954 struct qeth_query_oat_reply *reply;
4956 if (qeth_setadpparms_inspect_rc(cmd))
4959 /* Multi-part reply is unexpected, don't bother: */
4960 if (cmd->data.setadapterparms.hdr.used_total > 1)
4963 /* Expect the reply to start with phys_if data: */
4964 reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
4965 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
4966 reply->length < sizeof(*reply))
4969 phys_if = &reply->phys_if;
4971 switch (phys_if->speed_duplex) {
4972 case QETH_QOAT_PHYS_SPEED_10M_HALF:
4973 link_info->speed = SPEED_10;
4974 link_info->duplex = DUPLEX_HALF;
4976 case QETH_QOAT_PHYS_SPEED_10M_FULL:
4977 link_info->speed = SPEED_10;
4978 link_info->duplex = DUPLEX_FULL;
4980 case QETH_QOAT_PHYS_SPEED_100M_HALF:
4981 link_info->speed = SPEED_100;
4982 link_info->duplex = DUPLEX_HALF;
4984 case QETH_QOAT_PHYS_SPEED_100M_FULL:
4985 link_info->speed = SPEED_100;
4986 link_info->duplex = DUPLEX_FULL;
4988 case QETH_QOAT_PHYS_SPEED_1000M_HALF:
4989 link_info->speed = SPEED_1000;
4990 link_info->duplex = DUPLEX_HALF;
4992 case QETH_QOAT_PHYS_SPEED_1000M_FULL:
4993 link_info->speed = SPEED_1000;
4994 link_info->duplex = DUPLEX_FULL;
4996 case QETH_QOAT_PHYS_SPEED_10G_FULL:
4997 link_info->speed = SPEED_10000;
4998 link_info->duplex = DUPLEX_FULL;
5000 case QETH_QOAT_PHYS_SPEED_25G_FULL:
5001 link_info->speed = SPEED_25000;
5002 link_info->duplex = DUPLEX_FULL;
5004 case QETH_QOAT_PHYS_SPEED_UNKNOWN:
5006 link_info->speed = SPEED_UNKNOWN;
5007 link_info->duplex = DUPLEX_UNKNOWN;
5011 switch (phys_if->media_type) {
5012 case QETH_QOAT_PHYS_MEDIA_COPPER:
5013 link_info->port = PORT_TP;
5014 link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
5016 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
5017 link_info->port = PORT_FIBRE;
5018 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
5020 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
5021 link_info->port = PORT_FIBRE;
5022 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
5025 link_info->port = PORT_OTHER;
5026 link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
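/* Seed the link info from the card and link type, then try to refine speed,
 * duplex, port and fibre mode via QUERY OAT where the adapter supports it.
 */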
5033 static void qeth_init_link_info(struct qeth_card *card)
5035 card->info.link_info.duplex = DUPLEX_FULL;
5037 if (IS_IQD(card) || IS_VM_NIC(card)) {
5038 card->info.link_info.speed = SPEED_10000;
5039 card->info.link_info.port = PORT_FIBRE;
5040 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
5042 switch (card->info.link_type) {
5043 case QETH_LINK_TYPE_FAST_ETH:
5044 case QETH_LINK_TYPE_LANE_ETH100:
5045 card->info.link_info.speed = SPEED_100;
5046 card->info.link_info.port = PORT_TP;
5048 case QETH_LINK_TYPE_GBIT_ETH:
5049 case QETH_LINK_TYPE_LANE_ETH1000:
5050 card->info.link_info.speed = SPEED_1000;
5051 card->info.link_info.port = PORT_FIBRE;
5053 case QETH_LINK_TYPE_10GBIT_ETH:
5054 card->info.link_info.speed = SPEED_10000;
5055 card->info.link_info.port = PORT_FIBRE;
5057 case QETH_LINK_TYPE_25GBIT_ETH:
5058 card->info.link_info.speed = SPEED_25000;
5059 card->info.link_info.port = PORT_FIBRE;
5062 dev_info(&card->gdev->dev, "Unknown link type %x\n",
5063 card->info.link_type);
5064 card->info.link_info.speed = SPEED_UNKNOWN;
5065 card->info.link_info.port = PORT_OTHER;
5068 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
5071 /* Get more accurate data via QUERY OAT: */
5072 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
5073 struct qeth_link_info link_info;
5074 struct qeth_cmd_buffer *iob;
5076 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
5077 SETADP_DATA_SIZEOF(query_oat));
5079 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
5080 struct qeth_query_oat *oat_req;
5082 oat_req = &cmd->data.setadapterparms.data.query_oat;
5083 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
5085 if (!qeth_send_ipa_cmd(card, iob,
5086 qeth_init_link_info_oat_cb,
5088 if (link_info.speed != SPEED_UNKNOWN)
5089 card->info.link_info.speed = link_info.speed;
5090 if (link_info.duplex != DUPLEX_UNKNOWN)
5091 card->info.link_info.duplex = link_info.duplex;
5092 if (link_info.port != PORT_OTHER)
5093 card->info.link_info.port = link_info.port;
5094 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
5095 card->info.link_info.link_mode = link_info.link_mode;
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
5109 int qeth_vm_request_mac(struct qeth_card *card)
5111 struct diag26c_mac_resp *response;
5112 struct diag26c_mac_req *request;
5115 QETH_CARD_TEXT(card, 2, "vmreqmac");
5117 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
5118 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
5119 if (!request || !response) {
5124 request->resp_buf_len = sizeof(*response);
5125 request->resp_version = DIAG26C_VERSION2;
5126 request->op_code = DIAG26C_GET_MAC;
5127 request->devno = card->info.ddev_devno;
5129 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5130 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5131 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5134 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5136 if (request->resp_buf_len < sizeof(*response) ||
5137 response->version != request->resp_version) {
5139 QETH_CARD_TEXT(card, 2, "badresp");
5140 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
5141 sizeof(request->resp_buf_len));
5142 } else if (!is_valid_ether_addr(response->mac)) {
5144 QETH_CARD_TEXT(card, 2, "badmac");
5145 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5147 ether_addr_copy(card->dev->dev_addr, response->mac);
5155 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
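/* Read the device's configuration data and SSQD to learn its QDIO
 * capabilities (for instance whether completion queueing is available),
 * starting the data channel temporarily if it is not online yet.
 */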
5157 static void qeth_determine_capabilities(struct qeth_card *card)
5159 struct qeth_channel *channel = &card->data;
5160 struct ccw_device *ddev = channel->ccwdev;
5162 int ddev_offline = 0;
5164 QETH_CARD_TEXT(card, 2, "detcapab");
5165 if (!ddev->online) {
5167 rc = qeth_start_channel(channel);
5169 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5174 rc = qeth_read_conf_data(card);
5176 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5177 CARD_DEVID(card), rc);
5178 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5182 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5184 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5186 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5187 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5188 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5189 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5190 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5191 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
5192 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
5193 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
5194 dev_info(&card->gdev->dev,
5195 "Completion Queueing supported\n");
5197 card->options.cq = QETH_CQ_NOTAVAILABLE;
5201 if (ddev_offline == 1)
5202 qeth_stop_channel(channel);
5207 static void qeth_read_ccw_conf_data(struct qeth_card *card)
5209 struct qeth_card_info *info = &card->info;
5210 struct ccw_device *cdev = CARD_DDEV(card);
5211 struct ccw_dev_id dev_id;
5213 QETH_CARD_TEXT(card, 2, "ccwconfd");
5214 ccw_device_get_id(cdev, &dev_id);
5216 info->ddev_devno = dev_id.devno;
5217 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5218 !ccw_device_get_iid(cdev, &info->iid) &&
5219 !ccw_device_get_chid(cdev, 0, &info->chid);
5220 info->ssid = dev_id.ssid;
5222 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5223 info->chid, info->chpid);
5225 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5226 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5227 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5228 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5229 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5230 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5231 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5234 static int qeth_qdio_establish(struct qeth_card *card)
5236 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5237 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5238 struct qeth_qib_parms *qib_parms = NULL;
5239 struct qdio_initialize init_data;
5243 QETH_CARD_TEXT(card, 2, "qdioest");
5245 if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5246 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5250 qeth_fill_qib_parms(card, qib_parms);
5253 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5254 if (card->options.cq == QETH_CQ_ENABLED)
5255 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5257 for (i = 0; i < card->qdio.no_out_queues; i++)
5258 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5260 memset(&init_data, 0, sizeof(struct qdio_initialize));
5261 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5263 init_data.qib_param_field_format = 0;
5264 init_data.qib_param_field = (void *)qib_parms;
5265 init_data.no_input_qs = card->qdio.no_in_queues;
5266 init_data.no_output_qs = card->qdio.no_out_queues;
5267 init_data.input_handler = qeth_qdio_input_handler;
5268 init_data.output_handler = qeth_qdio_output_handler;
5269 init_data.irq_poll = qeth_qdio_poll;
5270 init_data.int_parm = (unsigned long) card;
5271 init_data.input_sbal_addr_array = in_sbal_ptrs;
5272 init_data.output_sbal_addr_array = out_sbal_ptrs;
5273 init_data.output_sbal_state_array = card->qdio.out_bufstates;
5274 init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
5276 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5277 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5278 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5279 init_data.no_output_qs);
5281 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5284 rc = qdio_establish(CARD_DDEV(card), &init_data);
5286 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5287 qdio_free(CARD_DDEV(card));
5291 switch (card->options.cq) {
5292 case QETH_CQ_ENABLED:
5293 dev_info(&card->gdev->dev, "Completion Queue support enabled");
5295 case QETH_CQ_DISABLED:
5296 dev_info(&card->gdev->dev, "Completion Queue support disabled");
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}
static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}
5329 static struct ccw_device_id qeth_ids[] = {
5330 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5331 .driver_info = QETH_CARD_TYPE_OSD},
5332 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5333 .driver_info = QETH_CARD_TYPE_IQD},
5334 #ifdef CONFIG_QETH_OSN
5335 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5336 .driver_info = QETH_CARD_TYPE_OSN},
5338 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5339 .driver_info = QETH_CARD_TYPE_OSM},
5340 #ifdef CONFIG_QETH_OSX
5341 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5342 .driver_info = QETH_CARD_TYPE_OSX},
5346 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5348 static struct ccw_driver qeth_ccw_driver = {
5350 .owner = THIS_MODULE,
5354 .probe = ccwgroup_probe_ccwdev,
5355 .remove = ccwgroup_remove_ccwdev,
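/* Bring the card's channels online, activate IDX on the read and write
 * channels, set up the MPC connection and query the adapter's assists.
 * Called with the conf_mutex held during set_online and recovery.
 */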
5358 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5363 QETH_CARD_TEXT(card, 2, "hrdsetup");
5364 atomic_set(&card->force_alloc_skb, 0);
5365 rc = qeth_update_from_chp_desc(card);
5370 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5372 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5373 qeth_stop_channel(&card->data);
5374 qeth_stop_channel(&card->write);
5375 qeth_stop_channel(&card->read);
5376 qdio_free(CARD_DDEV(card));
5378 rc = qeth_start_channel(&card->read);
5381 rc = qeth_start_channel(&card->write);
5384 rc = qeth_start_channel(&card->data);
5388 if (rc == -ERESTARTSYS) {
5389 QETH_CARD_TEXT(card, 2, "break1");
5392 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5399 qeth_determine_capabilities(card);
5400 qeth_read_ccw_conf_data(card);
5401 qeth_idx_init(card);
5403 rc = qeth_idx_activate_read_channel(card);
5405 QETH_CARD_TEXT(card, 2, "break2");
5408 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5415 rc = qeth_idx_activate_write_channel(card);
5417 QETH_CARD_TEXT(card, 2, "break3");
5420 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5426 card->read_or_write_problem = 0;
5427 rc = qeth_mpc_initialize(card);
5429 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5433 rc = qeth_send_startlan(card);
5435 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5436 if (rc == -ENETDOWN) {
5437 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5438 *carrier_ok = false;
5446 card->options.ipa4.supported = 0;
5447 card->options.ipa6.supported = 0;
5448 card->options.adp.supported = 0;
5449 card->options.sbp.supported_funcs = 0;
5450 card->info.diagass_support = 0;
5451 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5454 if (qeth_is_supported(card, IPA_IPV6)) {
5455 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5459 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5460 rc = qeth_query_setadapterparms(card);
5462 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5466 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5467 rc = qeth_query_setdiagass(card);
5469 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5472 qeth_trace_features(card);
5474 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5475 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5476 card->info.hwtrap = 0;
5478 if (card->options.isolation != ISOLATION_MODE_NONE) {
5479 rc = qeth_setadpparms_set_access_ctrl(card,
5480 card->options.isolation);
5485 qeth_init_link_info(card);
5487 rc = qeth_init_qdio_queues(card);
5489 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5495 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5496 "an error on the device\n");
5497 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5498 CARD_DEVID(card), rc);
5502 static int qeth_set_online(struct qeth_card *card,
5503 const struct qeth_discipline *disc)
5508 mutex_lock(&card->conf_mutex);
5509 QETH_CARD_TEXT(card, 2, "setonlin");
5511 rc = qeth_hardsetup_card(card, &carrier_ok);
5513 QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5518 qeth_print_status_message(card);
5520 if (card->dev->reg_state != NETREG_REGISTERED)
5521 /* no need for locking / error handling at this early stage: */
5522 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5524 rc = disc->set_online(card, carrier_ok);
5528 /* let user_space know that device is online */
5529 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5531 mutex_unlock(&card->conf_mutex);
5536 qeth_qdio_clear_card(card, 0);
5537 qeth_clear_working_pool_list(card);
5538 qeth_flush_local_addrs(card);
5540 qeth_stop_channel(&card->data);
5541 qeth_stop_channel(&card->write);
5542 qeth_stop_channel(&card->read);
5543 qdio_free(CARD_DDEV(card));
5545 mutex_unlock(&card->conf_mutex);
5549 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5554 mutex_lock(&card->conf_mutex);
5555 QETH_CARD_TEXT(card, 3, "setoffl");
5557 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5558 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5559 card->info.hwtrap = 1;
5562 /* cancel any stalled cmd that might block the rtnl: */
5563 qeth_clear_ipacmd_list(card);
5566 card->info.open_when_online = card->dev->flags & IFF_UP;
5567 dev_close(card->dev);
5568 netif_device_detach(card->dev);
5569 netif_carrier_off(card->dev);
5572 cancel_work_sync(&card->rx_mode_work);
5574 disc->set_offline(card);
5576 qeth_qdio_clear_card(card, 0);
5577 qeth_drain_output_queues(card);
5578 qeth_clear_working_pool_list(card);
5579 qeth_flush_local_addrs(card);
5580 card->info.promisc_mode = 0;
5582 rc = qeth_stop_channel(&card->data);
5583 rc2 = qeth_stop_channel(&card->write);
5584 rc3 = qeth_stop_channel(&card->read);
5586 rc = (rc2) ? rc2 : rc3;
5588 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5589 qdio_free(CARD_DDEV(card));
5591 /* let user_space know that device is offline */
5592 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5594 mutex_unlock(&card->conf_mutex);
5597 EXPORT_SYMBOL_GPL(qeth_set_offline);
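/* Recovery worker: take the device offline and online again with its current
 * discipline. If recovery fails, the device is left offline.
 */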
5599 static int qeth_do_reset(void *data)
5601 const struct qeth_discipline *disc;
5602 struct qeth_card *card = data;
5605 /* Lock-free, other users will block until we are done. */
5606 disc = card->discipline;
5608 QETH_CARD_TEXT(card, 2, "recover1");
5609 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5611 QETH_CARD_TEXT(card, 2, "recover2");
5612 dev_warn(&card->gdev->dev,
5613 "A recovery process has been started for the device\n");
5615 qeth_set_offline(card, disc, true);
5616 rc = qeth_set_online(card, disc);
5618 dev_info(&card->gdev->dev,
5619 "Device successfully recovered!\n");
5621 ccwgroup_set_offline(card->gdev);
5622 dev_warn(&card->gdev->dev,
5623 "The qeth device driver failed to recover an error on the device\n");
5625 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5626 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5630 #if IS_ENABLED(CONFIG_QETH_L3)
5631 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5632 struct qeth_hdr *hdr)
5634 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5635 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5636 struct net_device *dev = skb->dev;
5638 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5639 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5640 "FAKELL", skb->len);
5644 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5645 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5647 unsigned char tg_addr[ETH_ALEN];
5649 skb_reset_network_header(skb);
5650 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5651 case QETH_CAST_MULTICAST:
5652 if (prot == ETH_P_IP)
5653 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5655 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5656 QETH_CARD_STAT_INC(card, rx_multicast);
5658 case QETH_CAST_BROADCAST:
5659 ether_addr_copy(tg_addr, dev->broadcast);
5660 QETH_CARD_STAT_INC(card, rx_multicast);
5663 if (card->options.sniffer)
5664 skb->pkt_type = PACKET_OTHERHOST;
5665 ether_addr_copy(tg_addr, dev->dev_addr);
5668 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5669 dev_hard_header(skb, dev, prot, tg_addr,
5670 &l3_hdr->next_hop.rx.src_mac, skb->len);
5672 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5676 /* copy VLAN tag from hdr into skb */
5677 if (!card->options.sniffer &&
5678 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5679 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5680 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5682 l3_hdr->next_hop.rx.vlan_id;
5684 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5689 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5690 struct qeth_hdr *hdr, bool uses_frags)
5692 struct napi_struct *napi = &card->napi;
5695 switch (hdr->hdr.l2.id) {
5696 case QETH_HEADER_TYPE_OSN:
5697 skb_push(skb, sizeof(*hdr));
5698 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5699 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5700 QETH_CARD_STAT_INC(card, rx_packets);
5702 card->osn_info.data_cb(skb);
5704 #if IS_ENABLED(CONFIG_QETH_L3)
5705 case QETH_HEADER_TYPE_LAYER3:
5706 qeth_l3_rebuild_skb(card, skb, hdr);
5707 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5710 case QETH_HEADER_TYPE_LAYER2:
5711 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5716 napi_free_frags(napi);
5718 dev_kfree_skb_any(skb);
5722 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5723 skb->ip_summed = CHECKSUM_UNNECESSARY;
5724 QETH_CARD_STAT_INC(card, rx_skb_csum);
5726 skb->ip_summed = CHECKSUM_NONE;
5729 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5730 QETH_CARD_STAT_INC(card, rx_packets);
5731 if (skb_is_nonlinear(skb)) {
5732 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5733 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5734 skb_shinfo(skb)->nr_frags);
5738 napi_gro_frags(napi);
5740 skb->protocol = eth_type_trans(skb, skb->dev);
5741 napi_gro_receive(napi, skb);
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
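/* Extract one received packet from an inbound QDIO buffer: parse the
 * qeth_hdr, then either copy the data into a linear skb or attach the buffer
 * pages as fragments, depending on packet size and the rx_copybreak setting.
 */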
5761 static int qeth_extract_skb(struct qeth_card *card,
5762 struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5765 struct qeth_priv *priv = netdev_priv(card->dev);
5766 struct qdio_buffer *buffer = qethbuffer->buffer;
5767 struct napi_struct *napi = &card->napi;
5768 struct qdio_buffer_element *element;
5769 unsigned int linear_len = 0;
5770 bool uses_frags = false;
5771 int offset = *__offset;
5772 bool use_rx_sg = false;
5773 unsigned int headroom;
5774 struct qeth_hdr *hdr;
5775 struct sk_buff *skb;
5778 element = &buffer->element[*element_no];
5781 /* qeth_hdr must not cross element boundaries */
5782 while (element->length < offset + sizeof(struct qeth_hdr)) {
5783 if (qeth_is_last_sbale(element))
5789 hdr = phys_to_virt(element->addr) + offset;
5790 offset += sizeof(*hdr);
5793 switch (hdr->hdr.l2.id) {
5794 case QETH_HEADER_TYPE_LAYER2:
5795 skb_len = hdr->hdr.l2.pkt_length;
5796 linear_len = ETH_HLEN;
5799 case QETH_HEADER_TYPE_LAYER3:
5800 skb_len = hdr->hdr.l3.length;
5801 if (!IS_LAYER3(card)) {
5802 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5806 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5807 linear_len = ETH_HLEN;
5812 if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5813 linear_len = sizeof(struct ipv6hdr);
5815 linear_len = sizeof(struct iphdr);
5816 headroom = ETH_HLEN;
5818 case QETH_HEADER_TYPE_OSN:
5819 skb_len = hdr->hdr.osn.pdu_length;
5820 if (!IS_OSN(card)) {
5821 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5825 linear_len = skb_len;
5826 headroom = sizeof(struct qeth_hdr);
5829 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5830 QETH_CARD_STAT_INC(card, rx_frame_errors);
5832 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5834 /* Can't determine packet length, drop the whole buffer. */
5835 return -EPROTONOSUPPORT;
5838 if (skb_len < linear_len) {
5839 QETH_CARD_STAT_INC(card, rx_dropped_runt);
5843 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5844 (skb_len > READ_ONCE(priv->rx_copybreak) &&
5845 !atomic_read(&card->force_alloc_skb) &&
5849 /* QETH_CQ_ENABLED only: */
5850 if (qethbuffer->rx_skb &&
5851 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5852 skb = qethbuffer->rx_skb;
5853 qethbuffer->rx_skb = NULL;
5857 skb = napi_get_frags(napi);
5859 /* -ENOMEM, no point in falling back further. */
5860 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5864 if (skb_tailroom(skb) >= linear_len + headroom) {
5869 netdev_info_once(card->dev,
5870 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5871 linear_len + headroom, skb_tailroom(skb));
5872 /* Shouldn't happen. Don't optimize, fall back to linear skb. */
5875 linear_len = skb_len;
5876 skb = napi_alloc_skb(napi, linear_len + headroom);
5878 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5884 skb_reserve(skb, headroom);
5887 int data_len = min(skb_len, (int)(element->length - offset));
5888 char *data = phys_to_virt(element->addr) + offset;
5890 skb_len -= data_len;
5893 /* Extract data from current element: */
5894 if (skb && data_len) {
5896 unsigned int copy_len;
5898 copy_len = min_t(unsigned int, linear_len,
5901 skb_put_data(skb, data, copy_len);
5902 linear_len -= copy_len;
5903 data_len -= copy_len;
5908 qeth_create_skb_frag(skb, data, data_len);
5911 /* Step forward to next element: */
5913 if (qeth_is_last_sbale(element)) {
5914 QETH_CARD_TEXT(card, 4, "unexeob");
5915 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5918 napi_free_frags(napi);
5920 dev_kfree_skb_any(skb);
5921 QETH_CARD_STAT_INC(card,
5931 /* This packet was skipped, go get another one: */
5935 *element_no = element - &buffer->element[0];
5938 qeth_receive_skb(card, skb, hdr, uses_frags);
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}
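/* Core RX processing for the NAPI poll loop: fetch completed inbound buffers,
 * extract their packets and refill the input queue as buffers drain.
 */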
5961 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5963 struct qeth_rx *ctx = &card->rx;
5964 unsigned int work_done = 0;
5966 while (budget > 0) {
5967 struct qeth_qdio_buffer *buffer;
5968 unsigned int skbs_done = 0;
5971 /* Fetch completed RX buffers: */
5972 if (!card->rx.b_count) {
5973 card->rx.qdio_err = 0;
5974 card->rx.b_count = qdio_get_next_buffers(
5975 card->data.ccwdev, 0, &card->rx.b_index,
5976 &card->rx.qdio_err);
5977 if (card->rx.b_count <= 0) {
5978 card->rx.b_count = 0;
5983 /* Process one completed RX buffer: */
5984 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5985 if (!(card->rx.qdio_err &&
5986 qeth_check_qdio_errors(card, buffer->buffer,
5987 card->rx.qdio_err, "qinerr")))
5988 skbs_done = qeth_extract_skbs(card, budget, buffer,
5993 work_done += skbs_done;
5994 budget -= skbs_done;
5997 QETH_CARD_STAT_INC(card, rx_bufs);
5998 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5999 buffer->pool_entry = NULL;
6002 ctx->bufs_refill -= qeth_rx_refill_queue(card,
6005 /* Step forward to next buffer: */
6006 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
6007 card->rx.buf_element = 0;
6008 card->rx.e_offset = 0;
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}
6033 int qeth_poll(struct napi_struct *napi, int budget)
6035 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
6036 unsigned int work_done;
6038 work_done = qeth_rx_poll(card, budget);
6040 if (card->options.cq == QETH_CQ_ENABLED)
6044 struct qeth_rx *ctx = &card->rx;
6046 /* Process any substantial refill backlog: */
6047 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
6049 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
6050 if (work_done >= budget)
6054 if (napi_complete_done(napi, work_done) &&
6055 qdio_start_irq(CARD_DDEV(card)))
6056 napi_schedule(napi);
6060 EXPORT_SYMBOL_GPL(qeth_poll);
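/* Complete one transmitted buffer on an IQD queue. A QDIO_ERROR_SLSB_PENDING
 * return code means the buffer is still owned by the hardware; the final
 * state is then signalled later through a QAOB on the completion queue.
 */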
6062 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
6063 unsigned int bidx, unsigned int qdio_error,
6066 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
6067 u8 sflags = buffer->buffer->element[15].sflags;
6068 struct qeth_card *card = queue->card;
6069 bool error = !!qdio_error;
6071 if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
6072 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
6074 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
6076 switch (atomic_cmpxchg(&buffer->state,
6077 QETH_QDIO_BUF_PRIMED,
6078 QETH_QDIO_BUF_PENDING)) {
6079 case QETH_QDIO_BUF_PRIMED:
6080 /* We have initial ownership, no QAOB (yet): */
6081 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
6083 /* Handle race with qeth_qdio_handle_aob(): */
6084 switch (atomic_xchg(&buffer->state,
6085 QETH_QDIO_BUF_NEED_QAOB)) {
6086 case QETH_QDIO_BUF_PENDING:
6087 /* No concurrent QAOB notification. */
6089 /* Prepare the queue slot for immediate re-use: */
6090 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
6091 if (qeth_init_qdio_out_buf(queue, bidx)) {
6092 QETH_CARD_TEXT(card, 2, "outofbuf");
6093 qeth_schedule_recovery(card);
6096 list_add(&buffer->list_entry,
6097 &queue->pending_bufs);
6098 /* Skip clearing the buffer: */
6100 case QETH_QDIO_BUF_QAOB_OK:
6101 qeth_notify_skbs(queue, buffer,
6102 TX_NOTIFY_DELAYED_OK);
6105 case QETH_QDIO_BUF_QAOB_ERROR:
6106 qeth_notify_skbs(queue, buffer,
6107 TX_NOTIFY_DELAYED_GENERALERROR);
6115 case QETH_QDIO_BUF_QAOB_OK:
6116 /* qeth_qdio_handle_aob() already received a QAOB: */
6117 qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK);
6120 case QETH_QDIO_BUF_QAOB_ERROR:
6121 /* qeth_qdio_handle_aob() already received a QAOB: */
6122 qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR);
6128 } else if (card->options.cq == QETH_CQ_ENABLED) {
6129 qeth_notify_skbs(queue, buffer,
6130 qeth_compute_cq_notification(sflags, 0));
6133 qeth_clear_output_buffer(queue, buffer, error, budget);
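/* TX NAPI poll for IQD devices: reap completed buffers from the output queue,
 * update BQL accounting and wake the stack's txq once space is available.
 */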
6136 static int qeth_tx_poll(struct napi_struct *napi, int budget)
6138 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
6139 unsigned int queue_no = queue->queue_no;
6140 struct qeth_card *card = queue->card;
6141 struct net_device *dev = card->dev;
6142 unsigned int work_done = 0;
6143 struct netdev_queue *txq;
6145 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
6148 unsigned int start, error, i;
6149 unsigned int packets = 0;
6150 unsigned int bytes = 0;
6153 qeth_tx_complete_pending_bufs(card, queue, false);
6155 if (qeth_out_queue_is_empty(queue)) {
6156 napi_complete(napi);
6160 /* Give the CPU a breather: */
6161 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
6162 QETH_TXQ_STAT_INC(queue, completion_yield);
6163 if (napi_complete_done(napi, 0))
6164 napi_schedule(napi);
6168 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
6170 if (completed <= 0) {
6171 /* Ensure we see TX completion for pending work: */
6172 if (napi_complete_done(napi, 0))
6173 qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
6177 for (i = start; i < start + completed; i++) {
6178 struct qeth_qdio_out_buffer *buffer;
6179 unsigned int bidx = QDIO_BUFNR(i);
6181 buffer = queue->bufs[bidx];
6182 packets += buffer->frames;
6183 bytes += buffer->bytes;
6185 qeth_handle_send_error(card, buffer, error);
6186 qeth_iqd_tx_complete(queue, bidx, error, budget);
6189 netdev_tx_completed_queue(txq, packets, bytes);
6190 atomic_sub(completed, &queue->used_buffers);
6191 work_done += completed;
6193 /* xmit may have observed the full-condition, but not yet
6194 * stopped the txq. In which case the code below won't trigger.
6195 * So before returning, xmit will re-check the txq's fill level
6196 * and wake it up if needed.
6198 if (netif_tx_queue_stopped(txq) &&
6199 !qeth_out_queue_is_full(queue))
6200 netif_tx_wake_queue(txq);
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}
6226 int qeth_setassparms_cb(struct qeth_card *card,
6227 struct qeth_reply *reply, unsigned long data)
6229 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6231 QETH_CARD_TEXT(card, 4, "defadpcb");
6233 if (cmd->hdr.return_code)
6236 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6237 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6238 card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6239 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6240 card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6243 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6245 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
6246 enum qeth_ipa_funcs ipa_func,
6248 unsigned int data_length,
6249 enum qeth_prot_versions prot)
6251 struct qeth_ipacmd_setassparms *setassparms;
6252 struct qeth_ipacmd_setassparms_hdr *hdr;
6253 struct qeth_cmd_buffer *iob;
6255 QETH_CARD_TEXT(card, 4, "getasscm");
6256 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6258 offsetof(struct qeth_ipacmd_setassparms,
6263 setassparms = &__ipa_cmd(iob)->data.setassparms;
6264 setassparms->assist_no = ipa_func;
6266 hdr = &setassparms->hdr;
6267 hdr->length = sizeof(*hdr) + data_length;
6268 hdr->command_code = cmd_code;
6271 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6273 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6274 enum qeth_ipa_funcs ipa_func,
6275 u16 cmd_code, u32 *data,
6276 enum qeth_prot_versions prot)
6278 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6279 struct qeth_cmd_buffer *iob;
6281 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6282 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6287 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6288 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6290 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6316 static int qeth_register_dbf_views(void)
6321 for (x = 0; x < QETH_DBF_INFOS; x++) {
6322 /* register the areas */
6323 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6327 if (qeth_dbf[x].id == NULL) {
6328 qeth_unregister_dbf_views();
6332 /* register a view */
6333 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6335 qeth_unregister_dbf_views();
6339 /* set a passing level */
6340 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6346 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
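/* Load and attach the requested layer discipline (qeth_l2 or qeth_l3),
 * pulling in the corresponding module if it is not loaded yet.
 */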
6348 int qeth_setup_discipline(struct qeth_card *card,
6349 enum qeth_discipline_id discipline)
6353 mutex_lock(&qeth_mod_mutex);
6354 switch (discipline) {
6355 case QETH_DISCIPLINE_LAYER3:
6356 card->discipline = try_then_request_module(
6357 symbol_get(qeth_l3_discipline), "qeth_l3");
6359 case QETH_DISCIPLINE_LAYER2:
6360 card->discipline = try_then_request_module(
6361 symbol_get(qeth_l2_discipline), "qeth_l2");
6366 mutex_unlock(&qeth_mod_mutex);
6368 if (!card->discipline) {
6369 dev_err(&card->gdev->dev, "There is no kernel module to "
6370 "support discipline %d\n", discipline);
6374 rc = card->discipline->setup(card->gdev);
6376 if (discipline == QETH_DISCIPLINE_LAYER2)
6377 symbol_put(qeth_l2_discipline);
6379 symbol_put(qeth_l3_discipline);
6380 card->discipline = NULL;
6385 card->options.layer = discipline;
void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}
6401 const struct device_type qeth_generic_devtype = {
6402 .name = "qeth_generic",
6404 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
6406 static const struct device_type qeth_osn_devtype = {
6410 #define DBF_NAME_LEN 20
6412 struct qeth_dbf_entry {
6413 char dbf_name[DBF_NAME_LEN];
6414 debug_info_t *dbf_info;
6415 struct list_head dbf_list;
6418 static LIST_HEAD(qeth_dbf_list);
6419 static DEFINE_MUTEX(qeth_dbf_list_mutex);
static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}
6437 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6439 struct qeth_dbf_entry *new_entry;
6441 card->debug = debug_register(name, 2, 1, 8);
6443 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6446 if (debug_register_view(card->debug, &debug_hex_ascii_view))
6448 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6451 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6452 new_entry->dbf_info = card->debug;
6453 mutex_lock(&qeth_dbf_list_mutex);
6454 list_add(&new_entry->dbf_list, &qeth_dbf_list);
6455 mutex_unlock(&qeth_dbf_list_mutex);
6460 debug_unregister(card->debug);
static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}
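/* Allocate the net_device matching the card type: a multi-queue "hsi%d"
 * device for HiperSockets, a single-queue ethernet device for OSM, an
 * "osn%d" device for OSN, and a multi-queue ethernet device otherwise.
 */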
6478 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6480 struct net_device *dev;
6481 struct qeth_priv *priv;
6483 switch (card->info.type) {
6484 case QETH_CARD_TYPE_IQD:
6485 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6486 ether_setup, QETH_MAX_OUT_QUEUES, 1);
6488 case QETH_CARD_TYPE_OSM:
6489 dev = alloc_etherdev(sizeof(*priv));
6491 case QETH_CARD_TYPE_OSN:
6492 dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
6496 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6502 priv = netdev_priv(dev);
6503 priv->rx_copybreak = QETH_RX_COPYBREAK;
6504 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6506 dev->ml_priv = card;
6507 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6508 dev->min_mtu = IS_OSN(card) ? 64 : 576;
6509 /* initialized when device first goes online: */
6512 SET_NETDEV_DEV(dev, &card->gdev->dev);
6513 netif_carrier_off(dev);
6516 dev->ethtool_ops = &qeth_osn_ethtool_ops;
6518 dev->ethtool_ops = &qeth_ethtool_ops;
6519 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6520 dev->hw_features |= NETIF_F_SG;
6521 dev->vlan_features |= NETIF_F_SG;
6523 dev->features |= NETIF_F_SG;
6529 struct net_device *qeth_clone_netdev(struct net_device *orig)
6531 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6536 clone->dev_port = orig->dev_port;
6540 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6542 struct qeth_card *card;
6545 enum qeth_discipline_id enforced_disc;
6546 char dbf_name[DBF_NAME_LEN];
6548 QETH_DBF_TEXT(SETUP, 2, "probedev");
6551 if (!get_device(dev))
6554 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6556 card = qeth_alloc_card(gdev);
6558 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6563 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6564 dev_name(&gdev->dev));
6565 card->debug = qeth_get_dbf_entry(dbf_name);
6567 rc = qeth_add_dbf_entry(card, dbf_name);
6572 qeth_setup_card(card);
6573 card->dev = qeth_alloc_netdev(card);
6579 qeth_determine_capabilities(card);
6580 qeth_set_blkt_defaults(card);
6582 card->qdio.no_out_queues = card->dev->num_tx_queues;
6583 rc = qeth_update_from_chp_desc(card);
6588 gdev->dev.groups = qeth_osn_dev_groups;
6590 gdev->dev.groups = qeth_dev_groups;
6592 enforced_disc = qeth_enforce_discipline(card);
6593 switch (enforced_disc) {
6594 case QETH_DISCIPLINE_UNDETERMINED:
6595 gdev->dev.type = &qeth_generic_devtype;
6598 card->info.layer_enforced = true;
6599 /* It's so early that we don't need the discipline_mutex yet. */
6600 rc = qeth_setup_discipline(card, enforced_disc);
6602 goto err_setup_disc;
6604 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
6605 card->discipline->devtype;
6613 free_netdev(card->dev);
6615 qeth_core_free_card(card);
6621 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6623 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6625 QETH_CARD_TEXT(card, 2, "removedv");
6627 mutex_lock(&card->discipline_mutex);
6628 if (card->discipline)
6629 qeth_remove_discipline(card);
6630 mutex_unlock(&card->discipline_mutex);
6632 qeth_free_qdio_queues(card);
6634 free_netdev(card->dev);
6635 qeth_core_free_card(card);
6636 put_device(&gdev->dev);
6639 static int qeth_core_set_online(struct ccwgroup_device *gdev)
6641 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6643 enum qeth_discipline_id def_discipline;
6645 mutex_lock(&card->discipline_mutex);
6646 if (!card->discipline) {
6647 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6648 QETH_DISCIPLINE_LAYER2;
6649 rc = qeth_setup_discipline(card, def_discipline);
6654 rc = qeth_set_online(card, card->discipline);
6657 mutex_unlock(&card->discipline_mutex);
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
6685 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6690 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6693 return err ? err : count;
6695 static DRIVER_ATTR_WO(group);
6697 static struct attribute *qeth_drv_attrs[] = {
6698 &driver_attr_group.attr,
6701 static struct attribute_group qeth_drv_attr_group = {
6702 .attrs = qeth_drv_attrs,
6704 static const struct attribute_group *qeth_drv_attr_groups[] = {
6705 &qeth_drv_attr_group,
6709 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6711 .groups = qeth_drv_attr_groups,
6712 .owner = THIS_MODULE,
6715 .ccw_driver = &qeth_ccw_driver,
6716 .setup = qeth_core_probe_device,
6717 .remove = qeth_core_remove_device,
6718 .set_online = qeth_core_set_online,
6719 .set_offline = qeth_core_set_offline,
6720 .shutdown = qeth_core_shutdown,
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
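/* Common ioctl handler for qeth interfaces; requests that are not handled
 * here are passed on to the discipline's do_ioctl callback.
 */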
6738 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6740 struct qeth_card *card = dev->ml_priv;
6741 struct mii_ioctl_data *mii_data;
6745 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6746 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
6748 case SIOC_QETH_GET_CARD_TYPE:
6749 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6754 mii_data = if_mii(rq);
6755 mii_data->phy_id = 0;
6758 mii_data = if_mii(rq);
6759 if (mii_data->phy_id != 0)
6762 mii_data->val_out = qeth_mdio_read(dev,
6763 mii_data->phy_id, mii_data->reg_num);
6765 case SIOC_QETH_QUERY_OAT:
6766 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
6769 if (card->discipline->do_ioctl)
6770 rc = card->discipline->do_ioctl(dev, rq, cmd);
6775 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6778 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}
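/* Enable HW checksumming for one direction and IP version: start the assist,
 * check that the required TCP/UDP (and, for L3 IPv4 TX, IP header) features
 * are offered, then enable them and verify the adapter's reply.
 */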
6800 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6801 enum qeth_prot_versions prot, u8 *lp2lp)
6803 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6804 struct qeth_cmd_buffer *iob;
6805 struct qeth_ipa_caps caps;
6809 /* some L3 HW requires combined L3+L4 csum offload: */
6810 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6811 cstype == IPA_OUTBOUND_CHECKSUM)
6812 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6814 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6819 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6823 if ((required_features & features) != required_features) {
6824 qeth_set_csum_off(card, cstype, prot);
6828 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6829 SETASS_DATA_SIZEOF(flags_32bit),
6832 qeth_set_csum_off(card, cstype, prot);
6836 if (features & QETH_IPA_CHECKSUM_LP2LP)
6837 required_features |= QETH_IPA_CHECKSUM_LP2LP;
6838 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6839 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6841 qeth_set_csum_off(card, cstype, prot);
6845 if (!qeth_ipa_caps_supported(&caps, required_features) ||
6846 !qeth_ipa_caps_enabled(&caps, required_features)) {
6847 qeth_set_csum_off(card, cstype, prot);
6851 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6852 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6855 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}
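/* Enable TSO for one IP version: start the assist, check that the adapter
 * reports a usable MSS, then enable QETH_IPA_LARGE_SEND_TCP and verify that
 * it is active.
 */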

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
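
/* RX checksum is a single netdev feature, but it maps to two per-protocol
 * assists. When enabling, success means at least one assist came up; when
 * disabling, failure means at least one assist is still active.
 */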
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
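
/* Some offloads are restricted for traffic to a local next-hop when the card
 * cannot checksum lp2lp traffic (see qeth_features_check()). Once all such
 * offloads for a protocol end up disabled, the tracked local addresses for
 * that protocol are no longer needed and get flushed.
 */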
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return rc;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
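
/* .ndo_fix_features hook: masks out any offload bits for which the card does
 * not advertise the corresponding IPA assist.
 */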
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
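
/* .ndo_features_check hook: applies per-skb restrictions on top of the
 * device-wide feature set, i.e. drops offloads that the card cannot perform
 * for traffic to a local next-hop, and disables SG for small GSO segments so
 * that linearized allocations stay order-0.
 */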
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
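
/* RX counters are maintained per-card, while TX counters are aggregated over
 * all output queues.
 */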
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
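
/* Resizing the TX queue set on an IQD device must keep the prio-tc map in
 * sync: the unicast traffic class has to span exactly the active ucast
 * queues. Adjust the map before resizing, and roll it back if the resize
 * fails.
 */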
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
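
/* .ndo_open hook: marks the data channel as up, starts the TX queues and
 * brings up the NAPI instances. On IQD devices each output queue gets its
 * own TX-completion NAPI instance.
 */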
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
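
/* .ndo_stop hook: tears down in reverse order. The per-queue NAPI instances
 * are removed entirely, since the output queues may get re-allocated before
 * the next open.
 */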
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");