// SPDX-License-Identifier: GPL-2.0
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 * Frank Pavlic <fpavlic@de.ibm.com>,
 * Thomas Spatzier <tspat@de.ibm.com>,
 * Frank Blaschka <frank.blaschka@de.ibm.com>
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>
#include "qeth_core.h"
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
[QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
&debug_sprintf_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
EXPORT_SYMBOL_GPL(qeth_dbf);
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static void qeth_close_dev_handler(struct work_struct *work)
struct qeth_card *card;
card = container_of(work, struct qeth_card, close_dev_work);
QETH_CARD_TEXT(card, 2, "cldevhdl");
ccwgroup_set_offline(card->gdev);
static const char *qeth_get_cardname(struct qeth_card *card)
if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " Virtual NIC QDIO";
case QETH_CARD_TYPE_IQD:
return " Virtual NIC Hiper";
case QETH_CARD_TYPE_OSM:
return " Virtual NIC QDIO - OSM";
case QETH_CARD_TYPE_OSX:
return " Virtual NIC QDIO - OSX";
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return "Virt.NIC QDIO";
case QETH_CARD_TYPE_IQD:
return "Virt.NIC Hiper";
case QETH_CARD_TYPE_OSM:
return "Virt.NIC OSM";
case QETH_CARD_TYPE_OSX:
return "Virt.NIC OSX";
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_10GBIT_ETH:
case QETH_LINK_TYPE_25GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
return "OSD_TR_LANE";
case QETH_LINK_TYPE_LANE_ETH1000:
return "OSD_GbE_LANE";
case QETH_LINK_TYPE_LANE:
return "OSD_ATM_LANE";
return "OSD_Express";
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_allowed_mask = threads;
if (clear_start_mask)
card->thread_start_mask &= threads;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
int qeth_threads_running(struct qeth_card *card, unsigned long threads)
spin_lock_irqsave(&card->thread_mask_lock, flags);
rc = (card->thread_running_mask & threads);
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
EXPORT_SYMBOL_GPL(qeth_threads_running);
void qeth_clear_working_pool_list(struct qeth_card *card)
struct qeth_buffer_pool_entry *pool_entry, *tmp;
struct qeth_qdio_q *queue = card->qdio.in_q;
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
if (entry->elements[i])
__free_page(entry->elements[i]);
static void qeth_free_buffer_pool(struct qeth_card *card)
struct qeth_buffer_pool_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
list_del(&entry->init_list);
qeth_free_pool_entry(entry);
static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
struct qeth_buffer_pool_entry *entry;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
for (i = 0; i < pages; i++) {
entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
if (!entry->elements[i]) {
qeth_free_pool_entry(entry);
static int qeth_alloc_buffer_pool(struct qeth_card *card)
unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
struct qeth_buffer_pool_entry *entry;
entry = qeth_alloc_pool_entry(buf_elements);
qeth_free_buffer_pool(card);
list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
struct qeth_buffer_pool_entry *entry, *tmp;
int delta = count - pool->buf_count;
QETH_CARD_TEXT(card, 2, "realcbp");
/* Defer until queue is allocated: */
if (!card->qdio.in_q)
/* Remove entries from the pool: */
entry = list_first_entry(&pool->entry_list,
struct qeth_buffer_pool_entry,
list_del(&entry->init_list);
qeth_free_pool_entry(entry);
/* Allocate additional entries: */
entry = qeth_alloc_pool_entry(buf_elements);
list_for_each_entry_safe(entry, tmp, &entries,
list_del(&entry->init_list);
qeth_free_pool_entry(entry);
list_add(&entry->init_list, &entries);
list_splice(&entries, &pool->entry_list);
card->qdio.in_buf_pool.buf_count = count;
pool->buf_count = count;
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
q->bufs[i].buffer = q->qdio_bufs[i];
QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
static int qeth_cq_init(struct qeth_card *card)
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqinit");
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
card->qdio.no_in_queues - 1, 0,
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
static int qeth_alloc_cq(struct qeth_card *card)
if (card->options.cq == QETH_CQ_ENABLED) {
struct qdio_outbuf_state *outbuf_states;
QETH_CARD_TEXT(card, 2, "cqon");
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
card->qdio.no_in_queues = 2;
card->qdio.out_bufstates =
kcalloc(card->qdio.no_out_queues *
QDIO_MAX_BUFFERS_PER_Q,
sizeof(struct qdio_outbuf_state),
outbuf_states = card->qdio.out_bufstates;
if (outbuf_states == NULL) {
for (i = 0; i < card->qdio.no_out_queues; ++i) {
card->qdio.out_qs[i]->bufstates = outbuf_states;
outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
card->qdio.no_in_queues = 1;
QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
dev_err(&card->gdev->dev, "Failed to create completion queue\n");
static void qeth_free_cq(struct qeth_card *card)
if (card->qdio.c_q) {
--card->qdio.no_in_queues;
qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
kfree(card->qdio.out_bufstates);
card->qdio.out_bufstates = NULL;
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
enum iucv_tx_notify n;
n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
TX_NOTIFY_UNREACHABLE;
n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
TX_NOTIFY_GENERALERROR;
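/* Walk the chain of TX buffers that completed with a pending/delayed
 * notification and release those whose QAOB has already been handled.
 */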
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
if (q->card->options.cq != QETH_CQ_ENABLED)
if (q->bufs[bidx]->next_pending != NULL) {
struct qeth_qdio_out_buffer *head = q->bufs[bidx];
struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
if (forced_cleanup ||
atomic_read(&c->state) ==
QETH_QDIO_BUF_HANDLED_DELAYED) {
struct qeth_qdio_out_buffer *f = c;
QETH_CARD_TEXT(f->q->card, 5, "fp");
QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
/* release here to avoid interleaving between
outbound tasklet and inbound tasklet
regarding notifications and lifecycle */
qeth_tx_complete_buf(c, forced_cleanup, 0);
WARN_ON_ONCE(head->next_pending != f);
head->next_pending = c;
kmem_cache_free(qeth_qdio_outbuf_cache, f);
if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
QETH_QDIO_BUF_HANDLED_DELAYED)) {
/* for recovery situations */
qeth_init_qdio_out_buf(q, bidx);
QETH_CARD_TEXT(q->card, 2, "clprecov");
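/* Handle an asynchronous completion (QAOB) for a TX buffer that the device
 * reported as pending: notify the attached AF_IUCV sockets and free any
 * header-cache allocations still attached to the buffer.
 */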
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
aob = (struct qaob *) phys_to_virt(phys_aob_addr);
QETH_CARD_TEXT(card, 5, "haob");
QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
buffer = (struct qeth_qdio_out_buffer *) aob->user1;
QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
notification = TX_NOTIFY_OK;
WARN_ON_ONCE(atomic_read(&buffer->state) !=
QETH_QDIO_BUF_PENDING);
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
if (aob->aorc != 0) {
QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
notification = qeth_compute_cq_notification(aob->aorc, 1);
qeth_notify_skbs(buffer->q, buffer, notification);
/* Free dangling allocations. The attached skbs are handled by
 * qeth_cleanup_handled_pending().
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
void *data = phys_to_virt(aob->sba[i]);
if (data && buffer->is_header[i])
kmem_cache_free(qeth_core_header_cache, data);
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
qdio_release_aob(aob);
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->cda = (__u32) __pa(data);
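/* Start the next READ CCW on the control channel; the caller holds the
 * ccw device lock of the read channel.
 */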
static int __qeth_issue_next_read(struct qeth_card *card)
struct qeth_cmd_buffer *iob = card->read_cmd;
struct qeth_channel *channel = iob->channel;
struct ccw1 *ccw = __ccw_from_cmd(iob);
QETH_CARD_TEXT(card, 5, "issnxrd");
if (channel->state != CH_STATE_UP)
memset(iob->data, 0, iob->length);
qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
iob->callback = qeth_issue_next_read_cb;
/* keep the cmd alive after completion: */
QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
channel->active_cmd = iob;
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
qeth_unlock_channel(card, channel);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
static int qeth_issue_next_read(struct qeth_card *card)
spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
ret = __qeth_issue_next_read(card);
spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
static void qeth_enqueue_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
spin_lock_irq(&card->lock);
list_add_tail(&iob->list, &card->cmd_waiter_list);
spin_unlock_irq(&card->lock);
static void qeth_dequeue_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
spin_lock_irq(&card->lock);
list_del(&iob->list);
spin_unlock_irq(&card->lock);
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
complete(&iob->done);
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
static void qeth_flush_local_addrs4(struct qeth_card *card)
struct qeth_local_addr *addr;
struct hlist_node *tmp;
spin_lock_irq(&card->local_addrs4_lock);
hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
hash_del_rcu(&addr->hnode);
kfree_rcu(addr, rcu);
spin_unlock_irq(&card->local_addrs4_lock);
static void qeth_flush_local_addrs6(struct qeth_card *card)
struct qeth_local_addr *addr;
struct hlist_node *tmp;
spin_lock_irq(&card->local_addrs6_lock);
hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
hash_del_rcu(&addr->hnode);
kfree_rcu(addr, rcu);
spin_unlock_irq(&card->local_addrs6_lock);
void qeth_flush_local_addrs(struct qeth_card *card)
qeth_flush_local_addrs4(card);
qeth_flush_local_addrs6(card);
EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
static void qeth_add_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
spin_lock(&card->local_addrs4_lock);
for (i = 0; i < cmd->count; i++) {
unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
struct qeth_local_addr *addr;
bool duplicate = false;
hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
dev_err(&card->gdev->dev,
"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
&cmd->addrs[i].addr);
ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
hash_add_rcu(card->local_addrs4, &addr->hnode, key);
spin_unlock(&card->local_addrs4_lock);
static void qeth_add_local_addrs6(struct qeth_card *card,
struct qeth_ipacmd_local_addrs6 *cmd)
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
spin_lock(&card->local_addrs6_lock);
for (i = 0; i < cmd->count; i++) {
u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
struct qeth_local_addr *addr;
bool duplicate = false;
hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
dev_err(&card->gdev->dev,
"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
&cmd->addrs[i].addr);
addr->addr = cmd->addrs[i].addr;
hash_add_rcu(card->local_addrs6, &addr->hnode, key);
spin_unlock(&card->local_addrs6_lock);
static void qeth_del_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
spin_lock(&card->local_addrs4_lock);
for (i = 0; i < cmd->count; i++) {
struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
unsigned int key = ipv4_addr_hash(addr->addr);
struct qeth_local_addr *tmp;
hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
if (tmp->addr.s6_addr32[3] == addr->addr) {
hash_del_rcu(&tmp->hnode);
spin_unlock(&card->local_addrs4_lock);
static void qeth_del_local_addrs6(struct qeth_card *card,
struct qeth_ipacmd_local_addrs6 *cmd)
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
spin_lock(&card->local_addrs6_lock);
for (i = 0; i < cmd->count; i++) {
struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
u32 key = ipv6_addr_hash(&addr->addr);
struct qeth_local_addr *tmp;
hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
hash_del_rcu(&tmp->hnode);
spin_unlock(&card->local_addrs6_lock);
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
struct qeth_local_addr *tmp;
bool is_local = false;
if (hash_empty(card->local_addrs4))
next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
key = ipv4_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
if (tmp->addr.s6_addr32[3] == next_hop) {
static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
struct qeth_local_addr *tmp;
struct in6_addr *next_hop;
bool is_local = false;
if (hash_empty(card->local_addrs6))
next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
key = ipv6_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
if (ipv6_addr_equal(&tmp->addr, next_hop)) {
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
struct qeth_card *card = m->private;
struct qeth_local_addr *tmp;
hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
seq_printf(m, "%pI6c\n", &tmp->addr);
DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
ipa_name, com, CARD_DEVID(card), rc,
qeth_get_ipa_msg(rc));
QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
ipa_name, com, CARD_DEVID(card));
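/* Sort incoming IPA messages: return replies to the caller for matching
 * against pending commands, and handle unsolicited events (link state
 * changes, bridgeport events, local-address notifications) in place.
 */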
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) {
if (cmd->hdr.command != IPA_CMD_SETCCID &&
cmd->hdr.command != IPA_CMD_DELCCID &&
cmd->hdr.command != IPA_CMD_MODCCID &&
cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
/* handle unsolicited event: */
switch (cmd->hdr.command) {
case IPA_CMD_STOPLAN:
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
QETH_CARD_IFNAME(card));
schedule_work(&card->close_dev_work);
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
QETH_CARD_IFNAME(card), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
"The link for %s on CHPID 0x%X has been restored\n",
QETH_CARD_IFNAME(card), card->info.chpid);
if (card->info.hwtrap)
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
case IPA_CMD_SETBRIDGEPORT_IQD:
case IPA_CMD_SETBRIDGEPORT_OSA:
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
if (card->discipline->control_event_handler(card, cmd))
case IPA_CMD_MODCCID:
case IPA_CMD_REGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
QETH_CARD_TEXT(card, 3, "irla");
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
QETH_CARD_TEXT(card, 3, "urla");
QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
static void qeth_clear_ipacmd_list(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(iob, &card->cmd_waiter_list, list)
qeth_notify_cmd(iob, -ECANCELED);
spin_unlock_irqrestore(&card->lock, flags);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
dev_err(&card->gdev->dev,
"The device does not support the configured transport mode\n");
return -EPROTONOSUPPORT;
void qeth_put_cmd(struct qeth_cmd_buffer *iob)
if (refcount_dec_and_test(&iob->ref_count)) {
EXPORT_SYMBOL_GPL(qeth_put_cmd);
static void qeth_release_buffer_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
qeth_notify_cmd(iob, rc);
struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
unsigned int length, unsigned int ccws,
struct qeth_cmd_buffer *iob;
if (length > QETH_BUFSIZE)
iob = kzalloc(sizeof(*iob), GFP_KERNEL);
iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
init_completion(&iob->done);
spin_lock_init(&iob->lock);
INIT_LIST_HEAD(&iob->list);
refcount_set(&iob->ref_count, 1);
iob->channel = channel;
iob->timeout = timeout;
iob->length = length;
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_cmd_buffer *request = NULL;
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
struct qeth_cmd_buffer *tmp;
unsigned long flags;
QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
qeth_schedule_recovery(card);
qeth_clear_ipacmd_list(card);
cmd = __ipa_reply(iob);
cmd = qeth_check_ipa_data(card, cmd);
if (IS_OSN(card) && card->osn_info.assist_cb &&
cmd->hdr.command != IPA_CMD_STARTLAN) {
card->osn_info.assist_cb(card->dev, cmd);
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
if (tmp->match && tmp->match(tmp, iob)) {
/* take the object outside the lock */
qeth_get_cmd(request);
spin_unlock_irqrestore(&card->lock, flags);
reply = &request->reply;
if (!reply->callback) {
spin_lock_irqsave(&request->lock, flags);
/* Bail out when the requestor has already left: */
rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
(unsigned long)iob);
spin_unlock_irqrestore(&request->lock, flags);
qeth_notify_cmd(request, rc);
qeth_put_cmd(request);
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
QETH_SEQ_NO_LENGTH);
__qeth_issue_next_read(card);
static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
if (!(card->thread_allowed_mask & thread))
else if (card->thread_start_mask & thread)
card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
static void qeth_clear_thread_start_bit(struct qeth_card *card,
unsigned long thread)
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_start_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
static void qeth_clear_thread_running_bit(struct qeth_card *card,
unsigned long thread)
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up_all(&card->wait_q);
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
if (card->thread_start_mask & thread) {
if ((card->thread_allowed_mask & thread) &&
!(card->thread_running_mask & thread)) {
card->thread_start_mask &= ~thread;
card->thread_running_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
wait_event(card->wait_q,
(rc = __qeth_do_run_thread(card, thread)) >= 0);
int qeth_schedule_recovery(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "startrec");
rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
schedule_work(&card->kernel_thread_starter);
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
QETH_CARD_TEXT(card, 2, "REVIND");
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
QETH_CARD_TEXT(card, 2, "CMDREJi");
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
QETH_CARD_TEXT(card, 2, "AFFE");
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
QETH_CARD_TEXT(card, 2, "ZEROSEN");
QETH_CARD_TEXT(card, 2, "DGENCHK");
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
switch (PTR_ERR(irb)) {
QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
return PTR_ERR(irb);
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct qeth_cmd_buffer *iob = NULL;
struct ccwgroup_device *gdev;
struct qeth_channel *channel;
struct qeth_card *card;
/* while we hold the ccwdev lock, this stays valid: */
gdev = dev_get_drvdata(&cdev->dev);
card = dev_get_drvdata(&gdev->dev);
QETH_CARD_TEXT(card, 5, "irq");
if (card->read.ccwdev == cdev) {
channel = &card->read;
QETH_CARD_TEXT(card, 5, "read");
} else if (card->write.ccwdev == cdev) {
channel = &card->write;
QETH_CARD_TEXT(card, 5, "write");
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
QETH_CARD_TEXT(card, 5, "irqunsol");
} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
QETH_CARD_TEXT(card, 5, "irqunexp");
"Received IRQ with intparm %lx, expected %px\n",
intparm, channel->active_cmd);
if (channel->active_cmd)
qeth_cancel_cmd(channel->active_cmd, -EIO);
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
channel->active_cmd = NULL;
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
/* IO was terminated, free its resources. */
qeth_cancel_cmd(iob, rc);
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
channel->state = CH_STATE_STOPPED;
wake_up(&card->wait_q);
if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
channel->state = CH_STATE_HALTED;
wake_up(&card->wait_q);
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
qeth_cancel_cmd(iob, -ECANCELED);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
if (irb->esw.esw0.erw.cons) {
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
CCW_DEVID(channel->ccwdev), cstat,
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
rc = qeth_get_problem(card, cdev, irb);
card->read_or_write_problem = 1;
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
if (irb->scsw.cmd.count > iob->length) {
qeth_cancel_cmd(iob, -EIO);
iob->callback(card, iob,
iob->length - irb->scsw.cmd.count);
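/* Pass a TX completion notification to the AF_IUCV sockets that own the
 * skbs attached to this buffer.
 */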
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification)
struct sk_buff *skb;
skb_queue_walk(&buf->skb_list, skb) {
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
iucv_sk(skb->sk)->sk_txnotify(skb, notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
if (buf->next_element_to_fill == 0)
QETH_TXQ_STAT_INC(queue, bufs);
QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
unsigned int bytes = qdisc_pkt_len(skb);
bool is_tso = skb_is_gso(skb);
unsigned int packets;
packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
if (skb->ip_summed == CHECKSUM_PARTIAL)
QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
if (skb_is_nonlinear(skb))
QETH_TXQ_STAT_INC(queue, skbs_sg);
QETH_TXQ_STAT_INC(queue, skbs_tso);
QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
napi_consume_skb(skb, budget);
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
bool error, int budget)
/* is PCI flag set on buffer? */
if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
atomic_dec(&queue->set_pci_flags_count);
qeth_tx_complete_buf(buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
void *data = phys_to_virt(buf->buffer->element[i].addr);
if (data && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache, data);
buf->is_header[i] = 0;
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], true, 0);
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
void qeth_drain_output_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
if (card->qdio.out_qs[i])
qeth_drain_output_queue(card->qdio.out_qs[i], false);
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
unsigned int max = single ? 1 : card->dev->num_tx_queues;
count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
rc = netif_set_real_num_tx_queues(card->dev, count);
if (card->qdio.no_out_queues == max)
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
card->qdio.no_out_queues = max;
static int qeth_update_from_chp_desc(struct qeth_card *card)
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
QETH_CARD_TEXT(card, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
card->info.func_level = 0x4100 + chp_dsc->desc;
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
static void qeth_init_qdio_info(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
static void qeth_set_initial_options(struct qeth_card *card)
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
rc = (card->thread_start_mask & thread);
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card, 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
ts = kthread_run(qeth_do_reset, card, "qeth_recover");
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card,
QETH_RECOVER_THREAD);
static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "setupcrd");
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
spin_lock_init(&card->lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
qeth_set_initial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
hash_init(card->rx_mode_addrs);
hash_init(card->local_addrs4);
hash_init(card->local_addrs6);
spin_lock_init(&card->local_addrs4_lock);
spin_lock_init(&card->local_addrs6_lock);
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
struct qeth_card *card = container_of(slr, struct qeth_card,
qeth_service_level);
if (card->info.mcl_level[0])
seq_printf(m, "qeth: %s firmware level %s\n",
CARD_BUS_ID(card), card->info.mcl_level);
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
struct qeth_card *card;
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
card = kzalloc(sizeof(*card), GFP_KERNEL);
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
dev_set_drvdata(&gdev->dev, card);
CARD_RDEV(card) = gdev->cdev[0];
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
card->event_wq = alloc_ordered_workqueue("%s_event", 0,
dev_name(&gdev->dev));
if (!card->event_wq)
card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
if (!card->read_cmd)
card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
debugfs_create_file("local_addrs", 0400, card->debugfs, card,
&qeth_debugfs_local_addr_fops);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
destroy_workqueue(card->event_wq);
dev_set_drvdata(&gdev->dev, NULL);
static int qeth_clear_channel(struct qeth_card *card,
struct qeth_channel *channel)
QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
rc = wait_event_interruptible_timeout(card->wait_q,
channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
if (rc == -ERESTARTSYS)
if (channel->state != CH_STATE_STOPPED)
channel->state = CH_STATE_DOWN;
static int qeth_halt_channel(struct qeth_card *card,
struct qeth_channel *channel)
QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
rc = wait_event_interruptible_timeout(card->wait_q,
channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
if (rc == -ERESTARTSYS)
if (channel->state != CH_STATE_HALTED)
int qeth_stop_channel(struct qeth_channel *channel)
struct ccw_device *cdev = channel->ccwdev;
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
if (channel->active_cmd) {
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
channel->active_cmd = NULL;
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
EXPORT_SYMBOL_GPL(qeth_stop_channel);
static int qeth_start_channel(struct qeth_channel *channel)
struct ccw_device *cdev = channel->ccwdev;
channel->state = CH_STATE_DOWN;
atomic_set(&channel->irq_pending, 0);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
spin_unlock_irq(get_ccwdev_lock(cdev));
rc = ccw_device_set_online(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
static int qeth_halt_channels(struct qeth_card *card)
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "haltchs");
rc1 = qeth_halt_channel(card, &card->read);
rc2 = qeth_halt_channel(card, &card->write);
rc3 = qeth_halt_channel(card, &card->data);
static int qeth_clear_channels(struct qeth_card *card)
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "clearchs");
rc1 = qeth_clear_channel(card, &card->read);
rc2 = qeth_clear_channel(card, &card->write);
rc3 = qeth_clear_channel(card, &card->data);
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
QETH_CARD_TEXT(card, 3, "clhacrd");
rc = qeth_halt_channels(card);
return qeth_clear_channels(card);
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
case QETH_QDIO_CLEANING:
rc = qeth_clear_halt_card(card, use_halt);
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
struct diag26c_vnic_resp *response = NULL;
struct diag26c_vnic_req *request = NULL;
struct ccw_dev_id id;
QETH_CARD_TEXT(card, 2, "vmlayer");
cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response) {
ccw_device_get_id(CARD_RDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION6_VM65918;
request->req_format = DIAG26C_VNIC_INFO;
memcpy(&request->sys_name, userid, 8);
request->devno = id.devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_PORT_VNIC);
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
if (response->protocol == VNIC_INFO_PROT_L2)
disc = QETH_DISCIPLINE_LAYER2;
else if (response->protocol == VNIC_INFO_PROT_L3)
disc = QETH_DISCIPLINE_LAYER3;
QETH_CARD_TEXT_(card, 2, "err%x", rc);
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
if (IS_OSM(card) || IS_OSN(card))
disc = QETH_DISCIPLINE_LAYER2;
else if (IS_VM_NIC(card))
disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
qeth_vm_detect_layer(card);
case QETH_DISCIPLINE_LAYER2:
QETH_CARD_TEXT(card, 3, "force l2");
case QETH_DISCIPLINE_LAYER3:
QETH_CARD_TEXT(card, 3, "force l3");
QETH_CARD_TEXT(card, 3, "force no");
static void qeth_set_blkt_defaults(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "cfgblkt");
if (card->info.use_v1_blkt) {
card->info.blkt.time_total = 0;
card->info.blkt.inter_packet = 0;
card->info.blkt.inter_packet_jumbo = 0;
card->info.blkt.time_total = 250;
card->info.blkt.inter_packet = 5;
card->info.blkt.inter_packet_jumbo = 15;
static void qeth_idx_init(struct qeth_card *card)
memset(&card->seqno, 0, sizeof(card->seqno));
card->token.issuer_rm_w = 0x00010103UL;
card->token.cm_filter_w = 0x00010108UL;
card->token.cm_connection_w = 0x0001010aUL;
card->token.ulp_filter_w = 0x0001010bUL;
card->token.ulp_connection_w = 0x0001010dUL;
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
static void qeth_idx_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
QETH_SEQ_NO_LENGTH);
if (iob->channel == &card->write)
card->seqno.trans_hdr++;
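/* Derive the function level we expect the peer to report back during
 * IDX activation from our own function level.
 */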
static int qeth_peer_func_level(int level)
if ((level & 0xff) == 8)
return (level & 0xff) + 0x400;
if (((level >> 8) & 3) == 1)
return (level & 0xff) + 0x200;
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
qeth_idx_finalize_cmd(card, iob);
memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
iob->callback = qeth_release_buffer_cb;
static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
/* MPC cmds are issued strictly in sequence. */
return !IS_IPA(reply->data);
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
unsigned int data_length)
struct qeth_cmd_buffer *iob;
iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
memcpy(iob->data, data, data_length);
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
iob->finalize = qeth_mpc_finalize_cmd;
iob->match = qeth_mpc_match_reply;
 * qeth_send_control_data() - send control command to the card
 * @card: qeth_card structure pointer
 * @iob: qeth_cmd_buffer pointer
 * @reply_cb: callback function pointer
 * @cb_card: pointer to the qeth_card structure
 * @cb_reply: pointer to the qeth_reply structure
 * @cb_cmd: pointer to the original iob for non-IPA
 * commands, or to the qeth_ipa_cmd structure
 * for the IPA commands.
 * @reply_param: private pointer passed to the callback
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 * > 0 if more reply blocks are expected,
 * 0 if the last or only reply block is received, and
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
static int qeth_send_control_data(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *cb_card,
struct qeth_reply *cb_reply,
unsigned long cb_cmd),
struct qeth_channel *channel = iob->channel;
struct qeth_reply *reply = &iob->reply;
long timeout = iob->timeout;
QETH_CARD_TEXT(card, 2, "sendctl");
reply->callback = reply_cb;
reply->param = reply_param;
timeout = wait_event_interruptible_timeout(card->wait_q,
qeth_trylock_channel(channel),
return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
iob->finalize(card, iob);
QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
qeth_enqueue_cmd(card, iob);
/* This pairs with iob->callback, and keeps the iob alive after IO: */
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
channel->active_cmd = iob;
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_cmd(card, iob);
qeth_unlock_channel(card, channel);
timeout = wait_for_completion_interruptible_timeout(&iob->done,
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_cmd(card, iob);
/* Wait until the callback for a late reply has completed: */
spin_lock_irq(&iob->lock);
/* Zap any callback that's still pending: */
spin_unlock_irq(&iob->lock);
struct qeth_node_desc {
struct node_descriptor nd1;
struct node_descriptor nd2;
struct node_descriptor nd3;
static void qeth_read_conf_data_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
QETH_CARD_TEXT(card, 2, "cfgunit");
if (data_length < sizeof(*nd)) {
card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
nd->nd1.plant[1] == _ascebc['M'];
tag = (u8 *)&nd->nd1.tag;
card->info.chpid = tag[0];
card->info.unit_addr2 = tag[1];
tag = (u8 *)&nd->nd2.tag;
card->info.cula = tag[1];
card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
nd->nd3.model[1] == 0xF0 &&
nd->nd3.model[2] >= 0xF1 &&
nd->nd3.model[2] <= 0xF4;
qeth_notify_cmd(iob, rc);
static int qeth_read_conf_data(struct qeth_card *card)
struct qeth_channel *channel = &card->data;
struct qeth_cmd_buffer *iob;
/* scan for RCD command in extended SenseID data */
ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd == 0)
if (ciw->count < sizeof(struct qeth_node_desc))
iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
iob->callback = qeth_read_conf_data_cb;
qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
return qeth_send_control_data(card, iob, NULL, NULL);
static int qeth_idx_check_activate_response(struct qeth_card *card,
struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
rc = qeth_check_idx_response(card, iob->data);
if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
/* negative reply: */
QETH_CARD_TEXT_(card, 2, "idxneg%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
case QETH_IDX_ACT_ERR_EXCL:
dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another host\n");
case QETH_IDX_ACT_ERR_AUTH:
case QETH_IDX_ACT_ERR_AUTH_USER:
dev_err(&channel->ccwdev->dev,
"Setting the device online failed because of insufficient authorization\n");
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
CCW_DEVID(channel->ccwdev));
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_channel *channel = iob->channel;
QETH_CARD_TEXT(card, 2, "idxrdcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (peer_level != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, peer_level);
memcpy(&card->token.issuer_rm_r,
QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
memcpy(&card->info.mcl_level[0],
QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
qeth_notify_cmd(iob, rc);
2289 rc = qeth_idx_check_activate_response(card, channel, iob);
2293 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2294 if ((peer_level & ~0x0100) !=
2295 qeth_peer_func_level(card->info.func_level)) {
2296 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2297 CCW_DEVID(channel->ccwdev),
2298 card->info.func_level, peer_level);
2303 qeth_notify_cmd(iob, rc);
2307 static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2308 struct qeth_cmd_buffer *iob)
2310 u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2311 u8 port = ((u8)card->dev->dev_port) | 0x80;
2312 struct ccw1 *ccw = __ccw_from_cmd(iob);
2314 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2316 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2317 iob->finalize = qeth_idx_finalize_cmd;
2319 port |= QETH_IDX_ACT_INVAL_FRAME;
2320 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2321 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2322 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2323 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2324 &card->info.func_level, 2);
2325 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2326 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2329 static int qeth_idx_activate_read_channel(struct qeth_card *card)
2331 struct qeth_channel *channel = &card->read;
2332 struct qeth_cmd_buffer *iob;
2335 QETH_CARD_TEXT(card, 2, "idxread");
2337 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2341 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2342 qeth_idx_setup_activate_cmd(card, iob);
2343 iob->callback = qeth_idx_activate_read_channel_cb;
2345 rc = qeth_send_control_data(card, iob, NULL, NULL);
2349 channel->state = CH_STATE_UP;
2353 static int qeth_idx_activate_write_channel(struct qeth_card *card)
2355 struct qeth_channel *channel = &card->write;
2356 struct qeth_cmd_buffer *iob;
2359 QETH_CARD_TEXT(card, 2, "idxwrite");
2361 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2365 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2366 qeth_idx_setup_activate_cmd(card, iob);
2367 iob->callback = qeth_idx_activate_write_channel_cb;
2369 rc = qeth_send_control_data(card, iob, NULL, NULL);
2373 channel->state = CH_STATE_UP;
2377 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2380 struct qeth_cmd_buffer *iob;
2382 QETH_CARD_TEXT(card, 2, "cmenblcb");
2384 iob = (struct qeth_cmd_buffer *) data;
2385 memcpy(&card->token.cm_filter_r,
2386 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2387 QETH_MPC_TOKEN_LENGTH);
2391 static int qeth_cm_enable(struct qeth_card *card)
2393 struct qeth_cmd_buffer *iob;
2395 QETH_CARD_TEXT(card, 2, "cmenable");
2397 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2401 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2402 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2403 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2404 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2406 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2409 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2412 struct qeth_cmd_buffer *iob;
2414 QETH_CARD_TEXT(card, 2, "cmsetpcb");
2416 iob = (struct qeth_cmd_buffer *) data;
2417 memcpy(&card->token.cm_connection_r,
2418 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2419 QETH_MPC_TOKEN_LENGTH);
2423 static int qeth_cm_setup(struct qeth_card *card)
2425 struct qeth_cmd_buffer *iob;
2427 QETH_CARD_TEXT(card, 2, "cmsetup");
2429 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2433 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2434 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2435 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2436 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2437 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2438 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2439 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2442 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2444 if (link_type == QETH_LINK_TYPE_LANE_TR ||
2445 link_type == QETH_LINK_TYPE_HSTR) {
2446 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
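/* Apply a new max MTU reported by the device: update dev->max_mtu, move a
 * device that still runs with its default MTU to the new maximum, and
 * resize the RX buffers (the QDIO queues are freed so that they get
 * re-allocated with the new in_buf_size).
 */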
2453 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2455 struct net_device *dev = card->dev;
2456 unsigned int new_mtu;
2459 /* IQD needs accurate max MTU to set up its RX buffers: */
2462 /* tolerate quirky HW: */
2463 max_mtu = ETH_MAX_MTU;
2468 /* move any device with default MTU to new max MTU: */
2469 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2471 /* adjust RX buffer size to new max MTU: */
2472 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2473 if (dev->max_mtu && dev->max_mtu != max_mtu)
2474 qeth_free_qdio_queues(card);
2478 /* default MTUs for first setup: */
2479 else if (IS_LAYER2(card))
2480 new_mtu = ETH_DATA_LEN;
2482 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2485 dev->max_mtu = max_mtu;
2486 dev->mtu = min(new_mtu, max_mtu);
2491 static int qeth_get_mtu_outof_framesize(int framesize)
2493 switch (framesize) {
2507 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2510 __u16 mtu, framesize;
2512 struct qeth_cmd_buffer *iob;
2515 QETH_CARD_TEXT(card, 2, "ulpenacb");
2517 iob = (struct qeth_cmd_buffer *) data;
2518 memcpy(&card->token.ulp_filter_r,
2519 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2520 QETH_MPC_TOKEN_LENGTH);
2522 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2523 mtu = qeth_get_mtu_outof_framesize(framesize);
2525 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2527 *(u16 *)reply->param = mtu;
2529 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2530 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2532 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2533 if (!qeth_is_supported_link_type(card, link_type))
2534 return -EPROTONOSUPPORT;
2537 card->info.link_type = link_type;
2538 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2542 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2545 return QETH_PROT_OSN2;
2546 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2549 static int qeth_ulp_enable(struct qeth_card *card)
2551 u8 prot_type = qeth_mpc_select_prot_type(card);
2552 struct qeth_cmd_buffer *iob;
2556 QETH_CARD_TEXT(card, 2, "ulpenabl");
2558 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2562 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2563 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2564 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2565 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2566 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2567 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2568 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2571 return qeth_update_max_mtu(card, max_mtu);
2574 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2577 struct qeth_cmd_buffer *iob;
2579 QETH_CARD_TEXT(card, 2, "ulpstpcb");
2581 iob = (struct qeth_cmd_buffer *) data;
2582 memcpy(&card->token.ulp_connection_r,
2583 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2584 QETH_MPC_TOKEN_LENGTH);
2585 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2587 QETH_CARD_TEXT(card, 2, "olmlimit");
2588 dev_err(&card->gdev->dev, "A connection could not be "
2589 "established because of an OLM limit\n");
2595 static int qeth_ulp_setup(struct qeth_card *card)
2598 struct qeth_cmd_buffer *iob;
2600 QETH_CARD_TEXT(card, 2, "ulpsetup");
2602 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2606 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2607 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2608 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2609 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2610 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2611 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2613 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2614 temp = (card->info.cula << 8) + card->info.unit_addr2;
2615 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2616 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
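/* Allocate the metadata for one TX buffer slot, attach it to the queue's
 * qdio_buffer at index @bidx and initialize its skb list.
 */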
2619 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2621 struct qeth_qdio_out_buffer *newbuf;
2623 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2627 newbuf->buffer = q->qdio_bufs[bidx];
2628 skb_queue_head_init(&newbuf->skb_list);
2629 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2631 newbuf->next_pending = q->bufs[bidx];
2632 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2633 q->bufs[bidx] = newbuf;
2637 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2642 qeth_drain_output_queue(q, true);
2643 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2647 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2649 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2654 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2661 static void qeth_tx_completion_timer(struct timer_list *timer)
2663 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2665 napi_schedule(&queue->napi);
2666 QETH_TXQ_STAT_INC(queue, completion_timer);
2669 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2673 QETH_CARD_TEXT(card, 2, "allcqdbf");
2675 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2676 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2679 QETH_CARD_TEXT(card, 2, "inq");
2680 card->qdio.in_q = qeth_alloc_qdio_queue();
2681 if (!card->qdio.in_q)
2684 /* inbound buffer pool */
2685 if (qeth_alloc_buffer_pool(card))
2689 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2690 struct qeth_qdio_out_q *queue;
2692 queue = qeth_alloc_output_queue();
2695 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2696 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2697 card->qdio.out_qs[i] = queue;
2699 queue->queue_no = i;
2700 spin_lock_init(&queue->lock);
2701 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2702 queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2703 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2705 /* give outbound qeth_qdio_buffers their qdio_buffers */
2706 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2707 WARN_ON(queue->bufs[j]);
2708 if (qeth_init_qdio_out_buf(queue, j))
2709 goto out_freeoutqbufs;
2714 if (qeth_alloc_cq(card))
2722 kmem_cache_free(qeth_qdio_outbuf_cache,
2723 card->qdio.out_qs[i]->bufs[j]);
2724 card->qdio.out_qs[i]->bufs[j] = NULL;
2728 qeth_free_output_queue(card->qdio.out_qs[--i]);
2729 card->qdio.out_qs[i] = NULL;
2731 qeth_free_buffer_pool(card);
2733 qeth_free_qdio_queue(card->qdio.in_q);
2734 card->qdio.in_q = NULL;
2736 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2740 static void qeth_free_qdio_queues(struct qeth_card *card)
2744 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2745 QETH_QDIO_UNINITIALIZED)
2749 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2750 if (card->qdio.in_q->bufs[j].rx_skb)
2751 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2753 qeth_free_qdio_queue(card->qdio.in_q);
2754 card->qdio.in_q = NULL;
2755 /* inbound buffer pool */
2756 qeth_free_buffer_pool(card);
2757 /* free outbound qdio_qs */
2758 for (i = 0; i < card->qdio.no_out_queues; i++) {
2759 qeth_free_output_queue(card->qdio.out_qs[i]);
2760 card->qdio.out_qs[i] = NULL;
2764 static void qeth_create_qib_param_field(struct qeth_card *card,
2768 param_field[0] = _ascebc['P'];
2769 param_field[1] = _ascebc['C'];
2770 param_field[2] = _ascebc['I'];
2771 param_field[3] = _ascebc['T'];
2772 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2773 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2774 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2777 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2780 param_field[16] = _ascebc['B'];
2781 param_field[17] = _ascebc['L'];
2782 param_field[18] = _ascebc['K'];
2783 param_field[19] = _ascebc['T'];
2784 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2785 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2786 *((unsigned int *) (&param_field[28])) =
2787 card->info.blkt.inter_packet_jumbo;
2790 static int qeth_qdio_activate(struct qeth_card *card)
2792 QETH_CARD_TEXT(card, 3, "qdioact");
2793 return qdio_activate(CARD_DDEV(card));
2796 static int qeth_dm_act(struct qeth_card *card)
2798 struct qeth_cmd_buffer *iob;
2800 QETH_CARD_TEXT(card, 2, "dmact");
2802 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2806 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2807 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2808 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2809 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2810 return qeth_send_control_data(card, iob, NULL, NULL);
2813 static int qeth_mpc_initialize(struct qeth_card *card)
2817 QETH_CARD_TEXT(card, 2, "mpcinit");
2819 rc = qeth_issue_next_read(card);
2821 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2824 rc = qeth_cm_enable(card);
2826 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2829 rc = qeth_cm_setup(card);
2831 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2834 rc = qeth_ulp_enable(card);
2836 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2839 rc = qeth_ulp_setup(card);
2841 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2844 rc = qeth_alloc_qdio_queues(card);
2846 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2849 rc = qeth_qdio_establish(card);
2851 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2852 qeth_free_qdio_queues(card);
2855 rc = qeth_qdio_activate(card);
2857 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2860 rc = qeth_dm_act(card);
2862 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2869 void qeth_print_status_message(struct qeth_card *card)
2871 switch (card->info.type) {
2872 case QETH_CARD_TYPE_OSD:
2873 case QETH_CARD_TYPE_OSM:
2874 case QETH_CARD_TYPE_OSX:
2875 /* VM will use a non-zero first character
2876 * to indicate a HiperSockets-like reporting
2877 * of the level; OSA sets the first character to zero
2879 if (!card->info.mcl_level[0]) {
2880 sprintf(card->info.mcl_level, "%02x%02x",
2881 card->info.mcl_level[2],
2882 card->info.mcl_level[3]);
2886 case QETH_CARD_TYPE_IQD:
2887 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2888 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2889 card->info.mcl_level[0]];
2890 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2891 card->info.mcl_level[1]];
2892 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2893 card->info.mcl_level[2]];
2894 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2895 card->info.mcl_level[3]];
2896 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2900 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2902 dev_info(&card->gdev->dev,
2903 "Device is a%s card%s%s%s\nwith link type %s.\n",
2904 qeth_get_cardname(card),
2905 (card->info.mcl_level[0]) ? " (level: " : "",
2906 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2907 (card->info.mcl_level[0]) ? ")" : "",
2908 qeth_get_cardname_short(card));
2910 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2912 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2914 struct qeth_buffer_pool_entry *entry;
2916 QETH_CARD_TEXT(card, 5, "inwrklst");
2918 list_for_each_entry(entry,
2919 &card->qdio.init_pool.entry_list, init_list) {
2920 qeth_put_buffer_pool_entry(card, entry);
2924 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2925 struct qeth_card *card)
2927 struct qeth_buffer_pool_entry *entry;
2930 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2933 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2935 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2936 if (page_count(entry->elements[i]) > 1) {
2942 list_del_init(&entry->list);
2947 /* no free buffer in pool so take first one and swap pages */
2948 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2949 struct qeth_buffer_pool_entry, list);
2950 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2951 if (page_count(entry->elements[i]) > 1) {
2952 struct page *page = dev_alloc_page();
2957 __free_page(entry->elements[i]);
2958 entry->elements[i] = page;
2959 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2962 list_del_init(&entry->list);
2966 static int qeth_init_input_buffer(struct qeth_card *card,
2967 struct qeth_qdio_buffer *buf)
2969 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2972 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2973 buf->rx_skb = netdev_alloc_skb(card->dev,
2975 sizeof(struct ipv6hdr));
2981 pool_entry = qeth_find_free_buffer_pool_entry(card);
2985 buf->pool_entry = pool_entry;
2989 * since the buffer is accessed only from the input_tasklet
2990 * there shouldn't be a need to synchronize; also, since we use
2991 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2994 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2995 buf->buffer->element[i].length = PAGE_SIZE;
2996 buf->buffer->element[i].addr =
2997 page_to_phys(pool_entry->elements[i]);
2998 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2999 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
3001 buf->buffer->element[i].eflags = 0;
3002 buf->buffer->element[i].sflags = 0;
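/* Upper bound for TX bulking: only plain IQD unicast queues without a
 * completion queue may bulk up to the device's multi-write limit (mmwc);
 * all other queues flush one buffer at a time.
 */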
3007 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
3008 struct qeth_qdio_out_q *queue)
3010 if (!IS_IQD(card) ||
3011 qeth_iqd_is_mcast_queue(card, queue) ||
3012 card->options.cq == QETH_CQ_ENABLED ||
3013 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
3016 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
3019 static int qeth_init_qdio_queues(struct qeth_card *card)
3021 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
3025 QETH_CARD_TEXT(card, 2, "initqdqs");
3028 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3029 memset(&card->rx, 0, sizeof(struct qeth_rx));
3031 qeth_initialize_working_pool_list(card);
3032 /* give only as many buffers to hardware as we have buffer pool entries */
3033 for (i = 0; i < rx_bufs; i++) {
3034 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3039 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3040 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
3042 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3047 rc = qeth_cq_init(card);
3052 /* outbound queue */
3053 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3054 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3056 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3057 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3058 queue->next_buf_to_fill = 0;
3060 queue->prev_hdr = NULL;
3061 queue->coalesced_frames = 0;
3062 queue->bulk_start = 0;
3063 queue->bulk_count = 0;
3064 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3065 atomic_set(&queue->used_buffers, 0);
3066 atomic_set(&queue->set_pci_flags_count, 0);
3067 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3072 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3073 struct qeth_cmd_buffer *iob)
3075 qeth_mpc_finalize_cmd(card, iob);
3077 /* override with IPA-specific values: */
3078 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3081 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3083 bool (*match)(struct qeth_cmd_buffer *iob,
3084 struct qeth_cmd_buffer *reply))
3086 u8 prot_type = qeth_mpc_select_prot_type(card);
3087 u16 total_length = iob->length;
3089 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3091 iob->finalize = qeth_ipa_finalize_cmd;
3094 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3095 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3096 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3097 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3098 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3099 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3100 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3101 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3103 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
3105 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3106 struct qeth_cmd_buffer *reply)
3108 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3110 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3113 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3114 enum qeth_ipa_cmds cmd_code,
3115 enum qeth_prot_versions prot,
3116 unsigned int data_length)
3118 struct qeth_cmd_buffer *iob;
3119 struct qeth_ipacmd_hdr *hdr;
3121 data_length += offsetof(struct qeth_ipa_cmd, data);
3122 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3127 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3129 hdr = &__ipa_cmd(iob)->hdr;
3130 hdr->command = cmd_code;
3131 hdr->initiator = IPA_CMD_INITIATOR_HOST;
3132 /* hdr->seqno is set by qeth_send_control_data() */
3133 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3134 hdr->rel_adapter_no = (u8) card->dev->dev_port;
3135 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3136 hdr->param_count = 1;
3137 hdr->prot_version = prot;
3140 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3142 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3143 struct qeth_reply *reply, unsigned long data)
3145 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3147 return (cmd->hdr.return_code) ? -EIO : 0;
3151 * qeth_send_ipa_cmd() - send an IPA command
3153 * See qeth_send_control_data() for explanation of the arguments.
3156 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3157 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3163 QETH_CARD_TEXT(card, 4, "sendipa");
3165 if (card->read_or_write_problem) {
3170 if (reply_cb == NULL)
3171 reply_cb = qeth_send_ipa_cmd_cb;
3172 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3174 qeth_clear_ipacmd_list(card);
3175 qeth_schedule_recovery(card);
3179 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3181 static int qeth_send_startlan_cb(struct qeth_card *card,
3182 struct qeth_reply *reply, unsigned long data)
3184 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3186 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3189 return (cmd->hdr.return_code) ? -EIO : 0;
3192 static int qeth_send_startlan(struct qeth_card *card)
3194 struct qeth_cmd_buffer *iob;
3196 QETH_CARD_TEXT(card, 2, "strtlan");
3198 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3201 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3204 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3206 if (!cmd->hdr.return_code)
3207 cmd->hdr.return_code =
3208 cmd->data.setadapterparms.hdr.return_code;
3209 return cmd->hdr.return_code;
3212 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3213 struct qeth_reply *reply, unsigned long data)
3215 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3216 struct qeth_query_cmds_supp *query_cmd;
3218 QETH_CARD_TEXT(card, 3, "quyadpcb");
3219 if (qeth_setadpparms_inspect_rc(cmd))
3222 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3223 if (query_cmd->lan_type & 0x7f) {
3224 if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3225 return -EPROTONOSUPPORT;
3227 card->info.link_type = query_cmd->lan_type;
3228 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3231 card->options.adp.supported = query_cmd->supported_cmds;
3235 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3236 enum qeth_ipa_setadp_cmd adp_cmd,
3237 unsigned int data_length)
3239 struct qeth_ipacmd_setadpparms_hdr *hdr;
3240 struct qeth_cmd_buffer *iob;
3242 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3244 offsetof(struct qeth_ipacmd_setadpparms,
3249 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3250 hdr->cmdlength = sizeof(*hdr) + data_length;
3251 hdr->command_code = adp_cmd;
3252 hdr->used_total = 1;
3257 static int qeth_query_setadapterparms(struct qeth_card *card)
3260 struct qeth_cmd_buffer *iob;
3262 QETH_CARD_TEXT(card, 3, "queryadp");
3263 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3264 SETADP_DATA_SIZEOF(query_cmds_supp));
3267 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3271 static int qeth_query_ipassists_cb(struct qeth_card *card,
3272 struct qeth_reply *reply, unsigned long data)
3274 struct qeth_ipa_cmd *cmd;
3276 QETH_CARD_TEXT(card, 2, "qipasscb");
3278 cmd = (struct qeth_ipa_cmd *) data;
3280 switch (cmd->hdr.return_code) {
3281 case IPA_RC_SUCCESS:
3283 case IPA_RC_NOTSUPP:
3284 case IPA_RC_L2_UNSUPPORTED_CMD:
3285 QETH_CARD_TEXT(card, 2, "ipaunsup");
3286 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3287 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3290 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3291 CARD_DEVID(card), cmd->hdr.return_code);
3295 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3296 card->options.ipa4 = cmd->hdr.assists;
3297 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3298 card->options.ipa6 = cmd->hdr.assists;
3300 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3305 static int qeth_query_ipassists(struct qeth_card *card,
3306 enum qeth_prot_versions prot)
3309 struct qeth_cmd_buffer *iob;
3311 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3312 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3315 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3319 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3320 struct qeth_reply *reply, unsigned long data)
3322 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3323 struct qeth_query_switch_attributes *attrs;
3324 struct qeth_switch_info *sw_info;
3326 QETH_CARD_TEXT(card, 2, "qswiatcb");
3327 if (qeth_setadpparms_inspect_rc(cmd))
3330 sw_info = (struct qeth_switch_info *)reply->param;
3331 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3332 sw_info->capabilities = attrs->capabilities;
3333 sw_info->settings = attrs->settings;
3334 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3339 int qeth_query_switch_attributes(struct qeth_card *card,
3340 struct qeth_switch_info *sw_info)
3342 struct qeth_cmd_buffer *iob;
3344 QETH_CARD_TEXT(card, 2, "qswiattr");
3345 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3347 if (!netif_carrier_ok(card->dev))
3349 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3352 return qeth_send_ipa_cmd(card, iob,
3353 qeth_query_switch_attributes_cb, sw_info);
3356 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3357 enum qeth_diags_cmds sub_cmd,
3358 unsigned int data_length)
3360 struct qeth_ipacmd_diagass *cmd;
3361 struct qeth_cmd_buffer *iob;
3363 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3364 DIAG_HDR_LEN + data_length);
3368 cmd = &__ipa_cmd(iob)->data.diagass;
3369 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3370 cmd->subcmd = sub_cmd;
3373 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3375 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3376 struct qeth_reply *reply, unsigned long data)
3378 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3379 u16 rc = cmd->hdr.return_code;
3382 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3386 card->info.diagass_support = cmd->data.diagass.ext;
3390 static int qeth_query_setdiagass(struct qeth_card *card)
3392 struct qeth_cmd_buffer *iob;
3394 QETH_CARD_TEXT(card, 2, "qdiagass");
3395 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3398 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
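/* Collect the identifiers (CHPID, subchannel and device number, LPAR number
 * and, for VM guests, the guest name) that are sent along with a HW trap.
 */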
3401 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3403 unsigned long info = get_zeroed_page(GFP_KERNEL);
3404 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3405 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3406 struct ccw_dev_id ccwid;
3409 tid->chpid = card->info.chpid;
3410 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3411 tid->ssid = ccwid.ssid;
3412 tid->devno = ccwid.devno;
3415 level = stsi(NULL, 0, 0, 0);
3416 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3417 tid->lparnr = info222->lpar_number;
3418 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3419 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3420 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3426 static int qeth_hw_trap_cb(struct qeth_card *card,
3427 struct qeth_reply *reply, unsigned long data)
3429 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3430 u16 rc = cmd->hdr.return_code;
3433 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3439 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3441 struct qeth_cmd_buffer *iob;
3442 struct qeth_ipa_cmd *cmd;
3444 QETH_CARD_TEXT(card, 2, "diagtrap");
3445 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3448 cmd = __ipa_cmd(iob);
3449 cmd->data.diagass.type = 1;
3450 cmd->data.diagass.action = action;
3452 case QETH_DIAGS_TRAP_ARM:
3453 cmd->data.diagass.options = 0x0003;
3454 cmd->data.diagass.ext = 0x00010000 +
3455 sizeof(struct qeth_trap_id);
3456 qeth_get_trap_id(card,
3457 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3459 case QETH_DIAGS_TRAP_DISARM:
3460 cmd->data.diagass.options = 0x0001;
3462 case QETH_DIAGS_TRAP_CAPTURE:
3465 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3468 static int qeth_check_qdio_errors(struct qeth_card *card,
3469 struct qdio_buffer *buf,
3470 unsigned int qdio_error,
3471 const char *dbftext)
3474 QETH_CARD_TEXT(card, 2, dbftext);
3475 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3476 buf->element[15].sflags);
3477 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3478 buf->element[14].sflags);
3479 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3480 if ((buf->element[15].sflags) == 0x12) {
3481 QETH_CARD_STAT_INC(card, rx_fifo_errors);
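/* Re-init consumed RX buffers from the buffer pool and hand them back to
 * the device. Refilling only happens once enough buffers have accumulated
 * (QETH_IN_BUF_REQUEUE_THRESHOLD), to limit the number of SIGAs.
 */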
3489 static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3492 struct qeth_qdio_q *queue = card->qdio.in_q;
3493 struct list_head *lh;
3498 /* only requeue at a certain threshold to avoid SIGAs */
3499 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3500 for (i = queue->next_buf_to_init;
3501 i < queue->next_buf_to_init + count; ++i) {
3502 if (qeth_init_input_buffer(card,
3503 &queue->bufs[QDIO_BUFNR(i)])) {
3510 if (newcount < count) {
3511 /* we are in memory shortage so we switch back to
3512 traditional skb allocation and drop packets */
3513 atomic_set(&card->force_alloc_skb, 3);
3516 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3521 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3523 if (i == card->qdio.in_buf_pool.buf_count) {
3524 QETH_CARD_TEXT(card, 2, "qsarbw");
3525 schedule_delayed_work(
3526 &card->buffer_reclaim_work,
3527 QETH_RECLAIM_WORK_TIME);
3532 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3533 queue->next_buf_to_init, count);
3535 QETH_CARD_TEXT(card, 2, "qinberr");
3537 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3545 static void qeth_buffer_reclaim_work(struct work_struct *work)
3547 struct qeth_card *card = container_of(to_delayed_work(work),
3549 buffer_reclaim_work);
3552 napi_schedule(&card->napi);
3553 /* kick-start the NAPI softirq: */
3557 static void qeth_handle_send_error(struct qeth_card *card,
3558 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3560 int sbalf15 = buffer->buffer->element[15].sflags;
3562 QETH_CARD_TEXT(card, 6, "hdsnderr");
3563 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3568 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3571 QETH_CARD_TEXT(card, 1, "lnkfail");
3572 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3573 (u16)qdio_err, (u8)sbalf15);
3577 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3578 * @queue: queue to check for packing buffer
3580 * Returns number of buffers that were prepared for flush.
3582 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3584 struct qeth_qdio_out_buffer *buffer;
3586 buffer = queue->bufs[queue->next_buf_to_fill];
3587 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3588 (buffer->next_element_to_fill > 0)) {
3589 /* it's a packing buffer */
3590 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3591 queue->next_buf_to_fill =
3592 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3599 * Switches to packing state if the number of used buffers on a queue
3600 * reaches a certain limit.
3602 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3604 if (!queue->do_pack) {
3605 if (atomic_read(&queue->used_buffers)
3606 >= QETH_HIGH_WATERMARK_PACK){
3607 /* switch non-PACKING -> PACKING */
3608 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3609 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3616 * Switches from packing to non-packing mode. If there is a packing
3617 * buffer on the queue this buffer will be prepared to be flushed.
3618 * In that case 1 is returned to inform the caller. If no buffer
3619 * has to be flushed, zero is returned.
3621 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3623 if (queue->do_pack) {
3624 if (atomic_read(&queue->used_buffers)
3625 <= QETH_LOW_WATERMARK_PACK) {
3626 /* switch PACKING -> non-PACKING */
3627 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3628 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3630 return qeth_prep_flush_pack_buffer(queue);
3636 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3639 struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3640 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3641 struct qeth_card *card = queue->card;
3645 for (i = index; i < index + count; ++i) {
3646 unsigned int bidx = QDIO_BUFNR(i);
3647 struct sk_buff *skb;
3649 buf = queue->bufs[bidx];
3650 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3651 SBAL_EFLAGS_LAST_ENTRY;
3652 queue->coalesced_frames += buf->frames;
3654 if (queue->bufstates)
3655 queue->bufstates[bidx].user = buf;
3658 skb_queue_walk(&buf->skb_list, skb)
3659 skb_tx_timestamp(skb);
3663 if (!IS_IQD(card)) {
3664 if (!queue->do_pack) {
3665 if ((atomic_read(&queue->used_buffers) >=
3666 (QETH_HIGH_WATERMARK_PACK -
3667 QETH_WATERMARK_PACK_FUZZ)) &&
3668 !atomic_read(&queue->set_pci_flags_count)) {
3669 /* it's likely that we'll go to packing
3671 atomic_inc(&queue->set_pci_flags_count);
3672 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3675 if (!atomic_read(&queue->set_pci_flags_count)) {
3677 * there's no outstanding PCI any more, so we
3678 * have to request a PCI to be sure the PCI
3679 * will wake at some time in the future; then we
3680 * can flush packed buffers that might still be
3681 * hanging around, which can happen if no
3682 * further send was requested by the stack
3684 atomic_inc(&queue->set_pci_flags_count);
3685 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3689 if (atomic_read(&queue->set_pci_flags_count))
3690 qdio_flags |= QDIO_FLAG_PCI_OUT;
3693 QETH_TXQ_STAT_INC(queue, doorbell);
3694 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3695 queue->queue_no, index, count);
3697 /* Fake the TX completion interrupt: */
3699 unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3700 unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3702 if (frames && queue->coalesced_frames >= frames) {
3703 napi_schedule(&queue->napi);
3704 queue->coalesced_frames = 0;
3705 QETH_TXQ_STAT_INC(queue, coal_frames);
3707 qeth_tx_arm_timer(queue, usecs);
3712 /* ignore temporary SIGA errors without busy condition */
3715 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3716 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3717 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3718 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3719 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3721 /* this must not happen under normal circumstances. if it
3722 * happens something is really wrong -> recover */
3723 qeth_schedule_recovery(queue->card);
3728 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3730 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3732 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3733 queue->prev_hdr = NULL;
3734 queue->bulk_count = 0;
3737 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3740 * check if we have to switch to non-packing mode or if
3741 * we have to get a pci flag out on the queue
3743 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3744 !atomic_read(&queue->set_pci_flags_count)) {
3745 unsigned int index, flush_cnt;
3748 spin_lock(&queue->lock);
3750 index = queue->next_buf_to_fill;
3751 q_was_packing = queue->do_pack;
3753 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3754 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3755 flush_cnt = qeth_prep_flush_pack_buffer(queue);
3758 qeth_flush_buffers(queue, index, flush_cnt);
3760 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3763 spin_unlock(&queue->lock);
3767 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3769 struct qeth_card *card = (struct qeth_card *)card_ptr;
3771 napi_schedule_irqoff(&card->napi);
3774 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3778 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3782 if (card->options.cq == cq) {
3787 qeth_free_qdio_queues(card);
3788 card->options.cq = cq;
3795 EXPORT_SYMBOL_GPL(qeth_configure_cq);
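/* Completion Queue handler: walk the reported CQ buffers, process each
 * queued async-completion descriptor (AOB) and give the buffers back to
 * the device via do_QDIO().
 */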
3797 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3798 unsigned int queue, int first_element,
3801 struct qeth_qdio_q *cq = card->qdio.c_q;
3805 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3806 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3807 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3810 netif_tx_stop_all_queues(card->dev);
3811 qeth_schedule_recovery(card);
3815 for (i = first_element; i < first_element + count; ++i) {
3816 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3819 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3820 buffer->element[e].addr) {
3821 unsigned long phys_aob_addr = buffer->element[e].addr;
3823 qeth_qdio_handle_aob(card, phys_aob_addr);
3826 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3828 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3829 card->qdio.c_q->next_buf_to_init,
3832 dev_warn(&card->gdev->dev,
3833 "QDIO reported an error, rc=%i\n", rc);
3834 QETH_CARD_TEXT(card, 2, "qcqherr");
3837 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3840 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3841 unsigned int qdio_err, int queue,
3842 int first_elem, int count,
3843 unsigned long card_ptr)
3845 struct qeth_card *card = (struct qeth_card *)card_ptr;
3847 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3848 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3851 qeth_schedule_recovery(card);
3854 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3855 unsigned int qdio_error, int __queue,
3856 int first_element, int count,
3857 unsigned long card_ptr)
3859 struct qeth_card *card = (struct qeth_card *) card_ptr;
3860 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3861 struct net_device *dev = card->dev;
3862 struct netdev_queue *txq;
3865 QETH_CARD_TEXT(card, 6, "qdouhdl");
3866 if (qdio_error & QDIO_ERROR_FATAL) {
3867 QETH_CARD_TEXT(card, 2, "achkcond");
3868 netif_tx_stop_all_queues(dev);
3869 qeth_schedule_recovery(card);
3873 for (i = first_element; i < (first_element + count); ++i) {
3874 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3876 qeth_handle_send_error(card, buf, qdio_error);
3877 qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3880 atomic_sub(count, &queue->used_buffers);
3881 qeth_check_outbound_queue(queue);
3883 txq = netdev_get_tx_queue(dev, __queue);
3884 /* xmit may have observed the full-condition, but not yet stopped the
3885 * txq, in which case the code below won't trigger. So before returning,
3886 * xmit will re-check the txq's fill level and wake it up if needed.
3888 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3889 netif_tx_wake_queue(txq);
3893 * Note: Function assumes that we have 4 outbound queues.
3895 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3897 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3900 switch (card->qdio.do_prio_queueing) {
3901 case QETH_PRIO_Q_ING_TOS:
3902 case QETH_PRIO_Q_ING_PREC:
3903 switch (qeth_get_ip_version(skb)) {
3905 tos = ipv4_get_dsfield(ip_hdr(skb));
3908 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3911 return card->qdio.default_out_queue;
3913 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3914 return ~tos >> 6 & 3;
3915 if (tos & IPTOS_MINCOST)
3917 if (tos & IPTOS_RELIABILITY)
3919 if (tos & IPTOS_THROUGHPUT)
3921 if (tos & IPTOS_LOWDELAY)
3924 case QETH_PRIO_Q_ING_SKB:
3925 if (skb->priority > 5)
3927 return ~skb->priority >> 1 & 3;
3928 case QETH_PRIO_Q_ING_VLAN:
3929 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3930 return ~ntohs(veth->h_vlan_TCI) >>
3931 (VLAN_PRIO_SHIFT + 1) & 3;
3933 case QETH_PRIO_Q_ING_FIXED:
3934 return card->qdio.default_out_queue;
3938 return card->qdio.default_out_queue;
3940 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3943 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3946 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3947 * fragmented part of the SKB. Returns zero for linear SKB.
3949 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3951 int cnt, elements = 0;
3953 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3954 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3956 elements += qeth_get_elements_for_range(
3957 (addr_t)skb_frag_address(frag),
3958 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3964 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3965 * to transmit an skb.
3966 * @skb: the skb to operate on.
3967 * @data_offset: skip this part of the skb's linear data
3969 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3970 * skb's data (both its linear part and paged fragments).
3972 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3974 unsigned int elements = qeth_get_elements_for_frags(skb);
3975 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3976 addr_t start = (addr_t)skb->data + data_offset;
3979 elements += qeth_get_elements_for_range(start, end);
3982 EXPORT_SYMBOL_GPL(qeth_count_elements);
3984 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3988 * qeth_add_hw_header() - add a HW header to an skb.
3989 * @skb: skb that the HW header should be added to.
3990 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3991 * it contains a valid pointer to a qeth_hdr.
3992 * @hdr_len: length of the HW header.
3993 * @proto_len: length of protocol headers that need to be in same page as the
3996 * Returns the pushed length. If the header can't be pushed on
3997 * (eg. because it would cross a page boundary), it is allocated from
3998 * the cache instead and 0 is returned.
3999 * The number of needed buffer elements is returned in @elements.
4000 * Error to create the hdr is indicated by returning with < 0.
4002 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
4003 struct sk_buff *skb, struct qeth_hdr **hdr,
4004 unsigned int hdr_len, unsigned int proto_len,
4005 unsigned int *elements)
4007 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
4008 const unsigned int contiguous = proto_len ? proto_len : 1;
4009 const unsigned int max_elements = queue->max_elements;
4010 unsigned int __elements;
4016 start = (addr_t)skb->data - hdr_len;
4017 end = (addr_t)skb->data;
4019 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
4020 /* Push HW header into same page as first protocol header. */
4022 /* ... but TSO always needs a separate element for headers: */
4023 if (skb_is_gso(skb))
4024 __elements = 1 + qeth_count_elements(skb, proto_len);
4026 __elements = qeth_count_elements(skb, 0);
4027 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4028 /* Push HW header into preceding page, flush with skb->data. */
4030 __elements = 1 + qeth_count_elements(skb, 0);
4032 /* Use header cache, copy protocol headers up. */
4034 __elements = 1 + qeth_count_elements(skb, proto_len);
4037 /* Compress skb to fit into one IO buffer: */
4038 if (__elements > max_elements) {
4039 if (!skb_is_nonlinear(skb)) {
4040 /* Drop it, no easy way of shrinking it further. */
4041 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
4042 max_elements, __elements, skb->len);
4046 rc = skb_linearize(skb);
4048 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
4052 QETH_TXQ_STAT_INC(queue, skbs_linearized);
4053 /* Linearization changed the layout, re-evaluate: */
4057 *elements = __elements;
4058 /* Add the header: */
4060 *hdr = skb_push(skb, hdr_len);
4064 /* Fall back to cache element with known-good alignment: */
4065 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4067 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4070 /* Copy protocol headers behind HW header: */
4071 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
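/* Check whether @curr_skb may share an IQD TX buffer with the previously
 * queued frame: both must address the same target (same destination MAC
 * and VLAN for L2, same next hop and VLAN for L3).
 */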
4075 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4076 struct sk_buff *curr_skb,
4077 struct qeth_hdr *curr_hdr)
4079 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4080 struct qeth_hdr *prev_hdr = queue->prev_hdr;
4085 /* All packets must have the same target: */
4086 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4087 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4089 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4090 eth_hdr(curr_skb)->h_dest) &&
4091 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4094 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4095 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4099 * qeth_fill_buffer() - map skb into an output buffer
4100 * @buf: buffer to transport the skb
4101 * @skb: skb to map into the buffer
4102 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
4103 * from qeth_core_header_cache.
4104 * @offset: when mapping the skb, start at skb->data + offset
4105 * @hd_len: if > 0, build a dedicated header element of this size
4107 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4108 struct sk_buff *skb, struct qeth_hdr *hdr,
4109 unsigned int offset, unsigned int hd_len)
4111 struct qdio_buffer *buffer = buf->buffer;
4112 int element = buf->next_element_to_fill;
4113 int length = skb_headlen(skb) - offset;
4114 char *data = skb->data + offset;
4115 unsigned int elem_length, cnt;
4116 bool is_first_elem = true;
4118 __skb_queue_tail(&buf->skb_list, skb);
4120 /* build dedicated element for HW Header */
4122 is_first_elem = false;
4124 buffer->element[element].addr = virt_to_phys(hdr);
4125 buffer->element[element].length = hd_len;
4126 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4128 /* HW header is allocated from cache: */
4129 if ((void *)hdr != skb->data)
4130 buf->is_header[element] = 1;
4131 /* HW header was pushed and is contiguous with linear part: */
4132 else if (length > 0 && !PAGE_ALIGNED(data) &&
4133 (data == (char *)hdr + hd_len))
4134 buffer->element[element].eflags |=
4135 SBAL_EFLAGS_CONTIGUOUS;
4140 /* map linear part into buffer element(s) */
4141 while (length > 0) {
4142 elem_length = min_t(unsigned int, length,
4143 PAGE_SIZE - offset_in_page(data));
4145 buffer->element[element].addr = virt_to_phys(data);
4146 buffer->element[element].length = elem_length;
4147 length -= elem_length;
4148 if (is_first_elem) {
4149 is_first_elem = false;
4150 if (length || skb_is_nonlinear(skb))
4151 /* skb needs additional elements */
4152 buffer->element[element].eflags =
4153 SBAL_EFLAGS_FIRST_FRAG;
4155 buffer->element[element].eflags = 0;
4157 buffer->element[element].eflags =
4158 SBAL_EFLAGS_MIDDLE_FRAG;
4161 data += elem_length;
4165 /* map page frags into buffer element(s) */
4166 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4167 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4169 data = skb_frag_address(frag);
4170 length = skb_frag_size(frag);
4171 while (length > 0) {
4172 elem_length = min_t(unsigned int, length,
4173 PAGE_SIZE - offset_in_page(data));
4175 buffer->element[element].addr = virt_to_phys(data);
4176 buffer->element[element].length = elem_length;
4177 buffer->element[element].eflags =
4178 SBAL_EFLAGS_MIDDLE_FRAG;
4180 length -= elem_length;
4181 data += elem_length;
4186 if (buffer->element[element - 1].eflags)
4187 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4188 buf->next_element_to_fill = element;
4192 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4193 struct sk_buff *skb, unsigned int elements,
4194 struct qeth_hdr *hdr, unsigned int offset,
4195 unsigned int hd_len)
4197 unsigned int bytes = qdisc_pkt_len(skb);
4198 struct qeth_qdio_out_buffer *buffer;
4199 unsigned int next_element;
4200 struct netdev_queue *txq;
4201 bool stopped = false;
4204 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4205 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4207 /* Just a sanity check, the wake/stop logic should ensure that we always
4208 * get a free buffer.
4210 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4213 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4216 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4217 if (buffer->next_element_to_fill > 0) {
4218 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4219 queue->bulk_count++;
4222 if (queue->bulk_count >= queue->bulk_max)
4226 qeth_flush_queue(queue);
4228 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4229 queue->bulk_count)];
4231 /* Sanity-check again: */
4232 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4236 if (buffer->next_element_to_fill == 0 &&
4237 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4238 /* If a TX completion happens right _here_ and misses to wake
4239 * the txq, then our re-check below will catch the race.
4241 QETH_TXQ_STAT_INC(queue, stopped);
4242 netif_tx_stop_queue(txq);
4246 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4247 buffer->bytes += bytes;
4248 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4249 queue->prev_hdr = hdr;
4251 flush = __netdev_tx_sent_queue(txq, bytes,
4252 !stopped && netdev_xmit_more());
4254 if (flush || next_element >= queue->max_elements) {
4255 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4256 queue->bulk_count++;
4258 if (queue->bulk_count >= queue->bulk_max)
4262 qeth_flush_queue(queue);
4265 if (stopped && !qeth_out_queue_is_full(queue))
4266 netif_tx_start_queue(txq);
4270 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4271 struct sk_buff *skb, struct qeth_hdr *hdr,
4272 unsigned int offset, unsigned int hd_len,
4273 int elements_needed)
4275 unsigned int start_index = queue->next_buf_to_fill;
4276 struct qeth_qdio_out_buffer *buffer;
4277 unsigned int next_element;
4278 struct netdev_queue *txq;
4279 bool stopped = false;
4280 int flush_count = 0;
4284 buffer = queue->bufs[queue->next_buf_to_fill];
4286 /* Just a sanity check, the wake/stop logic should ensure that we always
4287 * get a free buffer.
4289 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4292 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4294 /* check if we need to switch packing state of this queue */
4295 qeth_switch_to_packing_if_needed(queue);
4296 if (queue->do_pack) {
4298 /* does packet fit in current buffer? */
4299 if (buffer->next_element_to_fill + elements_needed >
4300 queue->max_elements) {
4301 /* ... no -> set state PRIMED */
4302 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4304 queue->next_buf_to_fill =
4305 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4306 buffer = queue->bufs[queue->next_buf_to_fill];
4308 /* We stepped forward, so sanity-check again: */
4309 if (atomic_read(&buffer->state) !=
4310 QETH_QDIO_BUF_EMPTY) {
4311 qeth_flush_buffers(queue, start_index,
4319 if (buffer->next_element_to_fill == 0 &&
4320 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4321 /* If a TX completion happens right _here_ and misses to wake
4322 * the txq, then our re-check below will catch the race.
4324 QETH_TXQ_STAT_INC(queue, stopped);
4325 netif_tx_stop_queue(txq);
4329 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4330 buffer->bytes += qdisc_pkt_len(skb);
4331 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4334 QETH_TXQ_STAT_INC(queue, skbs_pack);
4335 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4337 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4338 queue->next_buf_to_fill =
4339 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4343 qeth_flush_buffers(queue, start_index, flush_count);
4347 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4349 if (stopped && !qeth_out_queue_is_full(queue))
4350 netif_tx_start_queue(txq);
4353 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4355 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4356 unsigned int payload_len, struct sk_buff *skb,
4357 unsigned int proto_len)
4359 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4361 ext->hdr_tot_len = sizeof(*ext);
4362 ext->imb_hdr_no = 1;
4364 ext->hdr_version = 1;
4366 ext->payload_len = payload_len;
4367 ext->mss = skb_shinfo(skb)->gso_size;
4368 ext->dg_hdr_len = proto_len;
4371 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4372 struct qeth_qdio_out_q *queue, int ipv,
4373 void (*fill_header)(struct qeth_qdio_out_q *queue,
4374 struct qeth_hdr *hdr, struct sk_buff *skb,
4375 int ipv, unsigned int data_len))
4377 unsigned int proto_len, hw_hdr_len;
4378 unsigned int frame_len = skb->len;
4379 bool is_tso = skb_is_gso(skb);
4380 unsigned int data_offset = 0;
4381 struct qeth_hdr *hdr = NULL;
4382 unsigned int hd_len = 0;
4383 unsigned int elements;
4387 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4388 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4390 hw_hdr_len = sizeof(struct qeth_hdr);
4391 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4394 rc = skb_cow_head(skb, hw_hdr_len);
4398 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4402 if (is_tso || !push_len) {
4403 /* HW header needs its own buffer element. */
4404 hd_len = hw_hdr_len + proto_len;
4405 data_offset = push_len + proto_len;
4407 memset(hdr, 0, hw_hdr_len);
4408 fill_header(queue, hdr, skb, ipv, frame_len);
4410 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4411 frame_len - proto_len, skb, proto_len);
4414 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4417 /* TODO: drop skb_orphan() once TX completion is fast enough */
4419 spin_lock(&queue->lock);
4420 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4422 spin_unlock(&queue->lock);
4425 if (rc && !push_len)
4426 kmem_cache_free(qeth_core_header_cache, hdr);
4430 EXPORT_SYMBOL_GPL(qeth_xmit);
4432 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4433 struct qeth_reply *reply, unsigned long data)
4435 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4436 struct qeth_ipacmd_setadpparms *setparms;
4438 QETH_CARD_TEXT(card, 4, "prmadpcb");
4440 setparms = &(cmd->data.setadapterparms);
4441 if (qeth_setadpparms_inspect_rc(cmd)) {
4442 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4443 setparms->data.mode = SET_PROMISC_MODE_OFF;
4445 card->info.promisc_mode = setparms->data.mode;
4446 return (cmd->hdr.return_code) ? -EIO : 0;
4449 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4451 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4452 SET_PROMISC_MODE_OFF;
4453 struct qeth_cmd_buffer *iob;
4454 struct qeth_ipa_cmd *cmd;
4456 QETH_CARD_TEXT(card, 4, "setprom");
4457 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4459 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4460 SETADP_DATA_SIZEOF(mode));
4463 cmd = __ipa_cmd(iob);
4464 cmd->data.setadapterparms.data.mode = mode;
4465 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4467 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4469 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4470 struct qeth_reply *reply, unsigned long data)
4472 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4473 struct qeth_ipacmd_setadpparms *adp_cmd;
4475 QETH_CARD_TEXT(card, 4, "chgmaccb");
4476 if (qeth_setadpparms_inspect_rc(cmd))
4479 adp_cmd = &cmd->data.setadapterparms;
4480 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4481 return -EADDRNOTAVAIL;
4483 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4484 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4485 return -EADDRNOTAVAIL;
4487 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4491 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4494 struct qeth_cmd_buffer *iob;
4495 struct qeth_ipa_cmd *cmd;
4497 QETH_CARD_TEXT(card, 4, "chgmac");
4499 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4500 SETADP_DATA_SIZEOF(change_addr));
4503 cmd = __ipa_cmd(iob);
4504 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4505 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4506 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4507 card->dev->dev_addr);
4508 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4512 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4514 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4515 struct qeth_reply *reply, unsigned long data)
4517 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4518 struct qeth_set_access_ctrl *access_ctrl_req;
4520 QETH_CARD_TEXT(card, 4, "setaccb");
4522 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4523 QETH_CARD_TEXT_(card, 2, "rc=%d",
4524 cmd->data.setadapterparms.hdr.return_code);
4525 if (cmd->data.setadapterparms.hdr.return_code !=
4526 SET_ACCESS_CTRL_RC_SUCCESS)
4527 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4528 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4529 cmd->data.setadapterparms.hdr.return_code);
4530 switch (qeth_setadpparms_inspect_rc(cmd)) {
4531 case SET_ACCESS_CTRL_RC_SUCCESS:
4532 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4533 dev_info(&card->gdev->dev,
4534 "QDIO data connection isolation is deactivated\n");
4536 dev_info(&card->gdev->dev,
4537 "QDIO data connection isolation is activated\n");
4539 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4540 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4543 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4544 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4547 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4548 dev_err(&card->gdev->dev, "Adapter does not "
4549 "support QDIO data connection isolation\n");
4551 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4552 dev_err(&card->gdev->dev,
4553 "Adapter is dedicated. "
4554 "QDIO data connection isolation not supported\n");
4556 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4557 dev_err(&card->gdev->dev,
4558 "TSO does not permit QDIO data connection isolation\n");
4560 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4561 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4562 "support reflective relay mode\n");
4564 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4565 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4566 "enabled at the adjacent switch port\n");
4568 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4569 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4570 "at the adjacent switch failed\n");
4571 /* benign error while disabling ISOLATION_MODE_FWD */
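/* Request the given QDIO data connection isolation mode from the adapter via
 * the SET_ACCESS_CONTROL adapter command; the callback above maps the
 * adapter's return code to the corresponding log messages.
 */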
4578 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4579 enum qeth_ipa_isolation_modes mode)
4582 struct qeth_cmd_buffer *iob;
4583 struct qeth_ipa_cmd *cmd;
4584 struct qeth_set_access_ctrl *access_ctrl_req;
4586 QETH_CARD_TEXT(card, 4, "setacctl");
4588 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4589 dev_err(&card->gdev->dev,
4590 "Adapter does not support QDIO data connection isolation\n");
4594 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4595 SETADP_DATA_SIZEOF(set_access_ctrl));
4598 cmd = __ipa_cmd(iob);
4599 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4600 access_ctrl_req->subcmd_code = mode;
4602 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4605 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4606 QETH_DBF_MESSAGE(3, "IPA SET_ACCESS_CTRL(%d) on device %x: sending failed\n",
4607 rc, CARD_DEVID(card));
4613 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4615 struct qeth_card *card;
4617 card = dev->ml_priv;
4618 QETH_CARD_TEXT(card, 4, "txtimeo");
4619 qeth_schedule_recovery(card);
4621 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
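/* qeth has no real MDIO bus; emulate the common MII registers from the card's
 * link type and statistics for the MII ioctl path.
 */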
4623 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4625 struct qeth_card *card = dev->ml_priv;
4629 case MII_BMCR: /* Basic mode control register */
4631 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4632 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4633 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4634 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4635 rc |= BMCR_SPEED100;
4637 case MII_BMSR: /* Basic mode status register */
4638 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4639 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4642 case MII_PHYSID1: /* PHYS ID 1 */
4643 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4645 rc = (rc >> 5) & 0xFFFF;
4647 case MII_PHYSID2: /* PHYS ID 2 */
4648 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4650 case MII_ADVERTISE: /* Advertisement control reg */
4653 case MII_LPA: /* Link partner ability reg */
4654 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4655 LPA_100BASE4 | LPA_LPACK;
4657 case MII_EXPANSION: /* Expansion register */
4659 case MII_DCOUNTER: /* disconnect counter */
4661 case MII_FCSCOUNTER: /* false carrier counter */
4663 case MII_NWAYTEST: /* N-way auto-neg test register */
4665 case MII_RERRCOUNTER: /* rx error counter */
4666 rc = card->stats.rx_length_errors +
4667 card->stats.rx_frame_errors +
4668 card->stats.rx_fifo_errors;
4670 case MII_SREVISION: /* silicon revision */
4672 case MII_RESV1: /* reserved 1 */
4674 case MII_LBRERROR: /* loopback, rx, bypass error */
4676 case MII_PHYADDR: /* physical address */
4678 case MII_RESV2: /* reserved 2 */
4680 case MII_TPISTATUS: /* TPI status for 10mbps */
4682 case MII_NCONFIG: /* network interface config */
4690 static int qeth_snmp_command_cb(struct qeth_card *card,
4691 struct qeth_reply *reply, unsigned long data)
4693 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4694 struct qeth_arp_query_info *qinfo = reply->param;
4695 struct qeth_ipacmd_setadpparms *adp_cmd;
4696 unsigned int data_len;
4699 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4701 if (cmd->hdr.return_code) {
4702 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4705 if (cmd->data.setadapterparms.hdr.return_code) {
4706 cmd->hdr.return_code =
4707 cmd->data.setadapterparms.hdr.return_code;
4708 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4712 adp_cmd = &cmd->data.setadapterparms;
4713 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4714 if (adp_cmd->hdr.seq_no == 1) {
4715 snmp_data = &adp_cmd->data.snmp;
4717 snmp_data = &adp_cmd->data.snmp.request;
4718 data_len -= offsetof(struct qeth_snmp_cmd, request);
4721 /* check if there is enough room in userspace */
4722 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4723 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4726 QETH_CARD_TEXT_(card, 4, "snore%i",
4727 cmd->data.setadapterparms.hdr.used_total);
4728 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4729 cmd->data.setadapterparms.hdr.seq_no);
4730 /* copy entries to user buffer */
4731 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4732 qinfo->udata_offset += data_len;
4734 if (cmd->data.setadapterparms.hdr.seq_no <
4735 cmd->data.setadapterparms.hdr.used_total)
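/* SIOC_QETH_ADP_SET_SNMP_CONTROL: copy the SNMP request from user space, send
 * it as a SET_SNMP_CONTROL adapter command and return the (possibly
 * multi-part) response to the caller.
 */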
4740 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4742 struct qeth_snmp_ureq __user *ureq;
4743 struct qeth_cmd_buffer *iob;
4744 unsigned int req_len;
4745 struct qeth_arp_query_info qinfo = {0, };
4748 QETH_CARD_TEXT(card, 3, "snmpcmd");
4750 if (IS_VM_NIC(card))
4753 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4757 ureq = (struct qeth_snmp_ureq __user *) udata;
4758 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4759 get_user(req_len, &ureq->hdr.req_len))
4762 /* Sanitize user input, to avoid overflows in iob size calculation: */
4763 if (req_len > QETH_BUFSIZE)
4766 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4770 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4771 &ureq->cmd, req_len)) {
4776 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4781 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4783 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4785 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4786 CARD_DEVID(card), rc);
4788 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4796 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4797 struct qeth_reply *reply,
4800 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4801 struct qeth_qoat_priv *priv = reply->param;
4804 QETH_CARD_TEXT(card, 3, "qoatcb");
4805 if (qeth_setadpparms_inspect_rc(cmd))
4808 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4810 if (resdatalen > (priv->buffer_len - priv->response_len))
4813 memcpy(priv->buffer + priv->response_len,
4814 &cmd->data.setadapterparms.hdr, resdatalen);
4815 priv->response_len += resdatalen;
4817 if (cmd->data.setadapterparms.hdr.seq_no <
4818 cmd->data.setadapterparms.hdr.used_total)
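/* SIOC_QETH_QUERY_OAT: collect the (possibly fragmented) QUERY_OAT reply in a
 * temporary buffer and copy it back to user space.
 */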
4823 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4826 struct qeth_cmd_buffer *iob;
4827 struct qeth_ipa_cmd *cmd;
4828 struct qeth_query_oat *oat_req;
4829 struct qeth_query_oat_data oat_data;
4830 struct qeth_qoat_priv priv;
4833 QETH_CARD_TEXT(card, 3, "qoatcmd");
4835 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4838 if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4841 priv.buffer_len = oat_data.buffer_len;
4842 priv.response_len = 0;
4843 priv.buffer = vzalloc(oat_data.buffer_len);
4847 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4848 SETADP_DATA_SIZEOF(query_oat));
4853 cmd = __ipa_cmd(iob);
4854 oat_req = &cmd->data.setadapterparms.data.query_oat;
4855 oat_req->subcmd_code = oat_data.command;
4857 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4859 tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4860 u64_to_user_ptr(oat_data.ptr);
4861 oat_data.response_len = priv.response_len;
4863 if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4864 copy_to_user(udata, &oat_data, sizeof(oat_data)))
4873 static int qeth_query_card_info_cb(struct qeth_card *card,
4874 struct qeth_reply *reply, unsigned long data)
4876 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4877 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4878 struct qeth_query_card_info *card_info;
4880 QETH_CARD_TEXT(card, 2, "qcrdincb");
4881 if (qeth_setadpparms_inspect_rc(cmd))
4884 card_info = &cmd->data.setadapterparms.data.card_info;
4885 carrier_info->card_type = card_info->card_type;
4886 carrier_info->port_mode = card_info->port_mode;
4887 carrier_info->port_speed = card_info->port_speed;
4891 int qeth_query_card_info(struct qeth_card *card,
4892 struct carrier_info *carrier_info)
4894 struct qeth_cmd_buffer *iob;
4896 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4897 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4899 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4902 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4903 (void *)carrier_info);
4907 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4908 * @card: pointer to a qeth_card
4911 * Returns: 0, if a MAC address has been set for the card's netdevice,
4912 * or a return code for the various error conditions.
4914 int qeth_vm_request_mac(struct qeth_card *card)
4916 struct diag26c_mac_resp *response;
4917 struct diag26c_mac_req *request;
4920 QETH_CARD_TEXT(card, 2, "vmreqmac");
4922 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4923 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4924 if (!request || !response) {
4929 request->resp_buf_len = sizeof(*response);
4930 request->resp_version = DIAG26C_VERSION2;
4931 request->op_code = DIAG26C_GET_MAC;
4932 request->devno = card->info.ddev_devno;
4934 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4935 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4936 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4939 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4941 if (request->resp_buf_len < sizeof(*response) ||
4942 response->version != request->resp_version) {
4944 QETH_CARD_TEXT(card, 2, "badresp");
4945 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4946 sizeof(request->resp_buf_len));
4947 } else if (!is_valid_ether_addr(response->mac)) {
4949 QETH_CARD_TEXT(card, 2, "badmac");
4950 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4952 ether_addr_copy(card->dev->dev_addr, response->mac);
4960 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
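/* Read the device's configuration data and QDIO SSQD block (temporarily
 * starting the data channel if it is offline) to detect optional features
 * such as Completion Queueing.
 */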
4962 static void qeth_determine_capabilities(struct qeth_card *card)
4964 struct qeth_channel *channel = &card->data;
4965 struct ccw_device *ddev = channel->ccwdev;
4967 int ddev_offline = 0;
4969 QETH_CARD_TEXT(card, 2, "detcapab");
4970 if (!ddev->online) {
4972 rc = qeth_start_channel(channel);
4974 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4979 rc = qeth_read_conf_data(card);
4981 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4982 CARD_DEVID(card), rc);
4983 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4987 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4989 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4991 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4992 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4993 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4994 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4995 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
4996 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
4997 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
4998 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
4999 dev_info(&card->gdev->dev,
5000 "Completion Queueing supported\n");
5002 card->options.cq = QETH_CQ_NOTAVAILABLE;
5007 if (ddev_offline == 1)
5008 qeth_stop_channel(channel);
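/* Cache the CCW device's identifiers (device number, cssid, iid, chid, ...)
 * in the card info for later use by commands and trace messages.
 */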
5013 static void qeth_read_ccw_conf_data(struct qeth_card *card)
5015 struct qeth_card_info *info = &card->info;
5016 struct ccw_device *cdev = CARD_DDEV(card);
5017 struct ccw_dev_id dev_id;
5019 QETH_CARD_TEXT(card, 2, "ccwconfd");
5020 ccw_device_get_id(cdev, &dev_id);
5022 info->ddev_devno = dev_id.devno;
5023 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5024 !ccw_device_get_iid(cdev, &info->iid) &&
5025 !ccw_device_get_chid(cdev, 0, &info->chid);
5026 info->ssid = dev_id.ssid;
5028 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5029 info->chid, info->chpid);
5031 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5032 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5033 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5034 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5035 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5036 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5037 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
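/* Build the qdio_initialize descriptor (SBAL pointer arrays, handlers, QIB
 * parameter field) and allocate/establish the QDIO queues on the data device.
 */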
5040 static int qeth_qdio_establish(struct qeth_card *card)
5042 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5043 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5044 struct qdio_initialize init_data;
5045 char *qib_param_field;
5049 QETH_CARD_TEXT(card, 2, "qdioest");
5051 qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5052 if (!qib_param_field) {
5054 goto out_free_nothing;
5057 qeth_create_qib_param_field(card, qib_param_field);
5058 qeth_create_qib_param_field_blkt(card, qib_param_field);
5060 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5061 if (card->options.cq == QETH_CQ_ENABLED)
5062 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5064 for (i = 0; i < card->qdio.no_out_queues; i++)
5065 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5067 memset(&init_data, 0, sizeof(struct qdio_initialize));
5068 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5070 init_data.qib_param_field_format = 0;
5071 init_data.qib_param_field = qib_param_field;
5072 init_data.no_input_qs = card->qdio.no_in_queues;
5073 init_data.no_output_qs = card->qdio.no_out_queues;
5074 init_data.input_handler = qeth_qdio_input_handler;
5075 init_data.output_handler = qeth_qdio_output_handler;
5076 init_data.irq_poll = qeth_qdio_poll;
5077 init_data.int_parm = (unsigned long) card;
5078 init_data.input_sbal_addr_array = in_sbal_ptrs;
5079 init_data.output_sbal_addr_array = out_sbal_ptrs;
5080 init_data.output_sbal_state_array = card->qdio.out_bufstates;
5081 init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
5083 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5084 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5085 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5086 init_data.no_output_qs);
5088 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5091 rc = qdio_establish(CARD_DDEV(card), &init_data);
5093 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5094 qdio_free(CARD_DDEV(card));
5098 switch (card->options.cq) {
5099 case QETH_CQ_ENABLED:
5100 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5102 case QETH_CQ_DISABLED:
5103 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5109 kfree(qib_param_field);
5114 static void qeth_core_free_card(struct qeth_card *card)
5116 QETH_CARD_TEXT(card, 2, "freecrd");
5118 unregister_service_level(&card->qeth_service_level);
5119 debugfs_remove_recursive(card->debugfs);
5120 qeth_put_cmd(card->read_cmd);
5121 destroy_workqueue(card->event_wq);
5122 dev_set_drvdata(&card->gdev->dev, NULL);
5126 void qeth_trace_features(struct qeth_card *card)
5128 QETH_CARD_TEXT(card, 2, "features");
5129 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5130 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5131 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5132 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5133 sizeof(card->info.diagass_support));
5135 EXPORT_SYMBOL_GPL(qeth_trace_features);
5137 static struct ccw_device_id qeth_ids[] = {
5138 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5139 .driver_info = QETH_CARD_TYPE_OSD},
5140 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5141 .driver_info = QETH_CARD_TYPE_IQD},
5142 #ifdef CONFIG_QETH_OSN
5143 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5144 .driver_info = QETH_CARD_TYPE_OSN},
5146 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5147 .driver_info = QETH_CARD_TYPE_OSM},
5148 #ifdef CONFIG_QETH_OSX
5149 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5150 .driver_info = QETH_CARD_TYPE_OSX},
5154 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5156 static struct ccw_driver qeth_ccw_driver = {
5158 .owner = THIS_MODULE,
5162 .probe = ccwgroup_probe_ccwdev,
5163 .remove = ccwgroup_remove_ccwdev,
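/* Bring the CCW channels online, run IDX activation and MPC initialization,
 * query the supported IPA assists and adapter parameters, and set up the
 * QDIO queues. *carrier_ok reports whether the LAN was seen as online.
 */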
5166 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5171 QETH_CARD_TEXT(card, 2, "hrdsetup");
5172 atomic_set(&card->force_alloc_skb, 0);
5173 rc = qeth_update_from_chp_desc(card);
5178 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5180 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5181 qeth_stop_channel(&card->data);
5182 qeth_stop_channel(&card->write);
5183 qeth_stop_channel(&card->read);
5184 qdio_free(CARD_DDEV(card));
5186 rc = qeth_start_channel(&card->read);
5189 rc = qeth_start_channel(&card->write);
5192 rc = qeth_start_channel(&card->data);
5196 if (rc == -ERESTARTSYS) {
5197 QETH_CARD_TEXT(card, 2, "break1");
5200 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5207 qeth_determine_capabilities(card);
5208 qeth_read_ccw_conf_data(card);
5209 qeth_idx_init(card);
5211 rc = qeth_idx_activate_read_channel(card);
5213 QETH_CARD_TEXT(card, 2, "break2");
5216 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5223 rc = qeth_idx_activate_write_channel(card);
5225 QETH_CARD_TEXT(card, 2, "break3");
5228 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5234 card->read_or_write_problem = 0;
5235 rc = qeth_mpc_initialize(card);
5237 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5241 rc = qeth_send_startlan(card);
5243 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5244 if (rc == -ENETDOWN) {
5245 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5246 *carrier_ok = false;
5254 card->options.ipa4.supported = 0;
5255 card->options.ipa6.supported = 0;
5256 card->options.adp.supported = 0;
5257 card->options.sbp.supported_funcs = 0;
5258 card->info.diagass_support = 0;
5259 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5262 if (qeth_is_supported(card, IPA_IPV6)) {
5263 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5267 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5268 rc = qeth_query_setadapterparms(card);
5270 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5274 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5275 rc = qeth_query_setdiagass(card);
5277 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5280 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5281 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5282 card->info.hwtrap = 0;
5284 if (card->options.isolation != ISOLATION_MODE_NONE) {
5285 rc = qeth_setadpparms_set_access_ctrl(card,
5286 card->options.isolation);
5291 rc = qeth_init_qdio_queues(card);
5293 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5299 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5300 "from an error on the device\n");
5301 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5302 CARD_DEVID(card), rc);
5305 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5307 static int qeth_set_online(struct qeth_card *card)
5311 mutex_lock(&card->discipline_mutex);
5312 mutex_lock(&card->conf_mutex);
5313 QETH_CARD_TEXT(card, 2, "setonlin");
5315 rc = card->discipline->set_online(card);
5317 mutex_unlock(&card->conf_mutex);
5318 mutex_unlock(&card->discipline_mutex);
5323 int qeth_set_offline(struct qeth_card *card, bool resetting)
5327 mutex_lock(&card->discipline_mutex);
5328 mutex_lock(&card->conf_mutex);
5329 QETH_CARD_TEXT(card, 3, "setoffl");
5331 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5332 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5333 card->info.hwtrap = 1;
5336 /* cancel any stalled cmd that might block the rtnl: */
5337 qeth_clear_ipacmd_list(card);
5340 card->info.open_when_online = card->dev->flags & IFF_UP;
5341 dev_close(card->dev);
5342 netif_device_detach(card->dev);
5343 netif_carrier_off(card->dev);
5346 card->discipline->set_offline(card);
5348 rc = qeth_stop_channel(&card->data);
5349 rc2 = qeth_stop_channel(&card->write);
5350 rc3 = qeth_stop_channel(&card->read);
5352 rc = (rc2) ? rc2 : rc3;
5354 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5355 qdio_free(CARD_DDEV(card));
5357 /* let user space know that the device is offline */
5358 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5360 mutex_unlock(&card->conf_mutex);
5361 mutex_unlock(&card->discipline_mutex);
5364 EXPORT_SYMBOL_GPL(qeth_set_offline);
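/* Recovery worker: take the card offline and bring it back online under the
 * QETH_RECOVER_THREAD, logging whether the recovery succeeded.
 */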
5366 static int qeth_do_reset(void *data)
5368 struct qeth_card *card = data;
5371 QETH_CARD_TEXT(card, 2, "recover1");
5372 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5374 QETH_CARD_TEXT(card, 2, "recover2");
5375 dev_warn(&card->gdev->dev,
5376 "A recovery process has been started for the device\n");
5378 qeth_set_offline(card, true);
5379 rc = qeth_set_online(card);
5381 dev_info(&card->gdev->dev,
5382 "Device successfully recovered!\n");
5384 ccwgroup_set_offline(card->gdev);
5385 dev_warn(&card->gdev->dev,
5386 "The qeth device driver failed to recover from an error on the device\n");
5388 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5389 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5393 #if IS_ENABLED(CONFIG_QETH_L3)
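/* Layer 3 devices deliver frames without an Ethernet header; reconstruct a
 * fake L2 header (and VLAN tag, if signalled) from the qeth_hdr so that the
 * stack sees a regular Ethernet frame.
 */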
5394 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5395 struct qeth_hdr *hdr)
5397 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5398 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5399 struct net_device *dev = skb->dev;
5401 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5402 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5403 "FAKELL", skb->len);
5407 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5408 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5410 unsigned char tg_addr[ETH_ALEN];
5412 skb_reset_network_header(skb);
5413 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5414 case QETH_CAST_MULTICAST:
5415 if (prot == ETH_P_IP)
5416 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5418 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5419 QETH_CARD_STAT_INC(card, rx_multicast);
5421 case QETH_CAST_BROADCAST:
5422 ether_addr_copy(tg_addr, dev->broadcast);
5423 QETH_CARD_STAT_INC(card, rx_multicast);
5426 if (card->options.sniffer)
5427 skb->pkt_type = PACKET_OTHERHOST;
5428 ether_addr_copy(tg_addr, dev->dev_addr);
5431 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5432 dev_hard_header(skb, dev, prot, tg_addr,
5433 &l3_hdr->next_hop.rx.src_mac, skb->len);
5435 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5439 /* copy VLAN tag from hdr into skb */
5440 if (!card->options.sniffer &&
5441 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5442 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5443 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5445 l3_hdr->next_hop.rx.vlan_id;
5447 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5452 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5453 struct qeth_hdr *hdr, bool uses_frags)
5455 struct napi_struct *napi = &card->napi;
5458 switch (hdr->hdr.l2.id) {
5459 case QETH_HEADER_TYPE_OSN:
5460 skb_push(skb, sizeof(*hdr));
5461 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5462 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5463 QETH_CARD_STAT_INC(card, rx_packets);
5465 card->osn_info.data_cb(skb);
5467 #if IS_ENABLED(CONFIG_QETH_L3)
5468 case QETH_HEADER_TYPE_LAYER3:
5469 qeth_l3_rebuild_skb(card, skb, hdr);
5470 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5473 case QETH_HEADER_TYPE_LAYER2:
5474 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5479 napi_free_frags(napi);
5481 dev_kfree_skb_any(skb);
5485 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5486 skb->ip_summed = CHECKSUM_UNNECESSARY;
5487 QETH_CARD_STAT_INC(card, rx_skb_csum);
5489 skb->ip_summed = CHECKSUM_NONE;
5492 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5493 QETH_CARD_STAT_INC(card, rx_packets);
5494 if (skb_is_nonlinear(skb)) {
5495 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5496 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5497 skb_shinfo(skb)->nr_frags);
5501 napi_gro_frags(napi);
5503 skb->protocol = eth_type_trans(skb, skb->dev);
5504 napi_gro_receive(napi, skb);
5508 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5510 struct page *page = virt_to_page(data);
5511 unsigned int next_frag;
5513 next_frag = skb_shinfo(skb)->nr_frags;
5515 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5519 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5521 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
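/* Extract one packet from an RX buffer, starting at *element_no / *__offset.
 * Depending on its size and rx_copybreak, the data is either copied into a
 * linear skb or attached as page fragments; the cursors are advanced so that
 * the next call continues behind this packet.
 */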
5524 static int qeth_extract_skb(struct qeth_card *card,
5525 struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5528 struct qeth_priv *priv = netdev_priv(card->dev);
5529 struct qdio_buffer *buffer = qethbuffer->buffer;
5530 struct napi_struct *napi = &card->napi;
5531 struct qdio_buffer_element *element;
5532 unsigned int linear_len = 0;
5533 bool uses_frags = false;
5534 int offset = *__offset;
5535 bool use_rx_sg = false;
5536 unsigned int headroom;
5537 struct qeth_hdr *hdr;
5538 struct sk_buff *skb;
5541 element = &buffer->element[*element_no];
5544 /* qeth_hdr must not cross element boundaries */
5545 while (element->length < offset + sizeof(struct qeth_hdr)) {
5546 if (qeth_is_last_sbale(element))
5552 hdr = phys_to_virt(element->addr) + offset;
5553 offset += sizeof(*hdr);
5556 switch (hdr->hdr.l2.id) {
5557 case QETH_HEADER_TYPE_LAYER2:
5558 skb_len = hdr->hdr.l2.pkt_length;
5559 linear_len = ETH_HLEN;
5562 case QETH_HEADER_TYPE_LAYER3:
5563 skb_len = hdr->hdr.l3.length;
5564 if (!IS_LAYER3(card)) {
5565 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5569 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5570 linear_len = ETH_HLEN;
5575 if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5576 linear_len = sizeof(struct ipv6hdr);
5578 linear_len = sizeof(struct iphdr);
5579 headroom = ETH_HLEN;
5581 case QETH_HEADER_TYPE_OSN:
5582 skb_len = hdr->hdr.osn.pdu_length;
5583 if (!IS_OSN(card)) {
5584 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5588 linear_len = skb_len;
5589 headroom = sizeof(struct qeth_hdr);
5592 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5593 QETH_CARD_STAT_INC(card, rx_frame_errors);
5595 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5597 /* Can't determine packet length, drop the whole buffer. */
5598 return -EPROTONOSUPPORT;
5601 if (skb_len < linear_len) {
5602 QETH_CARD_STAT_INC(card, rx_dropped_runt);
5606 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5607 (skb_len > READ_ONCE(priv->rx_copybreak) &&
5608 !atomic_read(&card->force_alloc_skb) &&
5612 /* QETH_CQ_ENABLED only: */
5613 if (qethbuffer->rx_skb &&
5614 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5615 skb = qethbuffer->rx_skb;
5616 qethbuffer->rx_skb = NULL;
5620 skb = napi_get_frags(napi);
5622 /* -ENOMEM, no point in falling back further. */
5623 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5627 if (skb_tailroom(skb) >= linear_len + headroom) {
5632 netdev_info_once(card->dev,
5633 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5634 linear_len + headroom, skb_tailroom(skb));
5635 /* Shouldn't happen. Don't optimize, fall back to linear skb. */
5638 linear_len = skb_len;
5639 skb = napi_alloc_skb(napi, linear_len + headroom);
5641 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5647 skb_reserve(skb, headroom);
5650 int data_len = min(skb_len, (int)(element->length - offset));
5651 char *data = phys_to_virt(element->addr) + offset;
5653 skb_len -= data_len;
5656 /* Extract data from current element: */
5657 if (skb && data_len) {
5659 unsigned int copy_len;
5661 copy_len = min_t(unsigned int, linear_len,
5664 skb_put_data(skb, data, copy_len);
5665 linear_len -= copy_len;
5666 data_len -= copy_len;
5671 qeth_create_skb_frag(skb, data, data_len);
5674 /* Step forward to next element: */
5676 if (qeth_is_last_sbale(element)) {
5677 QETH_CARD_TEXT(card, 4, "unexeob");
5678 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5681 napi_free_frags(napi);
5683 dev_kfree_skb_any(skb);
5684 QETH_CARD_STAT_INC(card,
5694 /* This packet was skipped, go get another one: */
5698 *element_no = element - &buffer->element[0];
5701 qeth_receive_skb(card, skb, hdr, uses_frags);
5705 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5706 struct qeth_qdio_buffer *buf, bool *done)
5708 unsigned int work_done = 0;
5711 if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5712 &card->rx.e_offset)) {
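/* RX part of the NAPI poll: fetch completed input buffers from QDIO, extract
 * their packets and refill the input queue as the backlog of empty buffers
 * grows.
 */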
5724 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5726 struct qeth_rx *ctx = &card->rx;
5727 unsigned int work_done = 0;
5729 while (budget > 0) {
5730 struct qeth_qdio_buffer *buffer;
5731 unsigned int skbs_done = 0;
5734 /* Fetch completed RX buffers: */
5735 if (!card->rx.b_count) {
5736 card->rx.qdio_err = 0;
5737 card->rx.b_count = qdio_get_next_buffers(
5738 card->data.ccwdev, 0, &card->rx.b_index,
5739 &card->rx.qdio_err);
5740 if (card->rx.b_count <= 0) {
5741 card->rx.b_count = 0;
5746 /* Process one completed RX buffer: */
5747 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5748 if (!(card->rx.qdio_err &&
5749 qeth_check_qdio_errors(card, buffer->buffer,
5750 card->rx.qdio_err, "qinerr")))
5751 skbs_done = qeth_extract_skbs(card, budget, buffer,
5756 work_done += skbs_done;
5757 budget -= skbs_done;
5760 QETH_CARD_STAT_INC(card, rx_bufs);
5761 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5762 buffer->pool_entry = NULL;
5765 ctx->bufs_refill -= qeth_rx_refill_queue(card,
5768 /* Step forward to next buffer: */
5769 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5770 card->rx.buf_element = 0;
5771 card->rx.e_offset = 0;
5778 static void qeth_cq_poll(struct qeth_card *card)
5780 unsigned int work_done = 0;
5782 while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5783 unsigned int start, error;
5786 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5791 qeth_qdio_cq_handler(card, error, 1, start, completed);
5792 work_done += completed;
5796 int qeth_poll(struct napi_struct *napi, int budget)
5798 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5799 unsigned int work_done;
5801 work_done = qeth_rx_poll(card, budget);
5803 if (card->options.cq == QETH_CQ_ENABLED)
5807 struct qeth_rx *ctx = &card->rx;
5809 /* Process any substantial refill backlog: */
5810 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5812 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5813 if (work_done >= budget)
5817 if (napi_complete_done(napi, work_done) &&
5818 qdio_start_irq(CARD_DDEV(card)))
5819 napi_schedule(napi);
5823 EXPORT_SYMBOL_GPL(qeth_poll);
5825 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5826 unsigned int bidx, bool error, int budget)
5828 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5829 u8 sflags = buffer->buffer->element[15].sflags;
5830 struct qeth_card *card = queue->card;
5832 if (queue->bufstates && (queue->bufstates[bidx].flags &
5833 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5834 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5836 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
5837 QETH_QDIO_BUF_PENDING) ==
5838 QETH_QDIO_BUF_PRIMED)
5839 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5841 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5843 /* prepare the queue slot for re-use: */
5844 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5845 if (qeth_init_qdio_out_buf(queue, bidx)) {
5846 QETH_CARD_TEXT(card, 2, "outofbuf");
5847 qeth_schedule_recovery(card);
5853 if (card->options.cq == QETH_CQ_ENABLED)
5854 qeth_notify_skbs(queue, buffer,
5855 qeth_compute_cq_notification(sflags, 0));
5856 qeth_clear_output_buffer(queue, buffer, error, budget);
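/* Per-TX-queue NAPI poll (IQD): reap completed output buffers, update BQL
 * accounting and wake the txq if xmit had stopped it on a full queue.
 */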
5859 static int qeth_tx_poll(struct napi_struct *napi, int budget)
5861 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5862 unsigned int queue_no = queue->queue_no;
5863 struct qeth_card *card = queue->card;
5864 struct net_device *dev = card->dev;
5865 unsigned int work_done = 0;
5866 struct netdev_queue *txq;
5868 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5871 unsigned int start, error, i;
5872 unsigned int packets = 0;
5873 unsigned int bytes = 0;
5876 if (qeth_out_queue_is_empty(queue)) {
5877 napi_complete(napi);
5881 /* Give the CPU a breather: */
5882 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5883 QETH_TXQ_STAT_INC(queue, completion_yield);
5884 if (napi_complete_done(napi, 0))
5885 napi_schedule(napi);
5889 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5891 if (completed <= 0) {
5892 /* Ensure we see TX completion for pending work: */
5893 if (napi_complete_done(napi, 0))
5894 qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
5898 for (i = start; i < start + completed; i++) {
5899 struct qeth_qdio_out_buffer *buffer;
5900 unsigned int bidx = QDIO_BUFNR(i);
5902 buffer = queue->bufs[bidx];
5903 packets += buffer->frames;
5904 bytes += buffer->bytes;
5906 qeth_handle_send_error(card, buffer, error);
5907 qeth_iqd_tx_complete(queue, bidx, error, budget);
5908 qeth_cleanup_handled_pending(queue, bidx, false);
5911 netdev_tx_completed_queue(txq, packets, bytes);
5912 atomic_sub(completed, &queue->used_buffers);
5913 work_done += completed;
5915 /* xmit may have observed the full-condition, but not yet
5916 * stopped the txq. In that case the wake-up below won't trigger.
5917 * That's fine: before returning, xmit re-checks the txq's fill
5918 * level and wakes the queue itself if needed.
5920 if (netif_tx_queue_stopped(txq) &&
5921 !qeth_out_queue_is_full(queue))
5922 netif_tx_wake_queue(txq);
5926 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5928 if (!cmd->hdr.return_code)
5929 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5930 return cmd->hdr.return_code;
5933 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5934 struct qeth_reply *reply,
5937 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5938 struct qeth_ipa_caps *caps = reply->param;
5940 if (qeth_setassparms_inspect_rc(cmd))
5943 caps->supported = cmd->data.setassparms.data.caps.supported;
5944 caps->enabled = cmd->data.setassparms.data.caps.enabled;
5948 int qeth_setassparms_cb(struct qeth_card *card,
5949 struct qeth_reply *reply, unsigned long data)
5951 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5953 QETH_CARD_TEXT(card, 4, "defadpcb");
5955 if (cmd->hdr.return_code)
5958 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5959 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5960 card->options.ipa4.enabled = cmd->hdr.assists.enabled;
5961 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5962 card->options.ipa6.enabled = cmd->hdr.assists.enabled;
5965 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5967 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5968 enum qeth_ipa_funcs ipa_func,
5970 unsigned int data_length,
5971 enum qeth_prot_versions prot)
5973 struct qeth_ipacmd_setassparms *setassparms;
5974 struct qeth_ipacmd_setassparms_hdr *hdr;
5975 struct qeth_cmd_buffer *iob;
5977 QETH_CARD_TEXT(card, 4, "getasscm");
5978 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
5980 offsetof(struct qeth_ipacmd_setassparms,
5985 setassparms = &__ipa_cmd(iob)->data.setassparms;
5986 setassparms->assist_no = ipa_func;
5988 hdr = &setassparms->hdr;
5989 hdr->length = sizeof(*hdr) + data_length;
5990 hdr->command_code = cmd_code;
5993 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5995 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5996 enum qeth_ipa_funcs ipa_func,
5997 u16 cmd_code, u32 *data,
5998 enum qeth_prot_versions prot)
6000 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6001 struct qeth_cmd_buffer *iob;
6003 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6004 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6009 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6010 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6012 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6014 static void qeth_unregister_dbf_views(void)
6017 for (x = 0; x < QETH_DBF_INFOS; x++) {
6018 debug_unregister(qeth_dbf[x].id);
6019 qeth_dbf[x].id = NULL;
6023 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6025 char dbf_txt_buf[32];
6028 if (!debug_level_enabled(id, level))
6030 va_start(args, fmt);
6031 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6033 debug_text_event(id, level, dbf_txt_buf);
6035 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6037 static int qeth_register_dbf_views(void)
6042 for (x = 0; x < QETH_DBF_INFOS; x++) {
6043 /* register the areas */
6044 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6048 if (qeth_dbf[x].id == NULL) {
6049 qeth_unregister_dbf_views();
6053 /* register a view */
6054 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6056 qeth_unregister_dbf_views();
6060 /* set a passing level */
6061 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6067 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
6069 int qeth_core_load_discipline(struct qeth_card *card,
6070 enum qeth_discipline_id discipline)
6072 mutex_lock(&qeth_mod_mutex);
6073 switch (discipline) {
6074 case QETH_DISCIPLINE_LAYER3:
6075 card->discipline = try_then_request_module(
6076 symbol_get(qeth_l3_discipline), "qeth_l3");
6078 case QETH_DISCIPLINE_LAYER2:
6079 card->discipline = try_then_request_module(
6080 symbol_get(qeth_l2_discipline), "qeth_l2");
6085 mutex_unlock(&qeth_mod_mutex);
6087 if (!card->discipline) {
6088 dev_err(&card->gdev->dev, "There is no kernel module to "
6089 "support discipline %d\n", discipline);
6093 card->options.layer = discipline;
6097 void qeth_core_free_discipline(struct qeth_card *card)
6099 if (IS_LAYER2(card))
6100 symbol_put(qeth_l2_discipline);
6102 symbol_put(qeth_l3_discipline);
6103 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6104 card->discipline = NULL;
6107 const struct device_type qeth_generic_devtype = {
6108 .name = "qeth_generic",
6109 .groups = qeth_generic_attr_groups,
6111 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
6113 static const struct device_type qeth_osn_devtype = {
6115 .groups = qeth_osn_attr_groups,
6118 #define DBF_NAME_LEN 20
6120 struct qeth_dbf_entry {
6121 char dbf_name[DBF_NAME_LEN];
6122 debug_info_t *dbf_info;
6123 struct list_head dbf_list;
6126 static LIST_HEAD(qeth_dbf_list);
6127 static DEFINE_MUTEX(qeth_dbf_list_mutex);
6129 static debug_info_t *qeth_get_dbf_entry(char *name)
6131 struct qeth_dbf_entry *entry;
6132 debug_info_t *rc = NULL;
6134 mutex_lock(&qeth_dbf_list_mutex);
6135 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6136 if (strcmp(entry->dbf_name, name) == 0) {
6137 rc = entry->dbf_info;
6141 mutex_unlock(&qeth_dbf_list_mutex);
6145 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6147 struct qeth_dbf_entry *new_entry;
6149 card->debug = debug_register(name, 2, 1, 8);
6151 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6154 if (debug_register_view(card->debug, &debug_hex_ascii_view))
6156 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6159 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6160 new_entry->dbf_info = card->debug;
6161 mutex_lock(&qeth_dbf_list_mutex);
6162 list_add(&new_entry->dbf_list, &qeth_dbf_list);
6163 mutex_unlock(&qeth_dbf_list_mutex);
6168 debug_unregister(card->debug);
6173 static void qeth_clear_dbf_list(void)
6175 struct qeth_dbf_entry *entry, *tmp;
6177 mutex_lock(&qeth_dbf_list_mutex);
6178 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6179 list_del(&entry->dbf_list);
6180 debug_unregister(entry->dbf_info);
6183 mutex_unlock(&qeth_dbf_list_mutex);
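/* Allocate the net_device matching the card type: multi-queue hsi%d for
 * HiperSockets, osn%d for OSN, an Ethernet device otherwise.
 */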
6186 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6188 struct net_device *dev;
6189 struct qeth_priv *priv;
6191 switch (card->info.type) {
6192 case QETH_CARD_TYPE_IQD:
6193 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6194 ether_setup, QETH_MAX_OUT_QUEUES, 1);
6196 case QETH_CARD_TYPE_OSM:
6197 dev = alloc_etherdev(sizeof(*priv));
6199 case QETH_CARD_TYPE_OSN:
6200 dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
6204 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6210 priv = netdev_priv(dev);
6211 priv->rx_copybreak = QETH_RX_COPYBREAK;
6213 dev->ml_priv = card;
6214 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6215 dev->min_mtu = IS_OSN(card) ? 64 : 576;
6216 /* initialized when device first goes online: */
6219 SET_NETDEV_DEV(dev, &card->gdev->dev);
6220 netif_carrier_off(dev);
6222 dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
6228 struct net_device *qeth_clone_netdev(struct net_device *orig)
6230 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6235 clone->dev_port = orig->dev_port;
6239 int qeth_setup_netdev(struct qeth_card *card)
6241 struct net_device *dev = card->dev;
6242 unsigned int num_tx_queues;
6244 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6245 dev->hw_features |= NETIF_F_SG;
6246 dev->vlan_features |= NETIF_F_SG;
6249 dev->features |= NETIF_F_SG;
6250 num_tx_queues = QETH_IQD_MIN_TXQ;
6251 } else if (IS_VM_NIC(card)) {
6254 num_tx_queues = dev->real_num_tx_queues;
6257 return qeth_set_real_num_tx_queues(card, num_tx_queues);
6259 EXPORT_SYMBOL_GPL(qeth_setup_netdev);
6261 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6263 struct qeth_card *card;
6266 enum qeth_discipline_id enforced_disc;
6267 char dbf_name[DBF_NAME_LEN];
6269 QETH_DBF_TEXT(SETUP, 2, "probedev");
6272 if (!get_device(dev))
6275 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6277 card = qeth_alloc_card(gdev);
6279 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6284 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6285 dev_name(&gdev->dev));
6286 card->debug = qeth_get_dbf_entry(dbf_name);
6288 rc = qeth_add_dbf_entry(card, dbf_name);
6293 qeth_setup_card(card);
6294 card->dev = qeth_alloc_netdev(card);
6300 qeth_determine_capabilities(card);
6301 qeth_set_blkt_defaults(card);
6303 card->qdio.no_out_queues = card->dev->num_tx_queues;
6304 rc = qeth_update_from_chp_desc(card);
6308 enforced_disc = qeth_enforce_discipline(card);
6309 switch (enforced_disc) {
6310 case QETH_DISCIPLINE_UNDETERMINED:
6311 gdev->dev.type = &qeth_generic_devtype;
6314 card->info.layer_enforced = true;
6315 rc = qeth_core_load_discipline(card, enforced_disc);
6319 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
6320 card->discipline->devtype;
6321 rc = card->discipline->setup(card->gdev);
6330 qeth_core_free_discipline(card);
6333 free_netdev(card->dev);
6335 qeth_core_free_card(card);
6341 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6343 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6345 QETH_CARD_TEXT(card, 2, "removedv");
6347 if (card->discipline) {
6348 card->discipline->remove(gdev);
6349 qeth_core_free_discipline(card);
6352 qeth_free_qdio_queues(card);
6354 free_netdev(card->dev);
6355 qeth_core_free_card(card);
6356 put_device(&gdev->dev);
6359 static int qeth_core_set_online(struct ccwgroup_device *gdev)
6361 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6363 enum qeth_discipline_id def_discipline;
6365 if (!card->discipline) {
6366 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6367 QETH_DISCIPLINE_LAYER2;
6368 rc = qeth_core_load_discipline(card, def_discipline);
6371 rc = card->discipline->setup(card->gdev);
6373 qeth_core_free_discipline(card);
6378 rc = qeth_set_online(card);
6383 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6385 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6387 return qeth_set_offline(card, false);
6390 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6392 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6393 qeth_set_allowed_threads(card, 0, 1);
6394 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6395 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6396 qeth_qdio_clear_card(card, 0);
6397 qeth_drain_output_queues(card);
6398 qdio_free(CARD_DDEV(card));
6401 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6406 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6409 return err ? err : count;
6411 static DRIVER_ATTR_WO(group);
6413 static struct attribute *qeth_drv_attrs[] = {
6414 &driver_attr_group.attr,
6417 static struct attribute_group qeth_drv_attr_group = {
6418 .attrs = qeth_drv_attrs,
6420 static const struct attribute_group *qeth_drv_attr_groups[] = {
6421 &qeth_drv_attr_group,
6425 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6427 .groups = qeth_drv_attr_groups,
6428 .owner = THIS_MODULE,
6431 .ccw_driver = &qeth_ccw_driver,
6432 .setup = qeth_core_probe_device,
6433 .remove = qeth_core_remove_device,
6434 .set_online = qeth_core_set_online,
6435 .set_offline = qeth_core_set_offline,
6436 .shutdown = qeth_core_shutdown,
6439 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
6441 struct ccwgroup_device *gdev;
6442 struct qeth_card *card;
6444 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
6448 card = dev_get_drvdata(&gdev->dev);
6449 put_device(&gdev->dev);
6452 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
6454 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6456 struct qeth_card *card = dev->ml_priv;
6457 struct mii_ioctl_data *mii_data;
6461 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6462 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
6464 case SIOC_QETH_GET_CARD_TYPE:
6465 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6470 mii_data = if_mii(rq);
6471 mii_data->phy_id = 0;
6474 mii_data = if_mii(rq);
6475 if (mii_data->phy_id != 0)
6478 mii_data->val_out = qeth_mdio_read(dev,
6479 mii_data->phy_id, mii_data->reg_num);
6481 case SIOC_QETH_QUERY_OAT:
6482 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
6485 if (card->discipline->do_ioctl)
6486 rc = card->discipline->do_ioctl(dev, rq, cmd);
6491 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6494 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6496 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6499 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6500 u32 *features = reply->param;
6502 if (qeth_setassparms_inspect_rc(cmd))
6505 *features = cmd->data.setassparms.data.flags_32bit;
6509 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6510 enum qeth_prot_versions prot)
6512 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
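/* Enable a checksum offload assist: query its capabilities with ASS_START,
 * then enable the required TCP/UDP (and, for L3 IPv4 TX, IP header) flags
 * with ASS_ENABLE and verify that the adapter actually activated them.
 */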
6516 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6517 enum qeth_prot_versions prot, u8 *lp2lp)
6519 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6520 struct qeth_cmd_buffer *iob;
6521 struct qeth_ipa_caps caps;
6525 /* some L3 HW requires combined L3+L4 csum offload: */
6526 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6527 cstype == IPA_OUTBOUND_CHECKSUM)
6528 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6530 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6535 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6539 if ((required_features & features) != required_features) {
6540 qeth_set_csum_off(card, cstype, prot);
6544 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6545 SETASS_DATA_SIZEOF(flags_32bit),
6548 qeth_set_csum_off(card, cstype, prot);
6552 if (features & QETH_IPA_CHECKSUM_LP2LP)
6553 required_features |= QETH_IPA_CHECKSUM_LP2LP;
6554 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6555 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6557 qeth_set_csum_off(card, cstype, prot);
6561 if (!qeth_ipa_caps_supported(&caps, required_features) ||
6562 !qeth_ipa_caps_enabled(&caps, required_features)) {
6563 qeth_set_csum_off(card, cstype, prot);
6567 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6568 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6571 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6576 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6577 enum qeth_prot_versions prot, u8 *lp2lp)
6579 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6580 qeth_set_csum_off(card, cstype, prot);
6583 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6586 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6587 struct qeth_tso_start_data *tso_data = reply->param;
6589 if (qeth_setassparms_inspect_rc(cmd))
6592 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6593 tso_data->supported = cmd->data.setassparms.data.tso.supported;
6597 static int qeth_set_tso_off(struct qeth_card *card,
6598 enum qeth_prot_versions prot)
6600 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6601 IPA_CMD_ASS_STOP, NULL, prot);
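/* Enable TSO with the same two-step handshake: ASS_START to query MSS and
 * supported modes, then ASS_ENABLE for QETH_IPA_LARGE_SEND_TCP.
 */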
6604 static int qeth_set_tso_on(struct qeth_card *card,
6605 enum qeth_prot_versions prot)
6607 struct qeth_tso_start_data tso_data;
6608 struct qeth_cmd_buffer *iob;
6609 struct qeth_ipa_caps caps;
6612 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6613 IPA_CMD_ASS_START, 0, prot);
6617 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6621 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6622 qeth_set_tso_off(card, prot);
6626 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6628 SETASS_DATA_SIZEOF(caps), prot);
6630 qeth_set_tso_off(card, prot);
6634 /* enable TSO capability */
6635 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6636 QETH_IPA_LARGE_SEND_TCP;
6637 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6639 qeth_set_tso_off(card, prot);
6643 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6644 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6645 qeth_set_tso_off(card, prot);
6649 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6654 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6655 enum qeth_prot_versions prot)
6657 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6660 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6662 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6665 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6666 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6667 QETH_PROT_IPV4, NULL);
6668 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6669 /* at most one Offload Assist is available, so the combined rc is simply rc_ipv4 */
6672 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6673 QETH_PROT_IPV6, NULL);
6676 /* enable: success if any Assist is active */
6677 return (rc_ipv6) ? rc_ipv4 : 0;
6679 /* disable: failure if any Assist is still active */
6680 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6684 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6685 * @dev: a net_device
6687 void qeth_enable_hw_features(struct net_device *dev)
6689 struct qeth_card *card = dev->ml_priv;
6690 netdev_features_t features;
6692 features = dev->features;
6693 /* force-off any feature that might need an IPA sequence.
6694 * netdev_update_features() will restart them.
6696 dev->features &= ~dev->hw_features;
6697 /* toggle VLAN filter, so that VIDs are re-programmed: */
6698 if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6699 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6700 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6702 netdev_update_features(dev);
6703 if (features != dev->features)
6704 dev_warn(&card->gdev->dev,
6705 "Device recovery failed to restore all offload features\n");
6707 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6709 static void qeth_check_restricted_features(struct qeth_card *card,
6710 netdev_features_t changed,
6711 netdev_features_t actual)
6713 netdev_features_t ipv6_features = NETIF_F_TSO6;
6714 netdev_features_t ipv4_features = NETIF_F_TSO;
6716 if (!card->info.has_lp2lp_cso_v6)
6717 ipv6_features |= NETIF_F_IPV6_CSUM;
6718 if (!card->info.has_lp2lp_cso_v4)
6719 ipv4_features |= NETIF_F_IP_CSUM;
6721 if ((changed & ipv6_features) && !(actual & ipv6_features))
6722 qeth_flush_local_addrs6(card);
6723 if ((changed & ipv4_features) && !(actual & ipv4_features))
6724 qeth_flush_local_addrs4(card);
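/* ndo_set_features: toggle the matching IPA assist for each changed offload
 * bit; on partial failure, dev->features is adjusted to reflect what was
 * actually applied.
 */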
6727 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6729 struct qeth_card *card = dev->ml_priv;
6730 netdev_features_t changed = dev->features ^ features;
6733 QETH_CARD_TEXT(card, 2, "setfeat");
6734 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6736 if ((changed & NETIF_F_IP_CSUM)) {
6737 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6738 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6739 &card->info.has_lp2lp_cso_v4);
6741 changed ^= NETIF_F_IP_CSUM;
6743 if (changed & NETIF_F_IPV6_CSUM) {
6744 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6745 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6746 &card->info.has_lp2lp_cso_v6);
6748 changed ^= NETIF_F_IPV6_CSUM;
6750 if (changed & NETIF_F_RXCSUM) {
6751 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6753 changed ^= NETIF_F_RXCSUM;
6755 if (changed & NETIF_F_TSO) {
6756 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6759 changed ^= NETIF_F_TSO;
6761 if (changed & NETIF_F_TSO6) {
6762 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6765 changed ^= NETIF_F_TSO6;
6768 qeth_check_restricted_features(card, dev->features ^ features,
6769 dev->features ^ changed);
6771 /* everything changed successfully? */
6772 if ((dev->features ^ features) == changed)
6774 /* something went wrong. save changed features and return error */
6775 dev->features ^= changed;
6778 EXPORT_SYMBOL_GPL(qeth_set_features);
6780 netdev_features_t qeth_fix_features(struct net_device *dev,
6781 netdev_features_t features)
6783 struct qeth_card *card = dev->ml_priv;
6785 QETH_CARD_TEXT(card, 2, "fixfeat");
6786 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6787 features &= ~NETIF_F_IP_CSUM;
6788 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6789 features &= ~NETIF_F_IPV6_CSUM;
6790 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6791 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6792 features &= ~NETIF_F_RXCSUM;
6793 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6794 features &= ~NETIF_F_TSO;
6795 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6796 features &= ~NETIF_F_TSO6;
6798 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6801 EXPORT_SYMBOL_GPL(qeth_fix_features);
6803 netdev_features_t qeth_features_check(struct sk_buff *skb,
6804 struct net_device *dev,
6805 netdev_features_t features)
6807 struct qeth_card *card = dev->ml_priv;
6809 /* Traffic with local next-hop is not eligible for some offloads: */
6810 if (skb->ip_summed == CHECKSUM_PARTIAL &&
6811 READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6812 netdev_features_t restricted = 0;
6814 if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
6815 restricted |= NETIF_F_ALL_TSO;
6817 switch (vlan_get_protocol(skb)) {
6818 case htons(ETH_P_IP):
6819 if (!card->info.has_lp2lp_cso_v4)
6820 restricted |= NETIF_F_IP_CSUM;
6822 if (restricted && qeth_next_hop_is_local_v4(card, skb))
6823 features &= ~restricted;
6825 case htons(ETH_P_IPV6):
6826 if (!card->info.has_lp2lp_cso_v6)
6827 restricted |= NETIF_F_IPV6_CSUM;
6829 if (restricted && qeth_next_hop_is_local_v6(card, skb))
6830 features &= ~restricted;
6837 /* GSO segmentation builds skbs with
6838 * a (small) linear part for the headers, and
6839 * page frags for the data.
6840 * Compared to a linear skb, the header-only part consumes an
6841 * additional buffer element. This reduces buffer utilization, and
6842 * hurts throughput. So compress small segments into one element.
6844 if (netif_needs_gso(skb, features)) {
6845 /* match skb_segment(): */
6846 unsigned int doffset = skb->data - skb_mac_header(skb);
6847 unsigned int hsize = skb_shinfo(skb)->gso_size;
6848 unsigned int hroom = skb_headroom(skb);
6850 /* linearize only if resulting skb allocations are order-0: */
6851 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6852 features &= ~NETIF_F_SG;
6855 return vlan_features_check(skb, features);
6857 EXPORT_SYMBOL_GPL(qeth_features_check);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
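/* Example of the resulting map (added for illustration, assuming
 * QETH_IQD_MIN_UCAST_TXQ is 1 and ucast_txqs is 3): traffic class 0 then
 * spans txq 1..3 and every priority 0..TC_BITMASK resolves to that class,
 * so stack-side queue selection can only land on a unicast queue; the
 * dedicated mcast queue remains reachable solely via qeth_iqd_select_queue().
 */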
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);
	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
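/* Note (added for illustration): if netif_set_real_num_tx_queues() rejects
 * the new count, the prio-TC map installed above would still describe
 * 'count - 1' unicast queues; re-running qeth_iqd_set_prio_tc_map() with the
 * unchanged dev->real_num_tx_queues - 1 rolls the mapping back to match the
 * queue count that is actually in effect.
 */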
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
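/* Note (added for illustration): netdev_pick_tx() selects from all real TX
 * queues and may therefore return the mcast queue's index for unicast
 * traffic; remapping that hit to QETH_IQD_MIN_UCAST_TXQ keeps the mcast
 * queue reserved while still honouring XPS/flow-hash decisions for every
 * other index.
 */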
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
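/* Note (added for illustration): napi_schedule() only raises NET_RX_SOFTIRQ;
 * it is the closing local_bh_enable() in qeth_open() that lets the pending
 * softirq run, so the freshly enabled NAPI instances start polling right at
 * open time instead of waiting for the first device interrupt.
 */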
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");

	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
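/* Note (added for illustration): the error labels above follow the usual
 * kernel unwind pattern: each label releases exactly what was set up before
 * the failing step, in reverse order, so an init failure at any point leaves
 * no debugfs entries, caches, root device or driver registrations behind.
 */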
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");