drivers/s390/net/qeth_core_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    Copyright IBM Corp. 2007, 2009
4  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5  *               Frank Pavlic <fpavlic@de.ibm.com>,
6  *               Thomas Spatzier <tspat@de.ibm.com>,
7  *               Frank Blaschka <frank.blaschka@de.ibm.com>
8  */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/mii.h>
23 #include <linux/kthread.h>
24 #include <linux/slab.h>
25 #include <linux/if_vlan.h>
26 #include <linux/netdevice.h>
27 #include <linux/netdev_features.h>
28 #include <linux/skbuff.h>
29 #include <linux/vmalloc.h>
30
31 #include <net/iucv/af_iucv.h>
32 #include <net/dsfield.h>
33
34 #include <asm/ebcdic.h>
35 #include <asm/chpid.h>
36 #include <asm/io.h>
37 #include <asm/sysinfo.h>
38 #include <asm/diag.h>
39 #include <asm/cio.h>
40 #include <asm/ccwdev.h>
41 #include <asm/cpcmd.h>
42
43 #include "qeth_core.h"
44
45 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
46         /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
47         /*                   N  P  A    M  L  V                      H  */
48         [QETH_DBF_SETUP] = {"qeth_setup",
49                                 8, 1,   8, 5, &debug_hex_ascii_view, NULL},
50         [QETH_DBF_MSG]   = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
51                             &debug_sprintf_view, NULL},
52         [QETH_DBF_CTRL]  = {"qeth_control",
53                 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
54 };
55 EXPORT_SYMBOL_GPL(qeth_dbf);
56
57 struct kmem_cache *qeth_core_header_cache;
58 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
59 static struct kmem_cache *qeth_qdio_outbuf_cache;
60
61 static struct device *qeth_core_root_dev;
62 static struct lock_class_key qdio_out_skb_queue_key;
63
64 static void qeth_send_control_data_cb(struct qeth_card *card,
65                                       struct qeth_channel *channel,
66                                       struct qeth_cmd_buffer *iob);
67 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
68 static void qeth_free_buffer_pool(struct qeth_card *);
69 static int qeth_qdio_establish(struct qeth_card *);
70 static void qeth_free_qdio_buffers(struct qeth_card *);
71 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
72                 struct qeth_qdio_out_buffer *buf,
73                 enum iucv_tx_notify notification);
74 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
75 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
76
77 struct workqueue_struct *qeth_wq;
78 EXPORT_SYMBOL_GPL(qeth_wq);
79
80 int qeth_card_hw_is_reachable(struct qeth_card *card)
81 {
82         return (card->state == CARD_STATE_SOFTSETUP) ||
83                 (card->state == CARD_STATE_UP);
84 }
85 EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
86
87 static void qeth_close_dev_handler(struct work_struct *work)
88 {
89         struct qeth_card *card;
90
91         card = container_of(work, struct qeth_card, close_dev_work);
92         QETH_CARD_TEXT(card, 2, "cldevhdl");
93         rtnl_lock();
94         dev_close(card->dev);
95         rtnl_unlock();
96         ccwgroup_set_offline(card->gdev);
97 }
98
99 void qeth_close_dev(struct qeth_card *card)
100 {
101         QETH_CARD_TEXT(card, 2, "cldevsubm");
102         queue_work(qeth_wq, &card->close_dev_work);
103 }
104 EXPORT_SYMBOL_GPL(qeth_close_dev);
105
106 static const char *qeth_get_cardname(struct qeth_card *card)
107 {
108         if (card->info.guestlan) {
109                 switch (card->info.type) {
110                 case QETH_CARD_TYPE_OSD:
111                         return " Virtual NIC QDIO";
112                 case QETH_CARD_TYPE_IQD:
113                         return " Virtual NIC Hiper";
114                 case QETH_CARD_TYPE_OSM:
115                         return " Virtual NIC QDIO - OSM";
116                 case QETH_CARD_TYPE_OSX:
117                         return " Virtual NIC QDIO - OSX";
118                 default:
119                         return " unknown";
120                 }
121         } else {
122                 switch (card->info.type) {
123                 case QETH_CARD_TYPE_OSD:
124                         return " OSD Express";
125                 case QETH_CARD_TYPE_IQD:
126                         return " HiperSockets";
127                 case QETH_CARD_TYPE_OSN:
128                         return " OSN QDIO";
129                 case QETH_CARD_TYPE_OSM:
130                         return " OSM QDIO";
131                 case QETH_CARD_TYPE_OSX:
132                         return " OSX QDIO";
133                 default:
134                         return " unknown";
135                 }
136         }
137         return " n/a";
138 }
139
140 /* max length to be returned: 14 */
141 const char *qeth_get_cardname_short(struct qeth_card *card)
142 {
143         if (card->info.guestlan) {
144                 switch (card->info.type) {
145                 case QETH_CARD_TYPE_OSD:
146                         return "Virt.NIC QDIO";
147                 case QETH_CARD_TYPE_IQD:
148                         return "Virt.NIC Hiper";
149                 case QETH_CARD_TYPE_OSM:
150                         return "Virt.NIC OSM";
151                 case QETH_CARD_TYPE_OSX:
152                         return "Virt.NIC OSX";
153                 default:
154                         return "unknown";
155                 }
156         } else {
157                 switch (card->info.type) {
158                 case QETH_CARD_TYPE_OSD:
159                         switch (card->info.link_type) {
160                         case QETH_LINK_TYPE_FAST_ETH:
161                                 return "OSD_100";
162                         case QETH_LINK_TYPE_HSTR:
163                                 return "HSTR";
164                         case QETH_LINK_TYPE_GBIT_ETH:
165                                 return "OSD_1000";
166                         case QETH_LINK_TYPE_10GBIT_ETH:
167                                 return "OSD_10GIG";
168                         case QETH_LINK_TYPE_25GBIT_ETH:
169                                 return "OSD_25GIG";
170                         case QETH_LINK_TYPE_LANE_ETH100:
171                                 return "OSD_FE_LANE";
172                         case QETH_LINK_TYPE_LANE_TR:
173                                 return "OSD_TR_LANE";
174                         case QETH_LINK_TYPE_LANE_ETH1000:
175                                 return "OSD_GbE_LANE";
176                         case QETH_LINK_TYPE_LANE:
177                                 return "OSD_ATM_LANE";
178                         default:
179                                 return "OSD_Express";
180                         }
181                 case QETH_CARD_TYPE_IQD:
182                         return "HiperSockets";
183                 case QETH_CARD_TYPE_OSN:
184                         return "OSN";
185                 case QETH_CARD_TYPE_OSM:
186                         return "OSM_1000";
187                 case QETH_CARD_TYPE_OSX:
188                         return "OSX_10GIG";
189                 default:
190                         return "unknown";
191                 }
192         }
193         return "n/a";
194 }
195
196 void qeth_set_recovery_task(struct qeth_card *card)
197 {
198         card->recovery_task = current;
199 }
200 EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
201
202 void qeth_clear_recovery_task(struct qeth_card *card)
203 {
204         card->recovery_task = NULL;
205 }
206 EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
207
208 static bool qeth_is_recovery_task(const struct qeth_card *card)
209 {
210         return card->recovery_task == current;
211 }
212
213 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
214                          int clear_start_mask)
215 {
216         unsigned long flags;
217
218         spin_lock_irqsave(&card->thread_mask_lock, flags);
219         card->thread_allowed_mask = threads;
220         if (clear_start_mask)
221                 card->thread_start_mask &= threads;
222         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
223         wake_up(&card->wait_q);
224 }
225 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
226
227 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
228 {
229         unsigned long flags;
230         int rc = 0;
231
232         spin_lock_irqsave(&card->thread_mask_lock, flags);
233         rc = (card->thread_running_mask & threads);
234         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
235         return rc;
236 }
237 EXPORT_SYMBOL_GPL(qeth_threads_running);
238
239 int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
240 {
241         if (qeth_is_recovery_task(card))
242                 return 0;
243         return wait_event_interruptible(card->wait_q,
244                         qeth_threads_running(card, threads) == 0);
245 }
246 EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
247
248 void qeth_clear_working_pool_list(struct qeth_card *card)
249 {
250         struct qeth_buffer_pool_entry *pool_entry, *tmp;
251
252         QETH_CARD_TEXT(card, 5, "clwrklst");
253         list_for_each_entry_safe(pool_entry, tmp,
254                             &card->qdio.in_buf_pool.entry_list, list) {
255                 list_del(&pool_entry->list);
256         }
257 }
258 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
259
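/* Allocate init_pool.buf_count pool entries, each backed by one page per
 * buffer element; on any allocation failure, everything allocated so far
 * is freed again and -ENOMEM is returned.
 */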
260 static int qeth_alloc_buffer_pool(struct qeth_card *card)
261 {
262         struct qeth_buffer_pool_entry *pool_entry;
263         void *ptr;
264         int i, j;
265
266         QETH_CARD_TEXT(card, 5, "alocpool");
267         for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
268                 pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
269                 if (!pool_entry) {
270                         qeth_free_buffer_pool(card);
271                         return -ENOMEM;
272                 }
273                 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
274                         ptr = (void *) __get_free_page(GFP_KERNEL);
275                         if (!ptr) {
276                                 while (j > 0)
277                                         free_page((unsigned long)
278                                                   pool_entry->elements[--j]);
279                                 kfree(pool_entry);
280                                 qeth_free_buffer_pool(card);
281                                 return -ENOMEM;
282                         }
283                         pool_entry->elements[j] = ptr;
284                 }
285                 list_add(&pool_entry->init_list,
286                          &card->qdio.init_pool.entry_list);
287         }
288         return 0;
289 }
290
291 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
292 {
293         QETH_CARD_TEXT(card, 2, "realcbp");
294
295         if ((card->state != CARD_STATE_DOWN) &&
296             (card->state != CARD_STATE_RECOVER))
297                 return -EPERM;
298
299         /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
300         qeth_clear_working_pool_list(card);
301         qeth_free_buffer_pool(card);
302         card->qdio.in_buf_pool.buf_count = bufcnt;
303         card->qdio.init_pool.buf_count = bufcnt;
304         return qeth_alloc_buffer_pool(card);
305 }
306 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
307
308 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
309 {
310         if (!q)
311                 return;
312
313         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
314         kfree(q);
315 }
316
317 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
318 {
319         struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
320         int i;
321
322         if (!q)
323                 return NULL;
324
325         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
326                 kfree(q);
327                 return NULL;
328         }
329
330         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
331                 q->bufs[i].buffer = q->qdio_bufs[i];
332
333         QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
334         return q;
335 }
336
337 static int qeth_cq_init(struct qeth_card *card)
338 {
339         int rc;
340
341         if (card->options.cq == QETH_CQ_ENABLED) {
342                 QETH_DBF_TEXT(SETUP, 2, "cqinit");
343                 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
344                                    QDIO_MAX_BUFFERS_PER_Q);
345                 card->qdio.c_q->next_buf_to_init = 127;
346                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
347                              card->qdio.no_in_queues - 1, 0,
348                              127);
349                 if (rc) {
350                         QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
351                         goto out;
352                 }
353         }
354         rc = 0;
355 out:
356         return rc;
357 }
358
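/* With QETH_CQ_ENABLED, allocate the completion queue (the second input
 * queue) and the per-buffer output state array; otherwise fall back to a
 * single input queue.
 */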
359 static int qeth_alloc_cq(struct qeth_card *card)
360 {
361         int rc;
362
363         if (card->options.cq == QETH_CQ_ENABLED) {
364                 int i;
365                 struct qdio_outbuf_state *outbuf_states;
366
367                 QETH_DBF_TEXT(SETUP, 2, "cqon");
368                 card->qdio.c_q = qeth_alloc_qdio_queue();
369                 if (!card->qdio.c_q) {
370                         rc = -1;
371                         goto kmsg_out;
372                 }
373                 card->qdio.no_in_queues = 2;
374                 card->qdio.out_bufstates =
375                         kcalloc(card->qdio.no_out_queues *
376                                         QDIO_MAX_BUFFERS_PER_Q,
377                                 sizeof(struct qdio_outbuf_state),
378                                 GFP_KERNEL);
379                 outbuf_states = card->qdio.out_bufstates;
380                 if (outbuf_states == NULL) {
381                         rc = -1;
382                         goto free_cq_out;
383                 }
384                 for (i = 0; i < card->qdio.no_out_queues; ++i) {
385                         card->qdio.out_qs[i]->bufstates = outbuf_states;
386                         outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
387                 }
388         } else {
389                 QETH_DBF_TEXT(SETUP, 2, "nocq");
390                 card->qdio.c_q = NULL;
391                 card->qdio.no_in_queues = 1;
392         }
393         QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
394         rc = 0;
395 out:
396         return rc;
397 free_cq_out:
398         qeth_free_qdio_queue(card->qdio.c_q);
399         card->qdio.c_q = NULL;
400 kmsg_out:
401         dev_err(&card->gdev->dev, "Failed to create completion queue\n");
402         goto out;
403 }
404
405 static void qeth_free_cq(struct qeth_card *card)
406 {
407         if (card->qdio.c_q) {
408                 --card->qdio.no_in_queues;
409                 qeth_free_qdio_queue(card->qdio.c_q);
410                 card->qdio.c_q = NULL;
411         }
412         kfree(card->qdio.out_bufstates);
413         card->qdio.out_bufstates = NULL;
414 }
415
416 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
417                                                         int delayed)
418 {
419         enum iucv_tx_notify n;
420
421         switch (sbalf15) {
422         case 0:
423                 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
424                 break;
425         case 4:
426         case 16:
427         case 17:
428         case 18:
429                 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
430                         TX_NOTIFY_UNREACHABLE;
431                 break;
432         default:
433                 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
434                         TX_NOTIFY_GENERALERROR;
435                 break;
436         }
437
438         return n;
439 }
440
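/* Walk the next_pending chain of the buffer at @bidx and free every chained
 * buffer whose deferred completion has already been handled (or all of them
 * if @forced_cleanup is set). In the forced/recovery case, re-initialize the
 * slot itself if it is still in the HANDLED_DELAYED state.
 */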
441 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
442                                          int forced_cleanup)
443 {
444         if (q->card->options.cq != QETH_CQ_ENABLED)
445                 return;
446
447         if (q->bufs[bidx]->next_pending != NULL) {
448                 struct qeth_qdio_out_buffer *head = q->bufs[bidx];
449                 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
450
451                 while (c) {
452                         if (forced_cleanup ||
453                             atomic_read(&c->state) ==
454                               QETH_QDIO_BUF_HANDLED_DELAYED) {
455                                 struct qeth_qdio_out_buffer *f = c;
456                                 QETH_CARD_TEXT(f->q->card, 5, "fp");
457                                 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
458                                 /* release here to avoid interleaving between
459                                    outbound tasklet and inbound tasklet
460                                    regarding notifications and lifecycle */
461                                 qeth_release_skbs(c);
462
463                                 c = f->next_pending;
464                                 WARN_ON_ONCE(head->next_pending != f);
465                                 head->next_pending = c;
466                                 kmem_cache_free(qeth_qdio_outbuf_cache, f);
467                         } else {
468                                 head = c;
469                                 c = c->next_pending;
470                         }
471
472                 }
473         }
474         if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
475                                         QETH_QDIO_BUF_HANDLED_DELAYED)) {
476                 /* for recovery situations */
477                 qeth_init_qdio_out_buf(q, bidx);
478                 QETH_CARD_TEXT(q->card, 2, "clprecov");
479         }
480 }
481
482
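/* Process the asynchronous operation block (AOB) that the completion queue
 * delivered for a TX buffer with deferred completion: derive the IUCV TX
 * notification from the buffer state and the AOB return code, notify the
 * queued skbs, free dangling header-cache elements and mark the buffer as
 * handled.
 */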
483 static void qeth_qdio_handle_aob(struct qeth_card *card,
484                                  unsigned long phys_aob_addr)
485 {
486         struct qaob *aob;
487         struct qeth_qdio_out_buffer *buffer;
488         enum iucv_tx_notify notification;
489         unsigned int i;
490
491         aob = (struct qaob *) phys_to_virt(phys_aob_addr);
492         QETH_CARD_TEXT(card, 5, "haob");
493         QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
494         buffer = (struct qeth_qdio_out_buffer *) aob->user1;
495         QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
496
497         if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
498                            QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
499                 notification = TX_NOTIFY_OK;
500         } else {
501                 WARN_ON_ONCE(atomic_read(&buffer->state) !=
502                                                         QETH_QDIO_BUF_PENDING);
503                 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
504                 notification = TX_NOTIFY_DELAYED_OK;
505         }
506
507         if (aob->aorc != 0)  {
508                 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
509                 notification = qeth_compute_cq_notification(aob->aorc, 1);
510         }
511         qeth_notify_skbs(buffer->q, buffer, notification);
512
513         /* Free dangling allocations. The attached skbs are handled by
514          * qeth_cleanup_handled_pending().
515          */
516         for (i = 0;
517              i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
518              i++) {
519                 if (aob->sba[i] && buffer->is_header[i])
520                         kmem_cache_free(qeth_core_header_cache,
521                                         (void *) aob->sba[i]);
522         }
523         atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
524
525         qdio_release_aob(aob);
526 }
527
528 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
529 {
530         return card->options.cq == QETH_CQ_ENABLED &&
531             card->qdio.c_q != NULL &&
532             queue != 0 &&
533             queue == card->qdio.no_in_queues - 1;
534 }
535
536 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
537 {
538         ccw->cmd_code = cmd_code;
539         ccw->flags = CCW_FLAG_SLI;
540         ccw->count = len;
541         ccw->cda = (__u32) __pa(data);
542 }
543
544 static int __qeth_issue_next_read(struct qeth_card *card)
545 {
546         struct qeth_channel *channel = &card->read;
547         struct qeth_cmd_buffer *iob;
548         int rc;
549
550         QETH_CARD_TEXT(card, 5, "issnxrd");
551         if (channel->state != CH_STATE_UP)
552                 return -EIO;
553         iob = qeth_get_buffer(channel);
554         if (!iob) {
555                 dev_warn(&card->gdev->dev, "The qeth device driver "
556                         "failed to recover an error on the device\n");
557                 QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
558                                  CARD_DEVID(card));
559                 return -ENOMEM;
560         }
561         qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
562         QETH_CARD_TEXT(card, 6, "noirqpnd");
563         rc = ccw_device_start(channel->ccwdev, channel->ccw,
564                               (addr_t) iob, 0, 0);
565         if (rc) {
566                 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
567                                  rc, CARD_DEVID(card));
568                 atomic_set(&channel->irq_pending, 0);
569                 card->read_or_write_problem = 1;
570                 qeth_schedule_recovery(card);
571                 wake_up(&card->wait_q);
572         }
573         return rc;
574 }
575
576 static int qeth_issue_next_read(struct qeth_card *card)
577 {
578         int ret;
579
580         spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
581         ret = __qeth_issue_next_read(card);
582         spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
583
584         return ret;
585 }
586
587 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
588 {
589         struct qeth_reply *reply;
590
591         reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
592         if (reply) {
593                 refcount_set(&reply->refcnt, 1);
594                 atomic_set(&reply->received, 0);
595         }
596         return reply;
597 }
598
599 static void qeth_get_reply(struct qeth_reply *reply)
600 {
601         refcount_inc(&reply->refcnt);
602 }
603
604 static void qeth_put_reply(struct qeth_reply *reply)
605 {
606         if (refcount_dec_and_test(&reply->refcnt))
607                 kfree(reply);
608 }
609
610 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
611                 struct qeth_card *card)
612 {
613         const char *ipa_name;
614         int com = cmd->hdr.command;
615         ipa_name = qeth_get_ipa_cmd_name(com);
616
617         if (rc)
618                 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
619                                  ipa_name, com, CARD_DEVID(card), rc,
620                                  qeth_get_ipa_msg(rc));
621         else
622                 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
623                                  ipa_name, com, CARD_DEVID(card));
624 }
625
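/* Inspect incoming IPA data: log replies and hand them back to the caller,
 * and handle unsolicited events (STOPLAN/STARTLAN, bridgeport and address
 * change notifications) in place. Returns the command if the caller still
 * needs to process it, or NULL if it was consumed here.
 */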
626 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
627                                                 struct qeth_ipa_cmd *cmd)
628 {
629         QETH_CARD_TEXT(card, 5, "chkipad");
630
631         if (IS_IPA_REPLY(cmd)) {
632                 if (cmd->hdr.command != IPA_CMD_SETCCID &&
633                     cmd->hdr.command != IPA_CMD_DELCCID &&
634                     cmd->hdr.command != IPA_CMD_MODCCID &&
635                     cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
636                         qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
637                 return cmd;
638         }
639
640         /* handle unsolicited event: */
641         switch (cmd->hdr.command) {
642         case IPA_CMD_STOPLAN:
643                 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
644                         dev_err(&card->gdev->dev,
645                                 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
646                                 QETH_CARD_IFNAME(card));
647                         qeth_close_dev(card);
648                 } else {
649                         dev_warn(&card->gdev->dev,
650                                  "The link for interface %s on CHPID 0x%X failed\n",
651                                  QETH_CARD_IFNAME(card), card->info.chpid);
652                         qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
653                         netif_carrier_off(card->dev);
654                 }
655                 return NULL;
656         case IPA_CMD_STARTLAN:
657                 dev_info(&card->gdev->dev,
658                          "The link for %s on CHPID 0x%X has been restored\n",
659                          QETH_CARD_IFNAME(card), card->info.chpid);
660                 if (card->info.hwtrap)
661                         card->info.hwtrap = 2;
662                 qeth_schedule_recovery(card);
663                 return NULL;
664         case IPA_CMD_SETBRIDGEPORT_IQD:
665         case IPA_CMD_SETBRIDGEPORT_OSA:
666         case IPA_CMD_ADDRESS_CHANGE_NOTIF:
667                 if (card->discipline->control_event_handler(card, cmd))
668                         return cmd;
669                 return NULL;
670         case IPA_CMD_MODCCID:
671                 return cmd;
672         case IPA_CMD_REGISTER_LOCAL_ADDR:
673                 QETH_CARD_TEXT(card, 3, "irla");
674                 return NULL;
675         case IPA_CMD_UNREGISTER_LOCAL_ADDR:
676                 QETH_CARD_TEXT(card, 3, "urla");
677                 return NULL;
678         default:
679                 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
680                 return cmd;
681         }
682 }
683
684 void qeth_clear_ipacmd_list(struct qeth_card *card)
685 {
686         struct qeth_reply *reply, *r;
687         unsigned long flags;
688
689         QETH_CARD_TEXT(card, 4, "clipalst");
690
691         spin_lock_irqsave(&card->lock, flags);
692         list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
693                 qeth_get_reply(reply);
694                 reply->rc = -EIO;
695                 atomic_inc(&reply->received);
696                 list_del_init(&reply->list);
697                 wake_up(&reply->wait_q);
698                 qeth_put_reply(reply);
699         }
700         spin_unlock_irqrestore(&card->lock, flags);
701 }
702 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
703
704 static int qeth_check_idx_response(struct qeth_card *card,
705         unsigned char *buffer)
706 {
707         if (!buffer)
708                 return 0;
709
710         QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
711         if ((buffer[2] & 0xc0) == 0xc0) {
712                 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
713                                  buffer[4]);
714                 QETH_CARD_TEXT(card, 2, "ckidxres");
715                 QETH_CARD_TEXT(card, 2, " idxterm");
716                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
717                 if (buffer[4] == 0xf6) {
718                         dev_err(&card->gdev->dev,
719                         "The qeth device is not configured "
720                         "for the OSI layer required by z/VM\n");
721                         return -EPERM;
722                 }
723                 return -EIO;
724         }
725         return 0;
726 }
727
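/* Scan the channel's command buffers for a free one, lock and clear it.
 * Callers hold channel->iob_lock (see qeth_get_buffer()).
 */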
728 static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
729 {
730         __u8 index;
731
732         index = channel->io_buf_no;
733         do {
734                 if (channel->iob[index].state == BUF_STATE_FREE) {
735                         channel->iob[index].state = BUF_STATE_LOCKED;
736                         channel->io_buf_no = (channel->io_buf_no + 1) %
737                                 QETH_CMD_BUFFER_NO;
738                         memset(channel->iob[index].data, 0, QETH_BUFSIZE);
739                         return channel->iob + index;
740                 }
741                 index = (index + 1) % QETH_CMD_BUFFER_NO;
742         } while (index != channel->io_buf_no);
743
744         return NULL;
745 }
746
747 void qeth_release_buffer(struct qeth_channel *channel,
748                 struct qeth_cmd_buffer *iob)
749 {
750         unsigned long flags;
751
752         spin_lock_irqsave(&channel->iob_lock, flags);
753         iob->state = BUF_STATE_FREE;
754         iob->callback = qeth_send_control_data_cb;
755         iob->rc = 0;
756         spin_unlock_irqrestore(&channel->iob_lock, flags);
757         wake_up(&channel->wait_q);
758 }
759 EXPORT_SYMBOL_GPL(qeth_release_buffer);
760
761 static void qeth_release_buffer_cb(struct qeth_card *card,
762                                    struct qeth_channel *channel,
763                                    struct qeth_cmd_buffer *iob)
764 {
765         qeth_release_buffer(channel, iob);
766 }
767
768 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
769 {
770         struct qeth_cmd_buffer *buffer = NULL;
771         unsigned long flags;
772
773         spin_lock_irqsave(&channel->iob_lock, flags);
774         buffer = __qeth_get_buffer(channel);
775         spin_unlock_irqrestore(&channel->iob_lock, flags);
776         return buffer;
777 }
778
779 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
780 {
781         struct qeth_cmd_buffer *buffer;
782         wait_event(channel->wait_q,
783                    ((buffer = qeth_get_buffer(channel)) != NULL));
784         return buffer;
785 }
786 EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
787
788 void qeth_clear_cmd_buffers(struct qeth_channel *channel)
789 {
790         int cnt;
791
792         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
793                 qeth_release_buffer(channel, &channel->iob[cnt]);
794         channel->io_buf_no = 0;
795 }
796 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
797
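/* Default completion callback for control-channel I/O: validate the IDX
 * response, filter the IPA data, and match the buffer against the entries
 * on cmd_waiter_list so that the issuing thread is woken up.
 */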
798 static void qeth_send_control_data_cb(struct qeth_card *card,
799                                       struct qeth_channel *channel,
800                                       struct qeth_cmd_buffer *iob)
801 {
802         struct qeth_ipa_cmd *cmd = NULL;
803         struct qeth_reply *reply, *r;
804         unsigned long flags;
805         int keep_reply;
806         int rc = 0;
807
808         QETH_CARD_TEXT(card, 4, "sndctlcb");
809         rc = qeth_check_idx_response(card, iob->data);
810         switch (rc) {
811         case 0:
812                 break;
813         case -EIO:
814                 qeth_clear_ipacmd_list(card);
815                 qeth_schedule_recovery(card);
816                 /* fall through */
817         default:
818                 goto out;
819         }
820
821         if (IS_IPA(iob->data)) {
822                 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
823                 cmd = qeth_check_ipa_data(card, cmd);
824                 if (!cmd)
825                         goto out;
826                 if (IS_OSN(card) && card->osn_info.assist_cb &&
827                     cmd->hdr.command != IPA_CMD_STARTLAN) {
828                         card->osn_info.assist_cb(card->dev, cmd);
829                         goto out;
830                 }
831         } else {
832                 /* non-IPA commands should only flow during initialization */
833                 if (card->state != CARD_STATE_DOWN)
834                         goto out;
835         }
836
837         spin_lock_irqsave(&card->lock, flags);
838         list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
839                 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
840                     ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
841                         qeth_get_reply(reply);
842                         list_del_init(&reply->list);
843                         spin_unlock_irqrestore(&card->lock, flags);
844                         keep_reply = 0;
845                         if (reply->callback != NULL) {
846                                 if (cmd) {
847                                         reply->offset = (__u16)((char *)cmd -
848                                                         (char *)iob->data);
849                                         keep_reply = reply->callback(card,
850                                                         reply,
851                                                         (unsigned long)cmd);
852                                 } else
853                                         keep_reply = reply->callback(card,
854                                                         reply,
855                                                         (unsigned long)iob);
856                         }
857                         if (cmd)
858                                 reply->rc = (u16) cmd->hdr.return_code;
859                         else if (iob->rc)
860                                 reply->rc = iob->rc;
861                         if (keep_reply) {
862                                 spin_lock_irqsave(&card->lock, flags);
863                                 list_add_tail(&reply->list,
864                                               &card->cmd_waiter_list);
865                                 spin_unlock_irqrestore(&card->lock, flags);
866                         } else {
867                                 atomic_inc(&reply->received);
868                                 wake_up(&reply->wait_q);
869                         }
870                         qeth_put_reply(reply);
871                         goto out;
872                 }
873         }
874         spin_unlock_irqrestore(&card->lock, flags);
875 out:
876         memcpy(&card->seqno.pdu_hdr_ack,
877                 QETH_PDU_HEADER_SEQ_NO(iob->data),
878                 QETH_SEQ_NO_LENGTH);
879         qeth_release_buffer(channel, iob);
880 }
881
882 static int qeth_set_thread_start_bit(struct qeth_card *card,
883                 unsigned long thread)
884 {
885         unsigned long flags;
886
887         spin_lock_irqsave(&card->thread_mask_lock, flags);
888         if (!(card->thread_allowed_mask & thread) ||
889               (card->thread_start_mask & thread)) {
890                 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
891                 return -EPERM;
892         }
893         card->thread_start_mask |= thread;
894         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
895         return 0;
896 }
897
898 void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
899 {
900         unsigned long flags;
901
902         spin_lock_irqsave(&card->thread_mask_lock, flags);
903         card->thread_start_mask &= ~thread;
904         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
905         wake_up(&card->wait_q);
906 }
907 EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
908
909 void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
910 {
911         unsigned long flags;
912
913         spin_lock_irqsave(&card->thread_mask_lock, flags);
914         card->thread_running_mask &= ~thread;
915         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
916         wake_up_all(&card->wait_q);
917 }
918 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
919
920 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
921 {
922         unsigned long flags;
923         int rc = 0;
924
925         spin_lock_irqsave(&card->thread_mask_lock, flags);
926         if (card->thread_start_mask & thread) {
927                 if ((card->thread_allowed_mask & thread) &&
928                     !(card->thread_running_mask & thread)) {
929                         rc = 1;
930                         card->thread_start_mask &= ~thread;
931                         card->thread_running_mask |= thread;
932                 } else
933                         rc = -EPERM;
934         }
935         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
936         return rc;
937 }
938
939 int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
940 {
941         int rc = 0;
942
943         wait_event(card->wait_q,
944                    (rc = __qeth_do_run_thread(card, thread)) >= 0);
945         return rc;
946 }
947 EXPORT_SYMBOL_GPL(qeth_do_run_thread);
948
949 void qeth_schedule_recovery(struct qeth_card *card)
950 {
951         QETH_CARD_TEXT(card, 2, "startrec");
952         if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
953                 schedule_work(&card->kernel_thread_starter);
954 }
955 EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
956
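/* Evaluate channel and device status from the irb; returns non-zero when
 * the status indicates a problem that requires recovery by the caller.
 */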
957 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
958                             struct irb *irb)
959 {
960         int dstat, cstat;
961         char *sense;
962
963         sense = (char *) irb->ecw;
964         cstat = irb->scsw.cmd.cstat;
965         dstat = irb->scsw.cmd.dstat;
966
967         if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
968                      SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
969                      SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
970                 QETH_CARD_TEXT(card, 2, "CGENCHK");
971                 dev_warn(&cdev->dev, "The qeth device driver "
972                         "failed to recover an error on the device\n");
973                 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
974                                  CCW_DEVID(cdev), dstat, cstat);
975                 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
976                                 16, 1, irb, 64, 1);
977                 return 1;
978         }
979
980         if (dstat & DEV_STAT_UNIT_CHECK) {
981                 if (sense[SENSE_RESETTING_EVENT_BYTE] &
982                     SENSE_RESETTING_EVENT_FLAG) {
983                         QETH_CARD_TEXT(card, 2, "REVIND");
984                         return 1;
985                 }
986                 if (sense[SENSE_COMMAND_REJECT_BYTE] &
987                     SENSE_COMMAND_REJECT_FLAG) {
988                         QETH_CARD_TEXT(card, 2, "CMDREJi");
989                         return 1;
990                 }
991                 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
992                         QETH_CARD_TEXT(card, 2, "AFFE");
993                         return 1;
994                 }
995                 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
996                         QETH_CARD_TEXT(card, 2, "ZEROSEN");
997                         return 0;
998                 }
999                 QETH_CARD_TEXT(card, 2, "DGENCHK");
1000                 return 1;
1001         }
1002         return 0;
1003 }
1004
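/* Translate an ERR_PTR-encoded irb into an error code, logging the cause;
 * returns 0 when the irb is valid.
 */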
1005 static long qeth_check_irb_error(struct qeth_card *card,
1006                                  struct ccw_device *cdev, unsigned long intparm,
1007                                  struct irb *irb)
1008 {
1009         if (!IS_ERR(irb))
1010                 return 0;
1011
1012         switch (PTR_ERR(irb)) {
1013         case -EIO:
1014                 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1015                                  CCW_DEVID(cdev));
1016                 QETH_CARD_TEXT(card, 2, "ckirberr");
1017                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
1018                 break;
1019         case -ETIMEDOUT:
1020                 dev_warn(&cdev->dev, "A hardware operation timed out"
1021                         " on the device\n");
1022                 QETH_CARD_TEXT(card, 2, "ckirberr");
1023                 QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1024                 if (intparm == QETH_RCD_PARM) {
1025                         if (card->data.ccwdev == cdev) {
1026                                 card->data.state = CH_STATE_DOWN;
1027                                 wake_up(&card->wait_q);
1028                         }
1029                 }
1030                 break;
1031         default:
1032                 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1033                                  PTR_ERR(irb), CCW_DEVID(cdev));
1034                 QETH_CARD_TEXT(card, 2, "ckirberr");
1035                 QETH_CARD_TEXT(card, 2, "  rc???");
1036         }
1037         return PTR_ERR(irb);
1038 }
1039
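/* Interrupt handler shared by the read, write and data CCW channels of a
 * card: classifies the interrupt, updates the channel state, reports device
 * errors and triggers either the next read or the buffer's callback.
 */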
1040 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1041                 struct irb *irb)
1042 {
1043         int rc;
1044         int cstat, dstat;
1045         struct qeth_cmd_buffer *iob = NULL;
1046         struct ccwgroup_device *gdev;
1047         struct qeth_channel *channel;
1048         struct qeth_card *card;
1049
1050         /* while we hold the ccwdev lock, this stays valid: */
1051         gdev = dev_get_drvdata(&cdev->dev);
1052         card = dev_get_drvdata(&gdev->dev);
1053         if (!card)
1054                 return;
1055
1056         QETH_CARD_TEXT(card, 5, "irq");
1057
1058         if (card->read.ccwdev == cdev) {
1059                 channel = &card->read;
1060                 QETH_CARD_TEXT(card, 5, "read");
1061         } else if (card->write.ccwdev == cdev) {
1062                 channel = &card->write;
1063                 QETH_CARD_TEXT(card, 5, "write");
1064         } else {
1065                 channel = &card->data;
1066                 QETH_CARD_TEXT(card, 5, "data");
1067         }
1068
1069         if (qeth_intparm_is_iob(intparm))
1070                 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1071
1072         if (qeth_check_irb_error(card, cdev, intparm, irb)) {
1073                 /* IO was terminated, free its resources. */
1074                 if (iob)
1075                         qeth_release_buffer(iob->channel, iob);
1076                 atomic_set(&channel->irq_pending, 0);
1077                 wake_up(&card->wait_q);
1078                 return;
1079         }
1080
1081         atomic_set(&channel->irq_pending, 0);
1082
1083         if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
1084                 channel->state = CH_STATE_STOPPED;
1085
1086         if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
1087                 channel->state = CH_STATE_HALTED;
1088
1089         /* let's wake up immediately on data channel */
1090         if ((channel == &card->data) && (intparm != 0) &&
1091             (intparm != QETH_RCD_PARM))
1092                 goto out;
1093
1094         if (intparm == QETH_CLEAR_CHANNEL_PARM) {
1095                 QETH_CARD_TEXT(card, 6, "clrchpar");
1096                 /* we don't have to handle this further */
1097                 intparm = 0;
1098         }
1099         if (intparm == QETH_HALT_CHANNEL_PARM) {
1100                 QETH_CARD_TEXT(card, 6, "hltchpar");
1101                 /* we don't have to handle this further */
1102                 intparm = 0;
1103         }
1104
1105         cstat = irb->scsw.cmd.cstat;
1106         dstat = irb->scsw.cmd.dstat;
1107
1108         if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1109             (dstat & DEV_STAT_UNIT_CHECK) ||
1110             (cstat)) {
1111                 if (irb->esw.esw0.erw.cons) {
1112                         dev_warn(&channel->ccwdev->dev,
1113                                 "The qeth device driver failed to recover "
1114                                 "an error on the device\n");
1115                         QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1116                                          CCW_DEVID(channel->ccwdev), cstat,
1117                                          dstat);
1118                         print_hex_dump(KERN_WARNING, "qeth: irb ",
1119                                 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1120                         print_hex_dump(KERN_WARNING, "qeth: sense data ",
1121                                 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1122                 }
1123                 if (intparm == QETH_RCD_PARM) {
1124                         channel->state = CH_STATE_DOWN;
1125                         goto out;
1126                 }
1127                 rc = qeth_get_problem(card, cdev, irb);
1128                 if (rc) {
1129                         card->read_or_write_problem = 1;
1130                         qeth_clear_ipacmd_list(card);
1131                         qeth_schedule_recovery(card);
1132                         goto out;
1133                 }
1134         }
1135
1136         if (intparm == QETH_RCD_PARM) {
1137                 channel->state = CH_STATE_RCD_DONE;
1138                 goto out;
1139         }
1140         if (channel == &card->data)
1141                 return;
1142         if (channel == &card->read &&
1143             channel->state == CH_STATE_UP)
1144                 __qeth_issue_next_read(card);
1145
1146         if (iob && iob->callback)
1147                 iob->callback(card, iob->channel, iob);
1148
1149 out:
1150         wake_up(&card->wait_q);
1151         return;
1152 }
1153
1154 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1155                 struct qeth_qdio_out_buffer *buf,
1156                 enum iucv_tx_notify notification)
1157 {
1158         struct sk_buff *skb;
1159
1160         skb_queue_walk(&buf->skb_list, skb) {
1161                 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1162                 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1163                 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
1164                         iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1165         }
1166 }
1167
1168 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1169 {
1170         /* release may never happen from within CQ tasklet scope */
1171         WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1172
1173         if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1174                 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
1175
1176         __skb_queue_purge(&buf->skb_list);
1177 }
1178
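/* Return an outbound buffer to the EMPTY state: release its skbs, free any
 * header-cache elements and scrub the SBAL elements.
 */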
1179 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1180                                      struct qeth_qdio_out_buffer *buf)
1181 {
1182         int i;
1183
1184         /* is PCI flag set on buffer? */
1185         if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
1186                 atomic_dec(&queue->set_pci_flags_count);
1187
1188         qeth_release_skbs(buf);
1189
1190         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
1191                 if (buf->buffer->element[i].addr && buf->is_header[i])
1192                         kmem_cache_free(qeth_core_header_cache,
1193                                 buf->buffer->element[i].addr);
1194                 buf->is_header[i] = 0;
1195         }
1196
1197         qeth_scrub_qdio_buffer(buf->buffer,
1198                                QETH_MAX_BUFFER_ELEMENTS(queue->card));
1199         buf->next_element_to_fill = 0;
1200         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1201 }
1202
1203 static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
1204 {
1205         int j;
1206
1207         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1208                 if (!q->bufs[j])
1209                         continue;
1210                 qeth_cleanup_handled_pending(q, j, 1);
1211                 qeth_clear_output_buffer(q, q->bufs[j]);
1212                 if (free) {
1213                         kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1214                         q->bufs[j] = NULL;
1215                 }
1216         }
1217 }
1218
1219 void qeth_clear_qdio_buffers(struct qeth_card *card)
1220 {
1221         int i;
1222
1223         QETH_CARD_TEXT(card, 2, "clearqdbf");
1224         /* clear outbound buffers to free skbs */
1225         for (i = 0; i < card->qdio.no_out_queues; ++i) {
1226                 if (card->qdio.out_qs[i]) {
1227                         qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
1228                 }
1229         }
1230 }
1231 EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
1232
1233 static void qeth_free_buffer_pool(struct qeth_card *card)
1234 {
1235         struct qeth_buffer_pool_entry *pool_entry, *tmp;
1236         int i = 0;
1237         list_for_each_entry_safe(pool_entry, tmp,
1238                                  &card->qdio.init_pool.entry_list, init_list){
1239                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
1240                         free_page((unsigned long)pool_entry->elements[i]);
1241                 list_del(&pool_entry->init_list);
1242                 kfree(pool_entry);
1243         }
1244 }
1245
1246 static void qeth_clean_channel(struct qeth_channel *channel)
1247 {
1248         struct ccw_device *cdev = channel->ccwdev;
1249         int cnt;
1250
1251         QETH_DBF_TEXT(SETUP, 2, "freech");
1252
1253         spin_lock_irq(get_ccwdev_lock(cdev));
1254         cdev->handler = NULL;
1255         spin_unlock_irq(get_ccwdev_lock(cdev));
1256
1257         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1258                 kfree(channel->iob[cnt].data);
1259         kfree(channel->ccw);
1260 }
1261
1262 static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
1263 {
1264         struct ccw_device *cdev = channel->ccwdev;
1265         int cnt;
1266
1267         QETH_DBF_TEXT(SETUP, 2, "setupch");
1268
1269         channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1270         if (!channel->ccw)
1271                 return -ENOMEM;
1272         channel->state = CH_STATE_DOWN;
1273         atomic_set(&channel->irq_pending, 0);
1274         init_waitqueue_head(&channel->wait_q);
1275
1276         spin_lock_irq(get_ccwdev_lock(cdev));
1277         cdev->handler = qeth_irq;
1278         spin_unlock_irq(get_ccwdev_lock(cdev));
1279
1280         if (!alloc_buffers)
1281                 return 0;
1282
1283         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
1284                 channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
1285                                                  GFP_KERNEL | GFP_DMA);
1286                 if (channel->iob[cnt].data == NULL)
1287                         break;
1288                 channel->iob[cnt].state = BUF_STATE_FREE;
1289                 channel->iob[cnt].channel = channel;
1290                 channel->iob[cnt].callback = qeth_send_control_data_cb;
1291                 channel->iob[cnt].rc = 0;
1292         }
1293         if (cnt < QETH_CMD_BUFFER_NO) {
1294                 qeth_clean_channel(channel);
1295                 return -ENOMEM;
1296         }
1297         channel->io_buf_no = 0;
1298         spin_lock_init(&channel->iob_lock);
1299
1300         return 0;
1301 }
1302
1303 static void qeth_set_single_write_queues(struct qeth_card *card)
1304 {
1305         if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1306             (card->qdio.no_out_queues == 4))
1307                 qeth_free_qdio_buffers(card);
1308
1309         card->qdio.no_out_queues = 1;
1310         if (card->qdio.default_out_queue != 0)
1311                 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1312
1313         card->qdio.default_out_queue = 0;
1314 }
1315
1316 static void qeth_set_multiple_write_queues(struct qeth_card *card)
1317 {
1318         if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1319             (card->qdio.no_out_queues == 1)) {
1320                 qeth_free_qdio_buffers(card);
1321                 card->qdio.default_out_queue = 2;
1322         }
1323         card->qdio.no_out_queues = 4;
1324 }
1325
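/* Derive the function level and the number of outbound queues from the
 * channel-path descriptor of the data device.
 */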
1326 static void qeth_update_from_chp_desc(struct qeth_card *card)
1327 {
1328         struct ccw_device *ccwdev;
1329         struct channel_path_desc_fmt0 *chp_dsc;
1330
1331         QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1332
1333         ccwdev = card->data.ccwdev;
1334         chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1335         if (!chp_dsc)
1336                 goto out;
1337
1338         card->info.func_level = 0x4100 + chp_dsc->desc;
1339         if (card->info.type == QETH_CARD_TYPE_IQD)
1340                 goto out;
1341
1342         /* CHPP field bit 6 == 1 -> single queue */
1343         if ((chp_dsc->chpp & 0x02) == 0x02)
1344                 qeth_set_single_write_queues(card);
1345         else
1346                 qeth_set_multiple_write_queues(card);
1347 out:
1348         kfree(chp_dsc);
1349         QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1350         QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1351 }
1352
1353 static void qeth_init_qdio_info(struct qeth_card *card)
1354 {
1355         QETH_DBF_TEXT(SETUP, 4, "intqdinf");
1356         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1357         card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1358         card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1359         card->qdio.no_out_queues = QETH_MAX_QUEUES;
1360
1361         /* inbound */
1362         card->qdio.no_in_queues = 1;
1363         card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1364         if (card->info.type == QETH_CARD_TYPE_IQD)
1365                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1366         else
1367                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1368         card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1369         INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1370         INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1371 }
1372
1373 static void qeth_set_initial_options(struct qeth_card *card)
1374 {
1375         card->options.route4.type = NO_ROUTER;
1376         card->options.route6.type = NO_ROUTER;
1377         card->options.rx_sg_cb = QETH_RX_SG_CB;
1378         card->options.isolation = ISOLATION_MODE_NONE;
1379         card->options.cq = QETH_CQ_DISABLED;
1380         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1381 }
1382
1383 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1384 {
1385         unsigned long flags;
1386         int rc = 0;
1387
1388         spin_lock_irqsave(&card->thread_mask_lock, flags);
1389         QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1390                         (u8) card->thread_start_mask,
1391                         (u8) card->thread_allowed_mask,
1392                         (u8) card->thread_running_mask);
1393         rc = (card->thread_start_mask & thread);
1394         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1395         return rc;
1396 }
1397
1398 static void qeth_start_kernel_thread(struct work_struct *work)
1399 {
1400         struct task_struct *ts;
1401         struct qeth_card *card = container_of(work, struct qeth_card,
1402                                         kernel_thread_starter);
1403         QETH_CARD_TEXT(card, 2, "strthrd");
1404
1405         if (card->read.state != CH_STATE_UP &&
1406             card->write.state != CH_STATE_UP)
1407                 return;
1408         if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1409                 ts = kthread_run(card->discipline->recover, (void *)card,
1410                                 "qeth_recover");
1411                 if (IS_ERR(ts)) {
1412                         qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1413                         qeth_clear_thread_running_bit(card,
1414                                 QETH_RECOVER_THREAD);
1415                 }
1416         }
1417 }
1418
1419 static void qeth_buffer_reclaim_work(struct work_struct *);
1420 static void qeth_setup_card(struct qeth_card *card)
1421 {
1422         QETH_DBF_TEXT(SETUP, 2, "setupcrd");
1423         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1424
1425         card->info.type = CARD_RDEV(card)->id.driver_info;
1426         card->state = CARD_STATE_DOWN;
1427         spin_lock_init(&card->mclock);
1428         spin_lock_init(&card->lock);
1429         spin_lock_init(&card->ip_lock);
1430         spin_lock_init(&card->thread_mask_lock);
1431         mutex_init(&card->conf_mutex);
1432         mutex_init(&card->discipline_mutex);
1433         mutex_init(&card->vid_list_mutex);
1434         INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1435         INIT_LIST_HEAD(&card->cmd_waiter_list);
1436         init_waitqueue_head(&card->wait_q);
1437         qeth_set_initial_options(card);
1438         /* IP address takeover */
1439         INIT_LIST_HEAD(&card->ipato.entries);
1440         qeth_init_qdio_info(card);
1441         INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1442         INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1443 }
1444
1445 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1446 {
1447         struct qeth_card *card = container_of(slr, struct qeth_card,
1448                                         qeth_service_level);
1449         if (card->info.mcl_level[0])
1450                 seq_printf(m, "qeth: %s firmware level %s\n",
1451                         CARD_BUS_ID(card), card->info.mcl_level);
1452 }
1453
1454 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1455 {
1456         struct qeth_card *card;
1457
1458         QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1459         card = kzalloc(sizeof(*card), GFP_KERNEL);
1460         if (!card)
1461                 goto out;
1462         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1463
1464         card->gdev = gdev;
1465         dev_set_drvdata(&gdev->dev, card);
1466         CARD_RDEV(card) = gdev->cdev[0];
1467         CARD_WDEV(card) = gdev->cdev[1];
1468         CARD_DDEV(card) = gdev->cdev[2];
1469         if (qeth_setup_channel(&card->read, true))
1470                 goto out_ip;
1471         if (qeth_setup_channel(&card->write, true))
1472                 goto out_channel;
1473         if (qeth_setup_channel(&card->data, false))
1474                 goto out_data;
1475         card->qeth_service_level.seq_print = qeth_core_sl_print;
1476         register_service_level(&card->qeth_service_level);
1477         return card;
1478
1479 out_data:
1480         qeth_clean_channel(&card->write);
1481 out_channel:
1482         qeth_clean_channel(&card->read);
1483 out_ip:
1484         dev_set_drvdata(&gdev->dev, NULL);
1485         kfree(card);
1486 out:
1487         return NULL;
1488 }
1489
1490 static int qeth_clear_channel(struct qeth_card *card,
1491                               struct qeth_channel *channel)
1492 {
1493         int rc;
1494
1495         QETH_CARD_TEXT(card, 3, "clearch");
1496         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1497         rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1498         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1499
1500         if (rc)
1501                 return rc;
1502         rc = wait_event_interruptible_timeout(card->wait_q,
1503                         channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1504         if (rc == -ERESTARTSYS)
1505                 return rc;
1506         if (channel->state != CH_STATE_STOPPED)
1507                 return -ETIME;
1508         channel->state = CH_STATE_DOWN;
1509         return 0;
1510 }
1511
1512 static int qeth_halt_channel(struct qeth_card *card,
1513                              struct qeth_channel *channel)
1514 {
1515         int rc;
1516
1517         QETH_CARD_TEXT(card, 3, "haltch");
1518         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1519         rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1520         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1521
1522         if (rc)
1523                 return rc;
1524         rc = wait_event_interruptible_timeout(card->wait_q,
1525                         channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1526         if (rc == -ERESTARTSYS)
1527                 return rc;
1528         if (channel->state != CH_STATE_HALTED)
1529                 return -ETIME;
1530         return 0;
1531 }
1532
1533 static int qeth_halt_channels(struct qeth_card *card)
1534 {
1535         int rc1 = 0, rc2 = 0, rc3 = 0;
1536
1537         QETH_CARD_TEXT(card, 3, "haltchs");
1538         rc1 = qeth_halt_channel(card, &card->read);
1539         rc2 = qeth_halt_channel(card, &card->write);
1540         rc3 = qeth_halt_channel(card, &card->data);
1541         if (rc1)
1542                 return rc1;
1543         if (rc2)
1544                 return rc2;
1545         return rc3;
1546 }
1547
1548 static int qeth_clear_channels(struct qeth_card *card)
1549 {
1550         int rc1 = 0, rc2 = 0, rc3 = 0;
1551
1552         QETH_CARD_TEXT(card, 3, "clearchs");
1553         rc1 = qeth_clear_channel(card, &card->read);
1554         rc2 = qeth_clear_channel(card, &card->write);
1555         rc3 = qeth_clear_channel(card, &card->data);
1556         if (rc1)
1557                 return rc1;
1558         if (rc2)
1559                 return rc2;
1560         return rc3;
1561 }
1562
1563 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1564 {
1565         int rc = 0;
1566
1567         QETH_CARD_TEXT(card, 3, "clhacrd");
1568
1569         if (halt)
1570                 rc = qeth_halt_channels(card);
1571         if (rc)
1572                 return rc;
1573         return qeth_clear_channels(card);
1574 }
1575
1576 int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1577 {
1578         int rc = 0;
1579
1580         QETH_CARD_TEXT(card, 3, "qdioclr");
1581         switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1582                 QETH_QDIO_CLEANING)) {
1583         case QETH_QDIO_ESTABLISHED:
1584                 if (card->info.type == QETH_CARD_TYPE_IQD)
1585                         rc = qdio_shutdown(CARD_DDEV(card),
1586                                 QDIO_FLAG_CLEANUP_USING_HALT);
1587                 else
1588                         rc = qdio_shutdown(CARD_DDEV(card),
1589                                 QDIO_FLAG_CLEANUP_USING_CLEAR);
1590                 if (rc)
1591                         QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1592                 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1593                 break;
1594         case QETH_QDIO_CLEANING:
1595                 return rc;
1596         default:
1597                 break;
1598         }
1599         rc = qeth_clear_halt_card(card, use_halt);
1600         if (rc)
1601                 QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1602         card->state = CARD_STATE_DOWN;
1603         return rc;
1604 }
1605 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1606
1607 static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1608                                int *length)
1609 {
1610         struct ciw *ciw;
1611         char *rcd_buf;
1612         int ret;
1613         struct qeth_channel *channel = &card->data;
1614
1615         /*
1616          * scan for RCD command in extended SenseID data
1617          */
1618         ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1619         if (!ciw || ciw->cmd == 0)
1620                 return -EOPNOTSUPP;
1621         rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1622         if (!rcd_buf)
1623                 return -ENOMEM;
1624
1625         qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
1626         channel->state = CH_STATE_RCD;
1627         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1628         ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1629                                        QETH_RCD_PARM, LPM_ANYPATH, 0,
1630                                        QETH_RCD_TIMEOUT);
1631         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1632         if (!ret)
1633                 wait_event(card->wait_q,
1634                            (channel->state == CH_STATE_RCD_DONE ||
1635                             channel->state == CH_STATE_DOWN));
1636         if (channel->state == CH_STATE_DOWN)
1637                 ret = -EIO;
1638         else
1639                 channel->state = CH_STATE_DOWN;
1640         if (ret) {
1641                 kfree(rcd_buf);
1642                 *buffer = NULL;
1643                 *length = 0;
1644         } else {
1645                 *length = ciw->count;
1646                 *buffer = rcd_buf;
1647         }
1648         return ret;
1649 }
1650
1651 static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1652 {
1653         QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1654         card->info.chpid = prcd[30];
1655         card->info.unit_addr2 = prcd[31];
1656         card->info.cula = prcd[63];
1657         card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1658                                (prcd[0x11] == _ascebc['M']));
1659 }
1660
1661 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1662 {
1663         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1664         struct diag26c_vnic_resp *response = NULL;
1665         struct diag26c_vnic_req *request = NULL;
1666         struct ccw_dev_id id;
1667         char userid[80];
1668         int rc = 0;
1669
1670         QETH_DBF_TEXT(SETUP, 2, "vmlayer");
1671
1672         cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1673         if (rc)
1674                 goto out;
1675
1676         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1677         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1678         if (!request || !response) {
1679                 rc = -ENOMEM;
1680                 goto out;
1681         }
1682
1683         ccw_device_get_id(CARD_RDEV(card), &id);
1684         request->resp_buf_len = sizeof(*response);
1685         request->resp_version = DIAG26C_VERSION6_VM65918;
1686         request->req_format = DIAG26C_VNIC_INFO;
1687         ASCEBC(userid, 8);
1688         memcpy(&request->sys_name, userid, 8);
1689         request->devno = id.devno;
1690
1691         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1692         rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1693         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1694         if (rc)
1695                 goto out;
1696         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1697
1698         if (request->resp_buf_len < sizeof(*response) ||
1699             response->version != request->resp_version) {
1700                 rc = -EIO;
1701                 goto out;
1702         }
1703
1704         if (response->protocol == VNIC_INFO_PROT_L2)
1705                 disc = QETH_DISCIPLINE_LAYER2;
1706         else if (response->protocol == VNIC_INFO_PROT_L3)
1707                 disc = QETH_DISCIPLINE_LAYER3;
1708
1709 out:
1710         kfree(response);
1711         kfree(request);
1712         if (rc)
1713                 QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
1714         return disc;
1715 }
1716
1717 /* Determine whether the device requires a specific layer discipline */
1718 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1719 {
1720         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1721
1722         if (card->info.type == QETH_CARD_TYPE_OSM ||
1723             card->info.type == QETH_CARD_TYPE_OSN)
1724                 disc = QETH_DISCIPLINE_LAYER2;
1725         else if (card->info.guestlan)
1726                 disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
1727                                 QETH_DISCIPLINE_LAYER3 :
1728                                 qeth_vm_detect_layer(card);
1729
1730         switch (disc) {
1731         case QETH_DISCIPLINE_LAYER2:
1732                 QETH_DBF_TEXT(SETUP, 3, "force l2");
1733                 break;
1734         case QETH_DISCIPLINE_LAYER3:
1735                 QETH_DBF_TEXT(SETUP, 3, "force l3");
1736                 break;
1737         default:
1738                 QETH_DBF_TEXT(SETUP, 3, "force no");
1739         }
1740
1741         return disc;
1742 }
1743
1744 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1745 {
1746         QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1747
1748         if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
1749             prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
1750                 card->info.blkt.time_total = 0;
1751                 card->info.blkt.inter_packet = 0;
1752                 card->info.blkt.inter_packet_jumbo = 0;
1753         } else {
1754                 card->info.blkt.time_total = 250;
1755                 card->info.blkt.inter_packet = 5;
1756                 card->info.blkt.inter_packet_jumbo = 15;
1757         }
1758 }
1759
1760 static void qeth_init_tokens(struct qeth_card *card)
1761 {
1762         card->token.issuer_rm_w = 0x00010103UL;
1763         card->token.cm_filter_w = 0x00010108UL;
1764         card->token.cm_connection_w = 0x0001010aUL;
1765         card->token.ulp_filter_w = 0x0001010bUL;
1766         card->token.ulp_connection_w = 0x0001010dUL;
1767 }
1768
1769 static void qeth_init_func_level(struct qeth_card *card)
1770 {
1771         switch (card->info.type) {
1772         case QETH_CARD_TYPE_IQD:
1773                 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1774                 break;
1775         case QETH_CARD_TYPE_OSD:
1776         case QETH_CARD_TYPE_OSN:
1777                 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1778                 break;
1779         default:
1780                 break;
1781         }
1782 }
1783
1784 static int qeth_idx_activate_get_answer(struct qeth_card *card,
1785                                         struct qeth_channel *channel,
1786                                         void (*reply_cb)(struct qeth_card *,
1787                                                          struct qeth_channel *,
1788                                                          struct qeth_cmd_buffer *))
1789 {
1790         struct qeth_cmd_buffer *iob;
1791         int rc;
1792
1793         QETH_DBF_TEXT(SETUP, 2, "idxanswr");
1794         iob = qeth_get_buffer(channel);
1795         if (!iob)
1796                 return -ENOMEM;
1797         iob->callback = reply_cb;
1798         qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
1799
1800         wait_event(card->wait_q,
1801                    atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1802         QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1803         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1804         rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1805                                       (addr_t) iob, 0, 0, QETH_TIMEOUT);
1806         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1807
1808         if (rc) {
1809                 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
1810                 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
1811                 atomic_set(&channel->irq_pending, 0);
1812                 wake_up(&card->wait_q);
1813                 return rc;
1814         }
1815         rc = wait_event_interruptible_timeout(card->wait_q,
1816                          channel->state == CH_STATE_UP, QETH_TIMEOUT);
1817         if (rc == -ERESTARTSYS)
1818                 return rc;
1819         if (channel->state != CH_STATE_UP) {
1820                 rc = -ETIME;
1821                 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
1822         } else
1823                 rc = 0;
1824         return rc;
1825 }
1826
1827 static int qeth_idx_activate_channel(struct qeth_card *card,
1828                                      struct qeth_channel *channel,
1829                                      void (*reply_cb)(struct qeth_card *,
1830                                                       struct qeth_channel *,
1831                                                       struct qeth_cmd_buffer *))
1832 {
1833         struct qeth_cmd_buffer *iob;
1834         __u16 temp;
1835         __u8 tmp;
1836         int rc;
1837         struct ccw_dev_id temp_devid;
1838
1839         QETH_DBF_TEXT(SETUP, 2, "idxactch");
1840
1841         iob = qeth_get_buffer(channel);
1842         if (!iob)
1843                 return -ENOMEM;
1844         iob->callback = reply_cb;
1845         qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
1846                        iob->data);
1847         if (channel == &card->write) {
1848                 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1849                 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1850                        &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1851                 card->seqno.trans_hdr++;
1852         } else {
1853                 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1854                 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1855                        &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1856         }
1857         tmp = ((u8)card->dev->dev_port) | 0x80;
1858         memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
1859         memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1860                &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
1861         memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1862                &card->info.func_level, sizeof(__u16));
1863         ccw_device_get_id(CARD_DDEV(card), &temp_devid);
1864         memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
1865         temp = (card->info.cula << 8) + card->info.unit_addr2;
1866         memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1867
1868         wait_event(card->wait_q,
1869                    atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1870         QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1871         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1872         rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1873                                       (addr_t) iob, 0, 0, QETH_TIMEOUT);
1874         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1875
1876         if (rc) {
1877                 QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
1878                         rc);
1879                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
1880                 atomic_set(&channel->irq_pending, 0);
1881                 wake_up(&card->wait_q);
1882                 return rc;
1883         }
1884         rc = wait_event_interruptible_timeout(card->wait_q,
1885                         channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1886         if (rc == -ERESTARTSYS)
1887                 return rc;
1888         if (channel->state != CH_STATE_ACTIVATING) {
1889                 dev_warn(&channel->ccwdev->dev, "The qeth device driver"
1890                         " failed to recover an error on the device\n");
1891                 QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
1892                                  CCW_DEVID(channel->ccwdev));
1893                 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1894                 return -ETIME;
1895         }
1896         return qeth_idx_activate_get_answer(card, channel, reply_cb);
1897 }
1898
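/* Derive the function level that the peer is expected to report back in its
 * IDX ACTIVATE reply, based on our own func_level.
 */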
1899 static int qeth_peer_func_level(int level)
1900 {
1901         if ((level & 0xff) == 8)
1902                 return (level & 0xff) + 0x400;
1903         if (((level >> 8) & 3) == 1)
1904                 return (level & 0xff) + 0x200;
1905         return level;
1906 }
1907
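/* IDX ACTIVATE completion handler for the write channel: validate the reply
 * and the peer's function level, then mark the channel as UP.
 */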
1908 static void qeth_idx_write_cb(struct qeth_card *card,
1909                               struct qeth_channel *channel,
1910                               struct qeth_cmd_buffer *iob)
1911 {
1912         __u16 temp;
1913
1914         QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
1915
1916         if (channel->state == CH_STATE_DOWN) {
1917                 channel->state = CH_STATE_ACTIVATING;
1918                 goto out;
1919         }
1920
1921         if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1922                 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
1923                         dev_err(&channel->ccwdev->dev,
1924                                 "The adapter is used exclusively by another "
1925                                 "host\n");
1926                 else
1927                         QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1928                                          CCW_DEVID(channel->ccwdev));
1929                 goto out;
1930         }
1931         memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1932         if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1933                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1934                                  CCW_DEVID(channel->ccwdev),
1935                                  card->info.func_level, temp);
1936                 goto out;
1937         }
1938         channel->state = CH_STATE_UP;
1939 out:
1940         qeth_release_buffer(channel, iob);
1941 }
1942
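/* IDX ACTIVATE completion handler for the read channel: validate the reply,
 * store the issuer_rm token and microcode level, then mark the channel as UP.
 */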
1943 static void qeth_idx_read_cb(struct qeth_card *card,
1944                              struct qeth_channel *channel,
1945                              struct qeth_cmd_buffer *iob)
1946 {
1947         __u16 temp;
1948
1949         QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
1950         if (channel->state == CH_STATE_DOWN) {
1951                 channel->state = CH_STATE_ACTIVATING;
1952                 goto out;
1953         }
1954
1955         if (qeth_check_idx_response(card, iob->data))
1956                 goto out;
1957
1958         if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1959                 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
1960                 case QETH_IDX_ACT_ERR_EXCL:
1961                         dev_err(&channel->ccwdev->dev,
1962                                 "The adapter is used exclusively by another "
1963                                 "host\n");
1964                         break;
1965                 case QETH_IDX_ACT_ERR_AUTH:
1966                 case QETH_IDX_ACT_ERR_AUTH_USER:
1967                         dev_err(&channel->ccwdev->dev,
1968                                 "Setting the device online failed because of "
1969                                 "insufficient authorization\n");
1970                         break;
1971                 default:
1972                         QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1973                                          CCW_DEVID(channel->ccwdev));
1974                 }
1975                 QETH_CARD_TEXT_(card, 2, "idxread%c",
1976                         QETH_IDX_ACT_CAUSE_CODE(iob->data));
1977                 goto out;
1978         }
1979
1980         memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1981         if (temp != qeth_peer_func_level(card->info.func_level)) {
1982                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1983                                  CCW_DEVID(channel->ccwdev),
1984                                  card->info.func_level, temp);
1985                 goto out;
1986         }
1987         memcpy(&card->token.issuer_rm_r,
1988                QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1989                QETH_MPC_TOKEN_LENGTH);
1990         memcpy(&card->info.mcl_level[0],
1991                QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1992         channel->state = CH_STATE_UP;
1993 out:
1994         qeth_release_buffer(channel, iob);
1995 }
1996
1997 void qeth_prepare_control_data(struct qeth_card *card, int len,
1998                 struct qeth_cmd_buffer *iob)
1999 {
2000         qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
2001         iob->callback = qeth_release_buffer_cb;
2002
2003         memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
2004                &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
2005         card->seqno.trans_hdr++;
2006         memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
2007                &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
2008         card->seqno.pdu_hdr++;
2009         memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
2010                &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2011         QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
2012 }
2013 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
2014
2015 /**
2016  * qeth_send_control_data() -   send control command to the card
2017  * @card:                       qeth_card structure pointer
2018  * @len:                        size of the command buffer
2019  * @iob:                        qeth_cmd_buffer pointer
2020  * @reply_cb:                   callback function pointer
2021  * @cb_card:                    pointer to the qeth_card structure
2022  * @cb_reply:                   pointer to the qeth_reply structure
2023  * @cb_cmd:                     pointer to the original iob for non-IPA
2024  *                              commands, or to the qeth_ipa_cmd structure
2025  *                              for the IPA commands.
2026  * @reply_param:                private pointer passed to the callback
2027  *
2028  * Returns the value of the `return_code' field of the response
2029  * block returned from the hardware, or other error indication.
2030  * Value of zero indicates successful execution of the command.
2031  *
2032  * Callback function gets called one or more times, with cb_cmd
2033  * pointing to the response returned by the hardware. Callback
2034  * function must return non-zero if more reply blocks are expected,
2035  * and zero if the last or only reply block is received. Callback
2036  * function can get the value of the reply_param pointer from the
2037  * field 'param' of the structure qeth_reply.
2038  */
2039
2040 int qeth_send_control_data(struct qeth_card *card, int len,
2041                 struct qeth_cmd_buffer *iob,
2042                 int (*reply_cb)(struct qeth_card *cb_card,
2043                                 struct qeth_reply *cb_reply,
2044                                 unsigned long cb_cmd),
2045                 void *reply_param)
2046 {
2047         struct qeth_channel *channel = iob->channel;
2048         int rc;
2049         struct qeth_reply *reply = NULL;
2050         unsigned long timeout, event_timeout;
2051         struct qeth_ipa_cmd *cmd = NULL;
2052
2053         QETH_CARD_TEXT(card, 2, "sendctl");
2054
2055         if (card->read_or_write_problem) {
2056                 qeth_release_buffer(channel, iob);
2057                 return -EIO;
2058         }
2059         reply = qeth_alloc_reply(card);
2060         if (!reply) {
                     qeth_release_buffer(channel, iob);
2061                 return -ENOMEM;
2062         }
2063         reply->callback = reply_cb;
2064         reply->param = reply_param;
2065
2066         init_waitqueue_head(&reply->wait_q);
2067
2068         while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
2069
2070         if (IS_IPA(iob->data)) {
2071                 cmd = __ipa_cmd(iob);
2072                 cmd->hdr.seqno = card->seqno.ipa++;
2073                 reply->seqno = cmd->hdr.seqno;
2074                 event_timeout = QETH_IPA_TIMEOUT;
2075         } else {
2076                 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2077                 event_timeout = QETH_TIMEOUT;
2078         }
2079         qeth_prepare_control_data(card, len, iob);
2080
2081         spin_lock_irq(&card->lock);
2082         list_add_tail(&reply->list, &card->cmd_waiter_list);
2083         spin_unlock_irq(&card->lock);
2084
2085         timeout = jiffies + event_timeout;
2086
2087         QETH_CARD_TEXT(card, 6, "noirqpnd");
2088         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2089         rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
2090                                       (addr_t) iob, 0, 0, event_timeout);
2091         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2092         if (rc) {
2093                 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2094                                  CARD_DEVID(card), rc);
2095                 QETH_CARD_TEXT_(card, 2, " err%d", rc);
2096                 spin_lock_irq(&card->lock);
2097                 list_del_init(&reply->list);
2098                 qeth_put_reply(reply);
2099                 spin_unlock_irq(&card->lock);
2100                 qeth_release_buffer(channel, iob);
2101                 atomic_set(&channel->irq_pending, 0);
2102                 wake_up(&card->wait_q);
2103                 return rc;
2104         }
2105
2106         /* We have only one long-running ipassist (IPv4 SETIP); since we can
2107          * ensure process context for this command, we can sleep. */
2108         if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2109             cmd->hdr.prot_version == QETH_PROT_IPV4) {
2110                 if (!wait_event_timeout(reply->wait_q,
2111                     atomic_read(&reply->received), event_timeout))
2112                         goto time_err;
2113         } else {
2114                 while (!atomic_read(&reply->received)) {
2115                         if (time_after(jiffies, timeout))
2116                                 goto time_err;
2117                         cpu_relax();
2118                 }
2119         }
2120
2121         rc = reply->rc;
2122         qeth_put_reply(reply);
2123         return rc;
2124
2125 time_err:
2126         reply->rc = -ETIME;
2127         spin_lock_irq(&card->lock);
2128         list_del_init(&reply->list);
2129         spin_unlock_irq(&card->lock);
2130         atomic_inc(&reply->received);
2131         rc = reply->rc;
2132         qeth_put_reply(reply);
2133         return rc;
2134 }
2135 EXPORT_SYMBOL_GPL(qeth_send_control_data);
2136
2137 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2138                 unsigned long data)
2139 {
2140         struct qeth_cmd_buffer *iob;
2141
2142         QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
2143
2144         iob = (struct qeth_cmd_buffer *) data;
2145         memcpy(&card->token.cm_filter_r,
2146                QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2147                QETH_MPC_TOKEN_LENGTH);
2148         return 0;
2149 }
2150
2151 static int qeth_cm_enable(struct qeth_card *card)
2152 {
2153         int rc;
2154         struct qeth_cmd_buffer *iob;
2155
2156         QETH_DBF_TEXT(SETUP, 2, "cmenable");
2157
2158         iob = qeth_wait_for_buffer(&card->write);
2159         memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2160         memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2161                &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2162         memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2163                &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2164
2165         rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2166                                     qeth_cm_enable_cb, NULL);
2167         return rc;
2168 }
2169
2170 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2171                 unsigned long data)
2172 {
2173         struct qeth_cmd_buffer *iob;
2174
2175         QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
2176
2177         iob = (struct qeth_cmd_buffer *) data;
2178         memcpy(&card->token.cm_connection_r,
2179                QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2180                QETH_MPC_TOKEN_LENGTH);
2181         return 0;
2182 }
2183
2184 static int qeth_cm_setup(struct qeth_card *card)
2185 {
2186         int rc;
2187         struct qeth_cmd_buffer *iob;
2188
2189         QETH_DBF_TEXT(SETUP, 2, "cmsetup");
2190
2191         iob = qeth_wait_for_buffer(&card->write);
2192         memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2193         memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2194                &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2195         memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2196                &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2197         memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2198                &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2199         rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2200                                     qeth_cm_setup_cb, NULL);
2201         return rc;
2202 }
2203
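/* Apply the max MTU reported by the device: for IQD also resize the RX
 * buffers, and pick a sensible interface MTU within the new limit.
 */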
2204 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2205 {
2206         struct net_device *dev = card->dev;
2207         unsigned int new_mtu;
2208
2209         if (!max_mtu) {
2210                 /* IQD needs accurate max MTU to set up its RX buffers: */
2211                 if (IS_IQD(card))
2212                         return -EINVAL;
2213                 /* tolerate quirky HW: */
2214                 max_mtu = ETH_MAX_MTU;
2215         }
2216
2217         rtnl_lock();
2218         if (IS_IQD(card)) {
2219                 /* move any device with default MTU to new max MTU: */
2220                 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2221
2222                 /* adjust RX buffer size to new max MTU: */
2223                 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2224                 if (dev->max_mtu && dev->max_mtu != max_mtu)
2225                         qeth_free_qdio_buffers(card);
2226         } else {
2227                 if (dev->mtu)
2228                         new_mtu = dev->mtu;
2229                 /* default MTUs for first setup: */
2230                 else if (IS_LAYER2(card))
2231                         new_mtu = ETH_DATA_LEN;
2232                 else
2233                         new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2234         }
2235
2236         dev->max_mtu = max_mtu;
2237         dev->mtu = min(new_mtu, max_mtu);
2238         rtnl_unlock();
2239         return 0;
2240 }
2241
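/* Translate the frame size encoding reported by a HiperSockets device into
 * an MTU value; returns 0 for unknown encodings.
 */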
2242 static int qeth_get_mtu_outof_framesize(int framesize)
2243 {
2244         switch (framesize) {
2245         case 0x4000:
2246                 return 8192;
2247         case 0x6000:
2248                 return 16384;
2249         case 0xa000:
2250                 return 32768;
2251         case 0xffff:
2252                 return 57344;
2253         default:
2254                 return 0;
2255         }
2256 }
2257
2258 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2259                 unsigned long data)
2260 {
2261         __u16 mtu, framesize;
2262         __u16 len;
2263         __u8 link_type;
2264         struct qeth_cmd_buffer *iob;
2265
2266         QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
2267
2268         iob = (struct qeth_cmd_buffer *) data;
2269         memcpy(&card->token.ulp_filter_r,
2270                QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2271                QETH_MPC_TOKEN_LENGTH);
2272         if (card->info.type == QETH_CARD_TYPE_IQD) {
2273                 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2274                 mtu = qeth_get_mtu_outof_framesize(framesize);
2275         } else {
2276                 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2277         }
2278         *(u16 *)reply->param = mtu;
2279
2280         memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2281         if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2282                 memcpy(&link_type,
2283                        QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2284                 card->info.link_type = link_type;
2285         } else
2286                 card->info.link_type = 0;
2287         QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
2288         return 0;
2289 }
2290
2291 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2292 {
2293         if (IS_OSN(card))
2294                 return QETH_PROT_OSN2;
2295         return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2296 }
2297
2298 static int qeth_ulp_enable(struct qeth_card *card)
2299 {
2300         u8 prot_type = qeth_mpc_select_prot_type(card);
2301         struct qeth_cmd_buffer *iob;
2302         u16 max_mtu;
2303         int rc;
2304
2305         /* FIXME: trace view callbacks */
2306         QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
2307
2308         iob = qeth_wait_for_buffer(&card->write);
2309         memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2310
2311         *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2312         memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2313         memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2314                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2315         memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2316                &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2317         rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2318                                     qeth_ulp_enable_cb, &max_mtu);
2319         if (rc)
2320                 return rc;
2321         return qeth_update_max_mtu(card, max_mtu);
2322 }
2323
2324 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2325                 unsigned long data)
2326 {
2327         struct qeth_cmd_buffer *iob;
2328
2329         QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2330
2331         iob = (struct qeth_cmd_buffer *) data;
2332         memcpy(&card->token.ulp_connection_r,
2333                QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2334                QETH_MPC_TOKEN_LENGTH);
2335         if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2336                      3)) {
2337                 QETH_DBF_TEXT(SETUP, 2, "olmlimit");
2338                 dev_err(&card->gdev->dev, "A connection could not be "
2339                         "established because of an OLM limit\n");
2340                 iob->rc = -EMLINK;
2341         }
2342         QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
2343         return 0;
2344 }
2345
2346 static int qeth_ulp_setup(struct qeth_card *card)
2347 {
2348         int rc;
2349         __u16 temp;
2350         struct qeth_cmd_buffer *iob;
2351         struct ccw_dev_id dev_id;
2352
2353         QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
2354
2355         iob = qeth_wait_for_buffer(&card->write);
2356         memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2357
2358         memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2359                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2360         memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2361                &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2362         memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2363                &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2364
2365         ccw_device_get_id(CARD_DDEV(card), &dev_id);
2366         memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2367         temp = (card->info.cula << 8) + card->info.unit_addr2;
2368         memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2369         rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2370                                     qeth_ulp_setup_cb, NULL);
2371         return rc;
2372 }
2373
2374 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2375 {
2376         struct qeth_qdio_out_buffer *newbuf;
2377
2378         newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2379         if (!newbuf)
2380                 return -ENOMEM;
2381
2382         newbuf->buffer = q->qdio_bufs[bidx];
2383         skb_queue_head_init(&newbuf->skb_list);
2384         lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2385         newbuf->q = q;
2386         newbuf->next_pending = q->bufs[bidx];
2387         atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2388         q->bufs[bidx] = newbuf;
2389         return 0;
2390 }
2391
2392 static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
2393 {
2394         if (!q)
2395                 return;
2396
2397         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2398         kfree(q);
2399 }
2400
2401 static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
2402 {
2403         struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2404
2405         if (!q)
2406                 return NULL;
2407
2408         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2409                 kfree(q);
2410                 return NULL;
2411         }
2412         return q;
2413 }
2414
2415 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2416 {
2417         int i, j;
2418
2419         QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
2420
2421         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2422                 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2423                 return 0;
2424
2425         QETH_DBF_TEXT(SETUP, 2, "inq");
2426         card->qdio.in_q = qeth_alloc_qdio_queue();
2427         if (!card->qdio.in_q)
2428                 goto out_nomem;
2429
2430         /* inbound buffer pool */
2431         if (qeth_alloc_buffer_pool(card))
2432                 goto out_freeinq;
2433
2434         /* outbound */
2435         card->qdio.out_qs =
2436                 kcalloc(card->qdio.no_out_queues,
2437                         sizeof(struct qeth_qdio_out_q *),
2438                         GFP_KERNEL);
2439         if (!card->qdio.out_qs)
2440                 goto out_freepool;
2441         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2442                 card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
2443                 if (!card->qdio.out_qs[i])
2444                         goto out_freeoutq;
2445                 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2446                 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2447                 card->qdio.out_qs[i]->queue_no = i;
2448                 /* give outbound qeth_qdio_buffers their qdio_buffers */
2449                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2450                         WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2451                         if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2452                                 goto out_freeoutqbufs;
2453                 }
2454         }
2455
2456         /* completion */
2457         if (qeth_alloc_cq(card))
2458                 goto out_freeoutq;
2459
2460         return 0;
2461
2462 out_freeoutqbufs:
2463         while (j > 0) {
2464                 --j;
2465                 kmem_cache_free(qeth_qdio_outbuf_cache,
2466                                 card->qdio.out_qs[i]->bufs[j]);
2467                 card->qdio.out_qs[i]->bufs[j] = NULL;
2468         }
2469 out_freeoutq:
2470         while (i > 0) {
2471                 qeth_clear_outq_buffers(card->qdio.out_qs[--i], 1);
2472                 qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
2473         }
2474         kfree(card->qdio.out_qs);
2475         card->qdio.out_qs = NULL;
2476 out_freepool:
2477         qeth_free_buffer_pool(card);
2478 out_freeinq:
2479         qeth_free_qdio_queue(card->qdio.in_q);
2480         card->qdio.in_q = NULL;
2481 out_nomem:
2482         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2483         return -ENOMEM;
2484 }
2485
2486 static void qeth_free_qdio_buffers(struct qeth_card *card)
2487 {
2488         int i, j;
2489
2490         if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2491                 QETH_QDIO_UNINITIALIZED)
2492                 return;
2493
2494         qeth_free_cq(card);
2495         cancel_delayed_work_sync(&card->buffer_reclaim_work);
2496         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2497                 if (card->qdio.in_q->bufs[j].rx_skb)
2498                         dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2499         }
2500         qeth_free_qdio_queue(card->qdio.in_q);
2501         card->qdio.in_q = NULL;
2502         /* inbound buffer pool */
2503         qeth_free_buffer_pool(card);
2504         /* free outbound qdio_qs */
2505         if (card->qdio.out_qs) {
2506                 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2507                         qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2508                         qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
2509                 }
2510                 kfree(card->qdio.out_qs);
2511                 card->qdio.out_qs = NULL;
2512         }
2513 }
2514
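/* Fill the QIB parameter field with the 'PCIT' marker followed by the PCI
 * threshold and timer values for this card.
 */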
2515 static void qeth_create_qib_param_field(struct qeth_card *card,
2516                 char *param_field)
2517 {
2518
2519         param_field[0] = _ascebc['P'];
2520         param_field[1] = _ascebc['C'];
2521         param_field[2] = _ascebc['I'];
2522         param_field[3] = _ascebc['T'];
2523         *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2524         *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2525         *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2526 }
2527
2528 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2529                 char *param_field)
2530 {
2531         param_field[16] = _ascebc['B'];
2532         param_field[17] = _ascebc['L'];
2533         param_field[18] = _ascebc['K'];
2534         param_field[19] = _ascebc['T'];
2535         *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2536         *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2537         *((unsigned int *) (&param_field[28])) =
2538                 card->info.blkt.inter_packet_jumbo;
2539 }
2540
2541 static int qeth_qdio_activate(struct qeth_card *card)
2542 {
2543         QETH_DBF_TEXT(SETUP, 3, "qdioact");
2544         return qdio_activate(CARD_DDEV(card));
2545 }
2546
2547 static int qeth_dm_act(struct qeth_card *card)
2548 {
2549         int rc;
2550         struct qeth_cmd_buffer *iob;
2551
2552         QETH_DBF_TEXT(SETUP, 2, "dmact");
2553
2554         iob = qeth_wait_for_buffer(&card->write);
2555         memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2556
2557         memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2558                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2559         memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2560                &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2561         rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2562         return rc;
2563 }
2564
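/* Run the MPC initialization sequence: start the read channel, perform CM
 * and ULP enable/setup, allocate and establish the QDIO queues, activate
 * QDIO and finally issue DM_ACT.
 */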
2565 static int qeth_mpc_initialize(struct qeth_card *card)
2566 {
2567         int rc;
2568
2569         QETH_DBF_TEXT(SETUP, 2, "mpcinit");
2570
2571         rc = qeth_issue_next_read(card);
2572         if (rc) {
2573                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2574                 return rc;
2575         }
2576         rc = qeth_cm_enable(card);
2577         if (rc) {
2578                 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2579                 goto out_qdio;
2580         }
2581         rc = qeth_cm_setup(card);
2582         if (rc) {
2583                 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2584                 goto out_qdio;
2585         }
2586         rc = qeth_ulp_enable(card);
2587         if (rc) {
2588                 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
2589                 goto out_qdio;
2590         }
2591         rc = qeth_ulp_setup(card);
2592         if (rc) {
2593                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2594                 goto out_qdio;
2595         }
2596         rc = qeth_alloc_qdio_buffers(card);
2597         if (rc) {
2598                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2599                 goto out_qdio;
2600         }
2601         rc = qeth_qdio_establish(card);
2602         if (rc) {
2603                 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2604                 qeth_free_qdio_buffers(card);
2605                 goto out_qdio;
2606         }
2607         rc = qeth_qdio_activate(card);
2608         if (rc) {
2609                 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
2610                 goto out_qdio;
2611         }
2612         rc = qeth_dm_act(card);
2613         if (rc) {
2614                 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
2615                 goto out_qdio;
2616         }
2617
2618         return 0;
2619 out_qdio:
2620         qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2621         qdio_free(CARD_DDEV(card));
2622         return rc;
2623 }
2624
2625 void qeth_print_status_message(struct qeth_card *card)
2626 {
2627         switch (card->info.type) {
2628         case QETH_CARD_TYPE_OSD:
2629         case QETH_CARD_TYPE_OSM:
2630         case QETH_CARD_TYPE_OSX:
2631                 /* VM will use a non-zero first character to indicate
2632                  * a HiperSockets-like reporting of the level;
2633                  * OSA sets the first character to zero.
2634                  */
2635                 if (!card->info.mcl_level[0]) {
2636                         sprintf(card->info.mcl_level, "%02x%02x",
2637                                 card->info.mcl_level[2],
2638                                 card->info.mcl_level[3]);
2639                         break;
2640                 }
2641                 /* fallthrough */
2642         case QETH_CARD_TYPE_IQD:
2643                 if ((card->info.guestlan) ||
2644                     (card->info.mcl_level[0] & 0x80)) {
2645                         card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2646                                 card->info.mcl_level[0]];
2647                         card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2648                                 card->info.mcl_level[1]];
2649                         card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2650                                 card->info.mcl_level[2]];
2651                         card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2652                                 card->info.mcl_level[3]];
2653                         card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2654                 }
2655                 break;
2656         default:
2657                 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2658         }
2659         dev_info(&card->gdev->dev,
2660                  "Device is a%s card%s%s%s\nwith link type %s.\n",
2661                  qeth_get_cardname(card),
2662                  (card->info.mcl_level[0]) ? " (level: " : "",
2663                  (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2664                  (card->info.mcl_level[0]) ? ")" : "",
2665                  qeth_get_cardname_short(card));
2666 }
2667 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2668
2669 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2670 {
2671         struct qeth_buffer_pool_entry *entry;
2672
2673         QETH_CARD_TEXT(card, 5, "inwrklst");
2674
2675         list_for_each_entry(entry,
2676                             &card->qdio.init_pool.entry_list, init_list) {
2677                 qeth_put_buffer_pool_entry(card, entry);
2678         }
2679 }
2680
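/* Pick a pool entry whose pages are no longer referenced by the stack; if
 * none is free, recycle the first entry and replace any still-referenced
 * pages with freshly allocated ones.
 */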
2681 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2682                                         struct qeth_card *card)
2683 {
2684         struct list_head *plh;
2685         struct qeth_buffer_pool_entry *entry;
2686         int i, free;
2687         struct page *page;
2688
2689         if (list_empty(&card->qdio.in_buf_pool.entry_list))
2690                 return NULL;
2691
2692         list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2693                 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2694                 free = 1;
2695                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2696                         if (page_count(virt_to_page(entry->elements[i])) > 1) {
2697                                 free = 0;
2698                                 break;
2699                         }
2700                 }
2701                 if (free) {
2702                         list_del_init(&entry->list);
2703                         return entry;
2704                 }
2705         }
2706
2707         /* no free buffer in pool so take first one and swap pages */
2708         entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2709                         struct qeth_buffer_pool_entry, list);
2710         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2711                 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2712                         page = alloc_page(GFP_ATOMIC);
2713                         if (!page) {
2714                                 return NULL;
2715                         } else {
2716                                 free_page((unsigned long)entry->elements[i]);
2717                                 entry->elements[i] = page_address(page);
2718                                 if (card->options.performance_stats)
2719                                         card->perf_stats.sg_alloc_page_rx++;
2720                         }
2721                 }
2722         }
2723         list_del_init(&entry->list);
2724         return entry;
2725 }
2726
2727 static int qeth_init_input_buffer(struct qeth_card *card,
2728                 struct qeth_qdio_buffer *buf)
2729 {
2730         struct qeth_buffer_pool_entry *pool_entry;
2731         int i;
2732
2733         if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2734                 buf->rx_skb = netdev_alloc_skb(card->dev,
2735                                                QETH_RX_PULL_LEN + ETH_HLEN);
2736                 if (!buf->rx_skb)
2737                         return 1;
2738         }
2739
2740         pool_entry = qeth_find_free_buffer_pool_entry(card);
2741         if (!pool_entry)
2742                 return 1;
2743
2744         /*
2745          * since the buffer is accessed only from the input_tasklet
2746          * there shouldn't be a need to synchronize; also, since we use
2747          * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2748          * buffers
2749          */
2750
2751         buf->pool_entry = pool_entry;
2752         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2753                 buf->buffer->element[i].length = PAGE_SIZE;
2754                 buf->buffer->element[i].addr =  pool_entry->elements[i];
2755                 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2756                         buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2757                 else
2758                         buf->buffer->element[i].eflags = 0;
2759                 buf->buffer->element[i].sflags = 0;
2760         }
2761         return 0;
2762 }
2763
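/* (Re-)initialize the established QDIO queues: refill the input queue from
 * the buffer pool, hand the buffers to the hardware and reset all output
 * queue state.
 */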
2764 int qeth_init_qdio_queues(struct qeth_card *card)
2765 {
2766         int i, j;
2767         int rc;
2768
2769         QETH_DBF_TEXT(SETUP, 2, "initqdqs");
2770
2771         /* inbound queue */
2772         qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2773         memset(&card->rx, 0, sizeof(struct qeth_rx));
2774         qeth_initialize_working_pool_list(card);
2775         /* give only as many buffers to hardware as we have buffer pool entries */
2776         for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2777                 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2778         card->qdio.in_q->next_buf_to_init =
2779                 card->qdio.in_buf_pool.buf_count - 1;
2780         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2781                      card->qdio.in_buf_pool.buf_count - 1);
2782         if (rc) {
2783                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2784                 return rc;
2785         }
2786
2787         /* completion */
2788         rc = qeth_cq_init(card);
2789         if (rc) {
2790                 return rc;
2791         }
2792
2793         /* outbound queue */
2794         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2795                 qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
2796                                    QDIO_MAX_BUFFERS_PER_Q);
2797                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2798                         qeth_clear_output_buffer(card->qdio.out_qs[i],
2799                                                  card->qdio.out_qs[i]->bufs[j]);
2800                 }
2801                 card->qdio.out_qs[i]->card = card;
2802                 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2803                 card->qdio.out_qs[i]->do_pack = 0;
2804                 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2805                 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2806                 atomic_set(&card->qdio.out_qs[i]->state,
2807                            QETH_OUT_Q_UNLOCKED);
2808         }
2809         return 0;
2810 }
2811 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2812
2813 static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2814 {
2815         switch (link_type) {
2816         case QETH_LINK_TYPE_HSTR:
2817                 return 2;
2818         default:
2819                 return 1;
2820         }
2821 }
2822
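/*
 * IPA ("IP assist") commands form the card's control-plane interface.
 * The header filled in below identifies the host as initiator, the
 * adapter port and the addressed protocol stack; the sequence number is
 * assigned later by qeth_send_control_data().
 */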
2823 static void qeth_fill_ipacmd_header(struct qeth_card *card,
2824                                     struct qeth_ipa_cmd *cmd,
2825                                     enum qeth_ipa_cmds command,
2826                                     enum qeth_prot_versions prot)
2827 {
2828         cmd->hdr.command = command;
2829         cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2830         /* cmd->hdr.seqno is set by qeth_send_control_data() */
2831         cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2832         cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
2833         cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
2834         cmd->hdr.param_count = 1;
2835         cmd->hdr.prot_version = prot;
2836 }
2837
2838 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
2839 {
2840         u8 prot_type = qeth_mpc_select_prot_type(card);
2841
2842         memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2843         memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2844         memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2845                &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2846 }
2847 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2848
2849 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2850                 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2851 {
2852         struct qeth_cmd_buffer *iob;
2853
2854         iob = qeth_get_buffer(&card->write);
2855         if (iob) {
2856                 qeth_prepare_ipa_cmd(card, iob);
2857                 qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
2858         } else {
2859                 dev_warn(&card->gdev->dev,
2860                          "The qeth driver ran out of channel command buffers\n");
2861                 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
2862                                  CARD_DEVID(card));
2863         }
2864
2865         return iob;
2866 }
2867 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2868
2869 /**
2870  * qeth_send_ipa_cmd() - send an IPA command
2871  *
2872  * See qeth_send_control_data() for explanation of the arguments.
2873  */
2874
2875 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2876                 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2877                         unsigned long),
2878                 void *reply_param)
2879 {
2880         int rc;
2881
2882         QETH_CARD_TEXT(card, 4, "sendipa");
2883         rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2884                                                 iob, reply_cb, reply_param);
2885         if (rc == -ETIME) {
2886                 qeth_clear_ipacmd_list(card);
2887                 qeth_schedule_recovery(card);
2888         }
2889         return rc;
2890 }
2891 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2892
2893 static int qeth_send_startlan(struct qeth_card *card)
2894 {
2895         int rc;
2896         struct qeth_cmd_buffer *iob;
2897
2898         QETH_DBF_TEXT(SETUP, 2, "strtlan");
2899
2900         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2901         if (!iob)
2902                 return -ENOMEM;
2903         rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2904         return rc;
2905 }
2906
2907 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2908 {
2909         if (!cmd->hdr.return_code)
2910                 cmd->hdr.return_code =
2911                         cmd->data.setadapterparms.hdr.return_code;
2912         return cmd->hdr.return_code;
2913 }
2914
2915 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2916                 struct qeth_reply *reply, unsigned long data)
2917 {
2918         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2919
2920         QETH_CARD_TEXT(card, 3, "quyadpcb");
2921         if (qeth_setadpparms_inspect_rc(cmd))
2922                 return 0;
2923
2924         if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2925                 card->info.link_type =
2926                       cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2927                 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
2928         }
2929         card->options.adp.supported_funcs =
2930                 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2931         return 0;
2932 }
2933
2934 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2935                 __u32 command, __u32 cmdlen)
2936 {
2937         struct qeth_cmd_buffer *iob;
2938         struct qeth_ipa_cmd *cmd;
2939
2940         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2941                                      QETH_PROT_IPV4);
2942         if (iob) {
2943                 cmd = __ipa_cmd(iob);
2944                 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2945                 cmd->data.setadapterparms.hdr.command_code = command;
2946                 cmd->data.setadapterparms.hdr.used_total = 1;
2947                 cmd->data.setadapterparms.hdr.seq_no = 1;
2948         }
2949
2950         return iob;
2951 }
2952
2953 static int qeth_query_setadapterparms(struct qeth_card *card)
2954 {
2955         int rc;
2956         struct qeth_cmd_buffer *iob;
2957
2958         QETH_CARD_TEXT(card, 3, "queryadp");
2959         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2960                                    sizeof(struct qeth_ipacmd_setadpparms));
2961         if (!iob)
2962                 return -ENOMEM;
2963         rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2964         return rc;
2965 }
2966
2967 static int qeth_query_ipassists_cb(struct qeth_card *card,
2968                 struct qeth_reply *reply, unsigned long data)
2969 {
2970         struct qeth_ipa_cmd *cmd;
2971
2972         QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2973
2974         cmd = (struct qeth_ipa_cmd *) data;
2975
2976         switch (cmd->hdr.return_code) {
2977         case IPA_RC_NOTSUPP:
2978         case IPA_RC_L2_UNSUPPORTED_CMD:
2979                 QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
2980                 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2981                 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2982                 return 0;
2983         default:
2984                 if (cmd->hdr.return_code) {
2985                         QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2986                                          CARD_DEVID(card),
2987                                          cmd->hdr.return_code);
2988                         return 0;
2989                 }
2990         }
2991
2992         if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2993                 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2994                 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2995         } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
2996                 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2997                 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2998         } else
2999                 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3000                                  CARD_DEVID(card));
3001         return 0;
3002 }
3003
3004 static int qeth_query_ipassists(struct qeth_card *card,
3005                                 enum qeth_prot_versions prot)
3006 {
3007         int rc;
3008         struct qeth_cmd_buffer *iob;
3009
3010         QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3011         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3012         if (!iob)
3013                 return -ENOMEM;
3014         rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3015         return rc;
3016 }
3017
3018 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3019                                 struct qeth_reply *reply, unsigned long data)
3020 {
3021         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3022         struct qeth_query_switch_attributes *attrs;
3023         struct qeth_switch_info *sw_info;
3024
3025         QETH_CARD_TEXT(card, 2, "qswiatcb");
3026         if (qeth_setadpparms_inspect_rc(cmd))
3027                 return 0;
3028
3029         sw_info = (struct qeth_switch_info *)reply->param;
3030         attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3031         sw_info->capabilities = attrs->capabilities;
3032         sw_info->settings = attrs->settings;
3033         QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3034                         sw_info->settings);
3035         return 0;
3036 }
3037
3038 int qeth_query_switch_attributes(struct qeth_card *card,
3039                                  struct qeth_switch_info *sw_info)
3040 {
3041         struct qeth_cmd_buffer *iob;
3042
3043         QETH_CARD_TEXT(card, 2, "qswiattr");
3044         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3045                 return -EOPNOTSUPP;
3046         if (!netif_carrier_ok(card->dev))
3047                 return -ENOMEDIUM;
3048         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3049                                 sizeof(struct qeth_ipacmd_setadpparms_hdr));
3050         if (!iob)
3051                 return -ENOMEM;
3052         return qeth_send_ipa_cmd(card, iob,
3053                                 qeth_query_switch_attributes_cb, sw_info);
3054 }
3055
3056 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3057                 struct qeth_reply *reply, unsigned long data)
3058 {
3059         struct qeth_ipa_cmd *cmd;
3060         __u16 rc;
3061
3062         cmd = (struct qeth_ipa_cmd *)data;
3063         rc = cmd->hdr.return_code;
3064         if (rc)
3065                 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3066         else
3067                 card->info.diagass_support = cmd->data.diagass.ext;
3068         return 0;
3069 }
3070
3071 static int qeth_query_setdiagass(struct qeth_card *card)
3072 {
3073         struct qeth_cmd_buffer *iob;
3074         struct qeth_ipa_cmd    *cmd;
3075
3076         QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3077         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3078         if (!iob)
3079                 return -ENOMEM;
3080         cmd = __ipa_cmd(iob);
3081         cmd->data.diagass.subcmd_len = 16;
3082         cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
3083         return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3084 }
3085
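/*
 * Collects the identifiers reported with a hardware trap: CHPID,
 * subchannel set and device number, plus, where the configuration level
 * allows it, the LPAR number from SYSIB 2.2.2 and the guest name from
 * SYSIB 3.2.2. stsi(NULL, 0, 0, 0) returns the current configuration
 * level (2 when running in an LPAR, 3 when running as a z/VM guest).
 */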
3086 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3087 {
3088         unsigned long info = get_zeroed_page(GFP_KERNEL);
3089         struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3090         struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3091         struct ccw_dev_id ccwid;
3092         int level;
3093
3094         tid->chpid = card->info.chpid;
3095         ccw_device_get_id(CARD_RDEV(card), &ccwid);
3096         tid->ssid = ccwid.ssid;
3097         tid->devno = ccwid.devno;
3098         if (!info)
3099                 return;
3100         level = stsi(NULL, 0, 0, 0);
3101         if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3102                 tid->lparnr = info222->lpar_number;
3103         if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3104                 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3105                 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3106         }
3107         free_page(info);
3108         return;
3109 }
3110
3111 static int qeth_hw_trap_cb(struct qeth_card *card,
3112                 struct qeth_reply *reply, unsigned long data)
3113 {
3114         struct qeth_ipa_cmd *cmd;
3115         __u16 rc;
3116
3117         cmd = (struct qeth_ipa_cmd *)data;
3118         rc = cmd->hdr.return_code;
3119         if (rc)
3120                 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3121         return 0;
3122 }
3123
3124 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3125 {
3126         struct qeth_cmd_buffer *iob;
3127         struct qeth_ipa_cmd *cmd;
3128
3129         QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3130         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3131         if (!iob)
3132                 return -ENOMEM;
3133         cmd = __ipa_cmd(iob);
3134         cmd->data.diagass.subcmd_len = 80;
3135         cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
3136         cmd->data.diagass.type = 1;
3137         cmd->data.diagass.action = action;
3138         switch (action) {
3139         case QETH_DIAGS_TRAP_ARM:
3140                 cmd->data.diagass.options = 0x0003;
3141                 cmd->data.diagass.ext = 0x00010000 +
3142                         sizeof(struct qeth_trap_id);
3143                 qeth_get_trap_id(card,
3144                         (struct qeth_trap_id *)cmd->data.diagass.cdata);
3145                 break;
3146         case QETH_DIAGS_TRAP_DISARM:
3147                 cmd->data.diagass.options = 0x0001;
3148                 break;
3149         case QETH_DIAGS_TRAP_CAPTURE:
3150                 break;
3151         }
3152         return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3153 }
3154 EXPORT_SYMBOL_GPL(qeth_hw_trap);
3155
3156 static int qeth_check_qdio_errors(struct qeth_card *card,
3157                                   struct qdio_buffer *buf,
3158                                   unsigned int qdio_error,
3159                                   const char *dbftext)
3160 {
3161         if (qdio_error) {
3162                 QETH_CARD_TEXT(card, 2, dbftext);
3163                 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3164                                buf->element[15].sflags);
3165                 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3166                                buf->element[14].sflags);
3167                 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3168                 if ((buf->element[15].sflags) == 0x12) {
3169                         card->stats.rx_dropped++;
3170                         return 0;
3171                 } else
3172                         return 1;
3173         }
3174         return 0;
3175 }
3176
3177 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3178 {
3179         struct qeth_qdio_q *queue = card->qdio.in_q;
3180         struct list_head *lh;
3181         int count;
3182         int i;
3183         int rc;
3184         int newcount = 0;
3185
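        /*
         * count = pool size minus the buffers still outstanding between
         * 'index' and next_buf_to_init, with wrap-around on the 128-entry
         * QDIO ring; i.e. how many buffers can be refilled right now.
         */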
3186         count = (index < queue->next_buf_to_init)?
3187                 card->qdio.in_buf_pool.buf_count -
3188                 (queue->next_buf_to_init - index) :
3189                 card->qdio.in_buf_pool.buf_count -
3190                 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3191         /* only requeue at a certain threshold to avoid SIGAs */
3192         if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3193                 for (i = queue->next_buf_to_init;
3194                      i < queue->next_buf_to_init + count; ++i) {
3195                         if (qeth_init_input_buffer(card,
3196                                 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3197                                 break;
3198                         } else {
3199                                 newcount++;
3200                         }
3201                 }
3202
3203                 if (newcount < count) {
3204                         /* we are in a memory shortage, so we switch back
3205                            to traditional skb allocation and drop packets */
3206                         atomic_set(&card->force_alloc_skb, 3);
3207                         count = newcount;
3208                 } else {
3209                         atomic_add_unless(&card->force_alloc_skb, -1, 0);
3210                 }
3211
3212                 if (!count) {
3213                         i = 0;
3214                         list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3215                                 i++;
3216                         if (i == card->qdio.in_buf_pool.buf_count) {
3217                                 QETH_CARD_TEXT(card, 2, "qsarbw");
3218                                 card->reclaim_index = index;
3219                                 schedule_delayed_work(
3220                                         &card->buffer_reclaim_work,
3221                                         QETH_RECLAIM_WORK_TIME);
3222                         }
3223                         return;
3224                 }
3225
3226                 /*
3227                  * According to the old code, requeueing all 128 buffers
3228                  * should be avoided in order to benefit from PCI avoidance.
3229                  * This function therefore keeps at least one buffer (the one
3230                  * at 'index') un-requeued; it becomes the first buffer to be
3231                  * requeued the next time.
3232                  */
3233                 if (card->options.performance_stats) {
3234                         card->perf_stats.inbound_do_qdio_cnt++;
3235                         card->perf_stats.inbound_do_qdio_start_time =
3236                                 qeth_get_micros();
3237                 }
3238                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3239                              queue->next_buf_to_init, count);
3240                 if (card->options.performance_stats)
3241                         card->perf_stats.inbound_do_qdio_time +=
3242                                 qeth_get_micros() -
3243                                 card->perf_stats.inbound_do_qdio_start_time;
3244                 if (rc) {
3245                         QETH_CARD_TEXT(card, 2, "qinberr");
3246                 }
3247                 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3248                                           QDIO_MAX_BUFFERS_PER_Q;
3249         }
3250 }
3251
3252 static void qeth_buffer_reclaim_work(struct work_struct *work)
3253 {
3254         struct qeth_card *card = container_of(work, struct qeth_card,
3255                 buffer_reclaim_work.work);
3256
3257         QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3258         qeth_queue_input_buffer(card, card->reclaim_index);
3259 }
3260
3261 static void qeth_handle_send_error(struct qeth_card *card,
3262                 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3263 {
3264         int sbalf15 = buffer->buffer->element[15].sflags;
3265
3266         QETH_CARD_TEXT(card, 6, "hdsnderr");
3267         if (card->info.type == QETH_CARD_TYPE_IQD) {
3268                 if (sbalf15 == 0) {
3269                         qdio_err = 0;
3270                 } else {
3271                         qdio_err = 1;
3272                 }
3273         }
3274         qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3275
3276         if (!qdio_err)
3277                 return;
3278
3279         if ((sbalf15 >= 15) && (sbalf15 <= 31))
3280                 return;
3281
3282         QETH_CARD_TEXT(card, 1, "lnkfail");
3283         QETH_CARD_TEXT_(card, 1, "%04x %02x",
3284                        (u16)qdio_err, (u8)sbalf15);
3285 }
3286
3287 /**
3288  * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3289  * @queue: queue to check for packing buffer
3290  *
3291  * Returns number of buffers that were prepared for flush.
3292  */
3293 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3294 {
3295         struct qeth_qdio_out_buffer *buffer;
3296
3297         buffer = queue->bufs[queue->next_buf_to_fill];
3298         if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3299             (buffer->next_element_to_fill > 0)) {
3300                 /* it's a packing buffer */
3301                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3302                 queue->next_buf_to_fill =
3303                         (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3304                 return 1;
3305         }
3306         return 0;
3307 }
3308
3309 /*
3310  * Switches to packing state if the number of used buffers on a queue
3311  * reaches a certain limit.
3312  */
3313 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3314 {
3315         if (!queue->do_pack) {
3316                 if (atomic_read(&queue->used_buffers)
3317                     >= QETH_HIGH_WATERMARK_PACK){
3318                         /* switch non-PACKING -> PACKING */
3319                         QETH_CARD_TEXT(queue->card, 6, "np->pack");
3320                         if (queue->card->options.performance_stats)
3321                                 queue->card->perf_stats.sc_dp_p++;
3322                         queue->do_pack = 1;
3323                 }
3324         }
3325 }
3326
3327 /*
3328  * Switches from packing to non-packing mode. If there is a packing
3329  * buffer on the queue this buffer will be prepared to be flushed.
3330  * In that case 1 is returned to inform the caller. If no buffer
3331  * has to be flushed, zero is returned.
3332  */
3333 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3334 {
3335         if (queue->do_pack) {
3336                 if (atomic_read(&queue->used_buffers)
3337                     <= QETH_LOW_WATERMARK_PACK) {
3338                         /* switch PACKING -> non-PACKING */
3339                         QETH_CARD_TEXT(queue->card, 6, "pack->np");
3340                         if (queue->card->options.performance_stats)
3341                                 queue->card->perf_stats.sc_p_dp++;
3342                         queue->do_pack = 0;
3343                         return qeth_prep_flush_pack_buffer(queue);
3344                 }
3345         }
3346         return 0;
3347 }
3348
3349 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3350                                int count)
3351 {
3352         struct qeth_qdio_out_buffer *buf;
3353         int rc;
3354         int i;
3355         unsigned int qdio_flags;
3356
3357         for (i = index; i < index + count; ++i) {
3358                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3359                 buf = queue->bufs[bidx];
3360                 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3361                                 SBAL_EFLAGS_LAST_ENTRY;
3362
3363                 if (queue->bufstates)
3364                         queue->bufstates[bidx].user = buf;
3365
3366                 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
3367                         continue;
3368
3369                 if (!queue->do_pack) {
3370                         if ((atomic_read(&queue->used_buffers) >=
3371                                 (QETH_HIGH_WATERMARK_PACK -
3372                                  QETH_WATERMARK_PACK_FUZZ)) &&
3373                             !atomic_read(&queue->set_pci_flags_count)) {
3374                                 /* it's likely that we'll go to packing
3375                                  * mode soon */
3376                                 atomic_inc(&queue->set_pci_flags_count);
3377                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3378                         }
3379                 } else {
3380                         if (!atomic_read(&queue->set_pci_flags_count)) {
3381                                 /*
3382                                  * there is no outstanding PCI any more, so we
3383                                  * have to request one to be sure that the PCI
3384                                  * will wake us at some point in the future;
3385                                  * then we can flush packed buffers that might
3386                                  * still be hanging around, which can happen
3387                                  * if no further send was requested by the stack
3388                                  */
3389                                 atomic_inc(&queue->set_pci_flags_count);
3390                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3391                         }
3392                 }
3393         }
3394
3395         netif_trans_update(queue->card->dev);
3396         if (queue->card->options.performance_stats) {
3397                 queue->card->perf_stats.outbound_do_qdio_cnt++;
3398                 queue->card->perf_stats.outbound_do_qdio_start_time =
3399                         qeth_get_micros();
3400         }
3401         qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3402         if (atomic_read(&queue->set_pci_flags_count))
3403                 qdio_flags |= QDIO_FLAG_PCI_OUT;
3404         atomic_add(count, &queue->used_buffers);
3405
3406         rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3407                      queue->queue_no, index, count);
3408         if (queue->card->options.performance_stats)
3409                 queue->card->perf_stats.outbound_do_qdio_time +=
3410                         qeth_get_micros() -
3411                         queue->card->perf_stats.outbound_do_qdio_start_time;
3412         if (rc) {
3413                 queue->card->stats.tx_errors += count;
3414                 /* ignore temporary SIGA errors without busy condition */
3415                 if (rc == -ENOBUFS)
3416                         return;
3417                 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3418                 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3419                 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3420                 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3421                 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3422
3423                 /* this must not happen under normal circumstances; if it
3424                  * does, something is really wrong -> recover */
3425                 qeth_schedule_recovery(queue->card);
3426                 return;
3427         }
3428         if (queue->card->options.performance_stats)
3429                 queue->card->perf_stats.bufs_sent += count;
3430 }
3431
3432 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3433 {
3434         int index;
3435         int flush_cnt = 0;
3436         int q_was_packing = 0;
3437
3438         /*
3439          * check if we have to switch to non-packing mode or if
3440          * we have to get a pci flag out on the queue
3441          */
3442         if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3443             !atomic_read(&queue->set_pci_flags_count)) {
3444                 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3445                                 QETH_OUT_Q_UNLOCKED) {
3446                         /*
3447                          * If we get in here, there was no action in
3448                          * do_send_packet. So, we check if there is a
3449                          * packing buffer to be flushed here.
3450                          */
3451                         netif_stop_queue(queue->card->dev);
3452                         index = queue->next_buf_to_fill;
3453                         q_was_packing = queue->do_pack;
3454                         /* queue->do_pack may change */
3455                         barrier();
3456                         flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3457                         if (!flush_cnt &&
3458                             !atomic_read(&queue->set_pci_flags_count))
3459                                 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3460                         if (queue->card->options.performance_stats &&
3461                             q_was_packing)
3462                                 queue->card->perf_stats.bufs_sent_pack +=
3463                                         flush_cnt;
3464                         if (flush_cnt)
3465                                 qeth_flush_buffers(queue, index, flush_cnt);
3466                         atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3467                 }
3468         }
3469 }
3470
3471 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3472                                  unsigned long card_ptr)
3473 {
3474         struct qeth_card *card = (struct qeth_card *)card_ptr;
3475
3476         if (card->dev->flags & IFF_UP)
3477                 napi_schedule(&card->napi);
3478 }
3479
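/*
 * The completion queue (CQ) is an extra input queue that, when enabled,
 * delivers asynchronous completions for TX buffers which QDIO reported
 * as pending. Changing the CQ mode requires rebuilding the QDIO buffers,
 * hence the card must be down or in recovery, and the buffers are freed
 * before the new mode is recorded.
 */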
3480 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3481 {
3482         int rc;
3483
3484         if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3485                 rc = -1;
3486                 goto out;
3487         } else {
3488                 if (card->options.cq == cq) {
3489                         rc = 0;
3490                         goto out;
3491                 }
3492
3493                 if (card->state != CARD_STATE_DOWN &&
3494                     card->state != CARD_STATE_RECOVER) {
3495                         rc = -1;
3496                         goto out;
3497                 }
3498
3499                 qeth_free_qdio_buffers(card);
3500                 card->options.cq = cq;
3501                 rc = 0;
3502         }
3503 out:
3504         return rc;
3505
3506 }
3507 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3508
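/*
 * Input handler for the completion queue: each used element of a CQ
 * buffer carries the physical address of an asynchronous operation
 * block (AOB) that describes a formerly pending TX buffer; it is passed
 * to qeth_qdio_handle_aob() before the CQ buffer is scrubbed and
 * requeued.
 */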
3509 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3510                                  unsigned int queue, int first_element,
3511                                  int count)
3512 {
3513         struct qeth_qdio_q *cq = card->qdio.c_q;
3514         int i;
3515         int rc;
3516
3517         if (!qeth_is_cq(card, queue))
3518                 goto out;
3519
3520         QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3521         QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3522         QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3523
3524         if (qdio_err) {
3525                 netif_stop_queue(card->dev);
3526                 qeth_schedule_recovery(card);
3527                 goto out;
3528         }
3529
3530         if (card->options.performance_stats) {
3531                 card->perf_stats.cq_cnt++;
3532                 card->perf_stats.cq_start_time = qeth_get_micros();
3533         }
3534
3535         for (i = first_element; i < first_element + count; ++i) {
3536                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3537                 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3538                 int e = 0;
3539
3540                 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3541                        buffer->element[e].addr) {
3542                         unsigned long phys_aob_addr;
3543
3544                         phys_aob_addr = (unsigned long) buffer->element[e].addr;
3545                         qeth_qdio_handle_aob(card, phys_aob_addr);
3546                         ++e;
3547                 }
3548                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3549         }
3550         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3551                     card->qdio.c_q->next_buf_to_init,
3552                     count);
3553         if (rc) {
3554                 dev_warn(&card->gdev->dev,
3555                         "QDIO reported an error, rc=%i\n", rc);
3556                 QETH_CARD_TEXT(card, 2, "qcqherr");
3557         }
3558         card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3559                                    + count) % QDIO_MAX_BUFFERS_PER_Q;
3560
3561         netif_wake_queue(card->dev);
3562
3563         if (card->options.performance_stats) {
3564                 int delta_t = qeth_get_micros();
3565                 delta_t -= card->perf_stats.cq_start_time;
3566                 card->perf_stats.cq_time += delta_t;
3567         }
3568 out:
3569         return;
3570 }
3571
3572 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3573                                     unsigned int qdio_err, int queue,
3574                                     int first_elem, int count,
3575                                     unsigned long card_ptr)
3576 {
3577         struct qeth_card *card = (struct qeth_card *)card_ptr;
3578
3579         QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3580         QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3581
3582         if (qeth_is_cq(card, queue))
3583                 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3584         else if (qdio_err)
3585                 qeth_schedule_recovery(card);
3586 }
3587
3588 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3589                                      unsigned int qdio_error, int __queue,
3590                                      int first_element, int count,
3591                                      unsigned long card_ptr)
3592 {
3593         struct qeth_card *card        = (struct qeth_card *) card_ptr;
3594         struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3595         struct qeth_qdio_out_buffer *buffer;
3596         int i;
3597
3598         QETH_CARD_TEXT(card, 6, "qdouhdl");
3599         if (qdio_error & QDIO_ERROR_FATAL) {
3600                 QETH_CARD_TEXT(card, 2, "achkcond");
3601                 netif_stop_queue(card->dev);
3602                 qeth_schedule_recovery(card);
3603                 return;
3604         }
3605         if (card->options.performance_stats) {
3606                 card->perf_stats.outbound_handler_cnt++;
3607                 card->perf_stats.outbound_handler_start_time =
3608                         qeth_get_micros();
3609         }
3610         for (i = first_element; i < (first_element + count); ++i) {
3611                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3612                 buffer = queue->bufs[bidx];
3613                 qeth_handle_send_error(card, buffer, qdio_error);
3614
3615                 if (queue->bufstates &&
3616                     (queue->bufstates[bidx].flags &
3617                      QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3618                         WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3619
3620                         if (atomic_cmpxchg(&buffer->state,
3621                                            QETH_QDIO_BUF_PRIMED,
3622                                            QETH_QDIO_BUF_PENDING) ==
3623                                 QETH_QDIO_BUF_PRIMED) {
3624                                 qeth_notify_skbs(queue, buffer,
3625                                                  TX_NOTIFY_PENDING);
3626                         }
3627                         QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
3628
3629                         /* prepare the queue slot for re-use: */
3630                         qeth_scrub_qdio_buffer(buffer->buffer,
3631                                                QETH_MAX_BUFFER_ELEMENTS(card));
3632                         if (qeth_init_qdio_out_buf(queue, bidx)) {
3633                                 QETH_CARD_TEXT(card, 2, "outofbuf");
3634                                 qeth_schedule_recovery(card);
3635                         }
3636                 } else {
3637                         if (card->options.cq == QETH_CQ_ENABLED) {
3638                                 enum iucv_tx_notify n;
3639
3640                                 n = qeth_compute_cq_notification(
3641                                         buffer->buffer->element[15].sflags, 0);
3642                                 qeth_notify_skbs(queue, buffer, n);
3643                         }
3644
3645                         qeth_clear_output_buffer(queue, buffer);
3646                 }
3647                 qeth_cleanup_handled_pending(queue, bidx, 0);
3648         }
3649         atomic_sub(count, &queue->used_buffers);
3650         /* check if we need to do something on this outbound queue */
3651         if (card->info.type != QETH_CARD_TYPE_IQD)
3652                 qeth_check_outbound_queue(queue);
3653
3654         netif_wake_queue(queue->card->dev);
3655         if (card->options.performance_stats)
3656                 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3657                         card->perf_stats.outbound_handler_start_time;
3658 }
3659
3660 /* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3661 static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
3662 {
3663         if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
3664                 return 2;
3665         return queue_num;
3666 }
3667
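/*
 * The mappings below select a lower queue number for a higher IP
 * precedence, skb->priority or VLAN PCP value (queue 0 carrying the
 * most urgent traffic), and qeth_cut_iqd_prio() folds queue 3 onto
 * queue 2 because unicast traffic must not use queue 3 on HiperSockets.
 */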
3668 /**
3669  * Note: Function assumes that we have 4 outbound queues.
3670  */
3671 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3672                             int ipv)
3673 {
3674         __be16 *tci;
3675         u8 tos;
3676
3677         switch (card->qdio.do_prio_queueing) {
3678         case QETH_PRIO_Q_ING_TOS:
3679         case QETH_PRIO_Q_ING_PREC:
3680                 switch (ipv) {
3681                 case 4:
3682                         tos = ipv4_get_dsfield(ip_hdr(skb));
3683                         break;
3684                 case 6:
3685                         tos = ipv6_get_dsfield(ipv6_hdr(skb));
3686                         break;
3687                 default:
3688                         return card->qdio.default_out_queue;
3689                 }
3690                 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3691                         return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
3692                 if (tos & IPTOS_MINCOST)
3693                         return qeth_cut_iqd_prio(card, 3);
3694                 if (tos & IPTOS_RELIABILITY)
3695                         return 2;
3696                 if (tos & IPTOS_THROUGHPUT)
3697                         return 1;
3698                 if (tos & IPTOS_LOWDELAY)
3699                         return 0;
3700                 break;
3701         case QETH_PRIO_Q_ING_SKB:
3702                 if (skb->priority > 5)
3703                         return 0;
3704                 return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
3705         case QETH_PRIO_Q_ING_VLAN:
3706                 tci = &((struct ethhdr *)skb->data)->h_proto;
3707                 if (be16_to_cpu(*tci) == ETH_P_8021Q)
3708                         return qeth_cut_iqd_prio(card,
3709                         ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
3710                 break;
3711         default:
3712                 break;
3713         }
3714         return card->qdio.default_out_queue;
3715 }
3716 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3717
3718 /**
3719  * qeth_get_elements_for_frags() -      find number of SBALEs for skb frags.
3720  * @skb:                                SKB address
3721  *
3722  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3723  * fragmented part of the SKB. Returns zero for linear SKB.
3724  */
3725 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3726 {
3727         int cnt, elements = 0;
3728
3729         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3730                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
3731
3732                 elements += qeth_get_elements_for_range(
3733                         (addr_t)skb_frag_address(frag),
3734                         (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3735         }
3736         return elements;
3737 }
3738
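/*
 * Element counting is page based: one SBAL element can only address a
 * contiguous range within a single page, so for example a chunk of
 * linear data that straddles a page boundary already needs two elements.
 */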
3739 /**
3740  * qeth_count_elements() -      Counts the number of QDIO buffer elements needed
3741  *                              to transmit an skb.
3742  * @skb:                        the skb to operate on.
3743  * @data_offset:                skip this part of the skb's linear data
3744  *
3745  * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3746  * skb's data (both its linear part and paged fragments).
3747  */
3748 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3749 {
3750         unsigned int elements = qeth_get_elements_for_frags(skb);
3751         addr_t end = (addr_t)skb->data + skb_headlen(skb);
3752         addr_t start = (addr_t)skb->data + data_offset;
3753
3754         if (start != end)
3755                 elements += qeth_get_elements_for_range(start, end);
3756         return elements;
3757 }
3758 EXPORT_SYMBOL_GPL(qeth_count_elements);
3759
3760 #define QETH_HDR_CACHE_OBJ_SIZE         (sizeof(struct qeth_hdr_tso) + \
3761                                          MAX_TCP_HEADER)
3762
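/*
 * Illustration of the layout check in qeth_add_hw_header(): if skb->data
 * starts exactly at a page boundary, pushing hdr_len bytes in front of
 * it would cross into the previous page and separate the HW header from
 * the protocol headers; with a non-zero proto_len this forces the header
 * to be built in a qeth_core_header_cache object instead, with the
 * protocol headers copied behind it.
 */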
3763 /**
3764  * qeth_add_hw_header() - add a HW header to an skb.
3765  * @skb: skb that the HW header should be added to.
3766  * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3767  *       it contains a valid pointer to a qeth_hdr.
3768  * @hdr_len: length of the HW header.
3769  * @proto_len: length of protocol headers that need to be in the same page
3770  *             as the HW header.
3771  *
3772  * Returns the pushed length. If the header can't be pushed onto the skb
3773  * (e.g. because it would cross a page boundary), it is allocated from
3774  * the cache instead and 0 is returned.
3775  * The number of needed buffer elements is returned in @elements.
3776  * A negative return value indicates an error while creating the header.
3777  */
3778 static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
3779                               struct qeth_hdr **hdr, unsigned int hdr_len,
3780                               unsigned int proto_len, unsigned int *elements)
3781 {
3782         const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3783         const unsigned int contiguous = proto_len ? proto_len : 1;
3784         unsigned int __elements;
3785         addr_t start, end;
3786         bool push_ok;
3787         int rc;
3788
3789 check_layout:
3790         start = (addr_t)skb->data - hdr_len;
3791         end = (addr_t)skb->data;
3792
3793         if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3794                 /* Push HW header into same page as first protocol header. */
3795                 push_ok = true;
3796                 /* ... but TSO always needs a separate element for headers: */
3797                 if (skb_is_gso(skb))
3798                         __elements = 1 + qeth_count_elements(skb, proto_len);
3799                 else
3800                         __elements = qeth_count_elements(skb, 0);
3801         } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
3802                 /* Push HW header into a new page. */
3803                 push_ok = true;
3804                 __elements = 1 + qeth_count_elements(skb, 0);
3805         } else {
3806                 /* Use header cache, copy protocol headers up. */
3807                 push_ok = false;
3808                 __elements = 1 + qeth_count_elements(skb, proto_len);
3809         }
3810
3811         /* Compress skb to fit into one IO buffer: */
3812         if (__elements > max_elements) {
3813                 if (!skb_is_nonlinear(skb)) {
3814                         /* Drop it, no easy way of shrinking it further. */
3815                         QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3816                                          max_elements, __elements, skb->len);
3817                         return -E2BIG;
3818                 }
3819
3820                 rc = skb_linearize(skb);
3821                 if (card->options.performance_stats) {
3822                         if (rc)
3823                                 card->perf_stats.tx_linfail++;
3824                         else
3825                                 card->perf_stats.tx_lin++;
3826                 }
3827                 if (rc)
3828                         return rc;
3829
3830                 /* Linearization changed the layout, re-evaluate: */
3831                 goto check_layout;
3832         }
3833
3834         *elements = __elements;
3835         /* Add the header: */
3836         if (push_ok) {
3837                 *hdr = skb_push(skb, hdr_len);
3838                 return hdr_len;
3839         }
3840         /* fall back */
3841         if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3842                 return -E2BIG;
3843         *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3844         if (!*hdr)
3845                 return -ENOMEM;
3846         /* Copy protocol headers behind HW header: */
3847         skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3848         return 0;
3849 }
3850
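/*
 * Maps an skb into SBAL elements: the linear part and each page frag are
 * sliced at page boundaries, since one element can only describe a
 * contiguous range within a single page. The first element is flagged
 * FIRST_FRAG when more elements follow, and the final element is
 * rewritten to LAST_FRAG once the whole skb has been mapped.
 */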
3851 static void __qeth_fill_buffer(struct sk_buff *skb,
3852                                struct qeth_qdio_out_buffer *buf,
3853                                bool is_first_elem, unsigned int offset)
3854 {
3855         struct qdio_buffer *buffer = buf->buffer;
3856         int element = buf->next_element_to_fill;
3857         int length = skb_headlen(skb) - offset;
3858         char *data = skb->data + offset;
3859         int length_here, cnt;
3860
3861         /* map linear part into buffer element(s) */
3862         while (length > 0) {
3863                 /* length_here is the remaining amount of data in this page */
3864                 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3865                 if (length < length_here)
3866                         length_here = length;
3867
3868                 buffer->element[element].addr = data;
3869                 buffer->element[element].length = length_here;
3870                 length -= length_here;
3871                 if (is_first_elem) {
3872                         is_first_elem = false;
3873                         if (length || skb_is_nonlinear(skb))
3874                                 /* skb needs additional elements */
3875                                 buffer->element[element].eflags =
3876                                         SBAL_EFLAGS_FIRST_FRAG;
3877                         else
3878                                 buffer->element[element].eflags = 0;
3879                 } else {
3880                         buffer->element[element].eflags =
3881                                 SBAL_EFLAGS_MIDDLE_FRAG;
3882                 }
3883                 data += length_here;
3884                 element++;
3885         }
3886
3887         /* map page frags into buffer element(s) */
3888         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3889                 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3890
3891                 data = skb_frag_address(frag);
3892                 length = skb_frag_size(frag);
3893                 while (length > 0) {
3894                         length_here = PAGE_SIZE -
3895                                 ((unsigned long) data % PAGE_SIZE);
3896                         if (length < length_here)
3897                                 length_here = length;
3898
3899                         buffer->element[element].addr = data;
3900                         buffer->element[element].length = length_here;
3901                         buffer->element[element].eflags =
3902                                 SBAL_EFLAGS_MIDDLE_FRAG;
3903                         length -= length_here;
3904                         data += length_here;
3905                         element++;
3906                 }
3907         }
3908
3909         if (buffer->element[element - 1].eflags)
3910                 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3911         buf->next_element_to_fill = element;
3912 }
3913
3914 /**
3915  * qeth_fill_buffer() - map skb into an output buffer
3916  * @queue:      QDIO queue to submit the buffer on
3917  * @buf:        buffer to transport the skb
3918  * @skb:        skb to map into the buffer
3919  * @hdr:        qeth_hdr for this skb. Either at skb->data, or allocated
3920  *              from qeth_core_header_cache.
3921  * @offset:     when mapping the skb, start at skb->data + offset
3922  * @hd_len:     if > 0, build a dedicated header element of this size
3923  */
3924 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3925                             struct qeth_qdio_out_buffer *buf,
3926                             struct sk_buff *skb, struct qeth_hdr *hdr,
3927                             unsigned int offset, unsigned int hd_len)
3928 {
3929         struct qdio_buffer *buffer = buf->buffer;
3930         bool is_first_elem = true;
3931         int flush_cnt = 0;
3932
3933         __skb_queue_tail(&buf->skb_list, skb);
3934
3935         /* build dedicated header element */
3936         if (hd_len) {
3937                 int element = buf->next_element_to_fill;
3938                 is_first_elem = false;
3939
3940                 buffer->element[element].addr = hdr;
3941                 buffer->element[element].length = hd_len;
3942                 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3943                 /* remember to free cache-allocated qeth_hdr: */
3944                 buf->is_header[element] = ((void *)hdr != skb->data);
3945                 buf->next_element_to_fill++;
3946         }
3947
3948         __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3949
3950         if (!queue->do_pack) {
3951                 QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
3952                 /* set state to PRIMED -> will be flushed */
3953                 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3954                 flush_cnt = 1;
3955         } else {
3956                 QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
3957                 if (queue->card->options.performance_stats)
3958                         queue->card->perf_stats.skbs_sent_pack++;
3959                 if (buf->next_element_to_fill >=
3960                                 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
3961                         /*
3962                          * packed buffer is full -> set state PRIMED
3963                          * -> will be flushed
3964                          */
3965                         atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3966                         flush_cnt = 1;
3967                 }
3968         }
3969         return flush_cnt;
3970 }
3971
3972 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
3973                                     struct sk_buff *skb, struct qeth_hdr *hdr,
3974                                     unsigned int offset, unsigned int hd_len)
3975 {
3976         int index = queue->next_buf_to_fill;
3977         struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
3978
3979         /*
3980          * check if buffer is empty to make sure that we do not 'overtake'
3981          * ourselves and try to fill a buffer that is already primed
3982          */
3983         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3984                 return -EBUSY;
3985         queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
3986         qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3987         qeth_flush_buffers(queue, index, 1);
3988         return 0;
3989 }
3990
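/*
 * Transmit path with packing support: queue->state acts as a small lock
 * plus notification counter. The sender spins to move it from UNLOCKED
 * to LOCKED; the output handler may raise it to LOCKED_FLUSH to ask the
 * sender to re-check the packing state before unlocking, which is what
 * the atomic_dec_return() loop at the end of this function handles.
 */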
3991 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3992                         struct sk_buff *skb, struct qeth_hdr *hdr,
3993                         unsigned int offset, unsigned int hd_len,
3994                         int elements_needed)
3995 {
3996         struct qeth_qdio_out_buffer *buffer;
3997         int start_index;
3998         int flush_count = 0;
3999         int do_pack = 0;
4000         int tmp;
4001         int rc = 0;
4002
4003         /* spin until we get the queue ... */
4004         while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4005                               QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4006         start_index = queue->next_buf_to_fill;
4007         buffer = queue->bufs[queue->next_buf_to_fill];
4008         /*
4009          * check if buffer is empty to make sure that we do not 'overtake'
4010          * ourselves and try to fill a buffer that is already primed
4011          */
4012         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4013                 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4014                 return -EBUSY;
4015         }
4016         /* check if we need to switch packing state of this queue */
4017         qeth_switch_to_packing_if_needed(queue);
4018         if (queue->do_pack) {
4019                 do_pack = 1;
4020                 /* does packet fit in current buffer? */
4021                 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
4022                     buffer->next_element_to_fill) < elements_needed) {
4023                         /* ... no -> set state PRIMED */
4024                         atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4025                         flush_count++;
4026                         queue->next_buf_to_fill =
4027                                 (queue->next_buf_to_fill + 1) %
4028                                 QDIO_MAX_BUFFERS_PER_Q;
4029                         buffer = queue->bufs[queue->next_buf_to_fill];
4030                         /* we did a step forward, so check buffer state
4031                          * again */
4032                         if (atomic_read(&buffer->state) !=
4033                             QETH_QDIO_BUF_EMPTY) {
4034                                 qeth_flush_buffers(queue, start_index,
4035                                                            flush_count);
4036                                 atomic_set(&queue->state,
4037                                                 QETH_OUT_Q_UNLOCKED);
4038                                 rc = -EBUSY;
4039                                 goto out;
4040                         }
4041                 }
4042         }
4043         tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
4044         queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4045                                   QDIO_MAX_BUFFERS_PER_Q;
4046         flush_count += tmp;
4047         if (flush_count)
4048                 qeth_flush_buffers(queue, start_index, flush_count);
4049         else if (!atomic_read(&queue->set_pci_flags_count))
4050                 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4051         /*
4052          * queue->state will go from LOCKED -> UNLOCKED or from
4053          * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4054          * (switch packing state or flush buffer to get another pci flag out).
4055          * In that case we will enter this loop
4056          */
4057         while (atomic_dec_return(&queue->state)) {
4058                 start_index = queue->next_buf_to_fill;
4059                 /* check if we can go back to non-packing state */
4060                 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4061                 /*
4062                  * check if we need to flush a packing buffer to get a pci
4063                  * flag out on the queue
4064                  */
4065                 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4066                         tmp = qeth_prep_flush_pack_buffer(queue);
4067                 if (tmp) {
4068                         qeth_flush_buffers(queue, start_index, tmp);
4069                         flush_count += tmp;
4070                 }
4071         }
4072 out:
4073         /* at this point the queue is UNLOCKED again */
4074         if (queue->card->options.performance_stats && do_pack)
4075                 queue->card->perf_stats.bufs_sent_pack += flush_count;
4076
4077         return rc;
4078 }
4079 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4080
4081 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4082                               unsigned int payload_len, struct sk_buff *skb,
4083                               unsigned int proto_len)
4084 {
4085         struct qeth_hdr_ext_tso *ext = &hdr->ext;
4086
4087         ext->hdr_tot_len = sizeof(*ext);
4088         ext->imb_hdr_no = 1;
4089         ext->hdr_type = 1;
4090         ext->hdr_version = 1;
4091         ext->hdr_len = 28;
4092         ext->payload_len = payload_len;
4093         ext->mss = skb_shinfo(skb)->gso_size;
4094         ext->dg_hdr_len = proto_len;
4095 }
4096
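/*
 * Common transmit path for the discipline drivers: build the HW header
 * (pushed into the skb's headroom when possible, otherwise taken from
 * qeth_core_header_cache), let the caller's fill_header() populate it,
 * add the TSO extension for GSO skbs, and hand the skb either to the IQD
 * fast path or to the packing-capable qeth_do_send_packet() path. On
 * error a separately allocated header is freed again and a pushed header
 * is pulled back off the skb.
 */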
4097 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4098               struct qeth_qdio_out_q *queue, int ipv, int cast_type,
4099               void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
4100                                   struct sk_buff *skb, int ipv, int cast_type,
4101                                   unsigned int data_len))
4102 {
4103         unsigned int proto_len, hw_hdr_len;
4104         unsigned int frame_len = skb->len;
4105         bool is_tso = skb_is_gso(skb);
4106         unsigned int data_offset = 0;
4107         struct qeth_hdr *hdr = NULL;
4108         unsigned int hd_len = 0;
4109         unsigned int elements;
4110         int push_len, rc;
4111         bool is_sg;
4112
4113         if (is_tso) {
4114                 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4115                 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4116         } else {
4117                 hw_hdr_len = sizeof(struct qeth_hdr);
4118                 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4119         }
4120
4121         rc = skb_cow_head(skb, hw_hdr_len);
4122         if (rc)
4123                 return rc;
4124
4125         push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
4126                                       &elements);
4127         if (push_len < 0)
4128                 return push_len;
4129         if (is_tso || !push_len) {
4130                 /* HW header needs its own buffer element. */
4131                 hd_len = hw_hdr_len + proto_len;
4132                 data_offset = push_len + proto_len;
4133         }
4134         memset(hdr, 0, hw_hdr_len);
4135         fill_header(card, hdr, skb, ipv, cast_type, frame_len);
4136         if (is_tso)
4137                 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4138                                   frame_len - proto_len, skb, proto_len);
4139
4140         is_sg = skb_is_nonlinear(skb);
4141         if (IS_IQD(card)) {
4142                 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
4143                                               hd_len);
4144         } else {
4145                 /* TODO: drop skb_orphan() once TX completion is fast enough */
4146                 skb_orphan(skb);
4147                 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4148                                          hd_len, elements);
4149         }
4150
4151         if (!rc) {
4152                 if (card->options.performance_stats) {
4153                         card->perf_stats.buf_elements_sent += elements;
4154                         if (is_sg)
4155                                 card->perf_stats.sg_skbs_sent++;
4156                         if (is_tso) {
4157                                 card->perf_stats.large_send_bytes += frame_len;
4158                                 card->perf_stats.large_send_cnt++;
4159                         }
4160                 }
4161         } else {
4162                 if (!push_len)
4163                         kmem_cache_free(qeth_core_header_cache, hdr);
4164                 if (rc == -EBUSY)
4165                         /* roll back to ETH header */
4166                         skb_pull(skb, push_len);
4167         }
4168         return rc;
4169 }
4170 EXPORT_SYMBOL_GPL(qeth_xmit);
4171
4172 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4173                 struct qeth_reply *reply, unsigned long data)
4174 {
4175         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4176         struct qeth_ipacmd_setadpparms *setparms;
4177
4178         QETH_CARD_TEXT(card, 4, "prmadpcb");
4179
4180         setparms = &(cmd->data.setadapterparms);
4181         if (qeth_setadpparms_inspect_rc(cmd)) {
4182                 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4183                 setparms->data.mode = SET_PROMISC_MODE_OFF;
4184         }
4185         card->info.promisc_mode = setparms->data.mode;
4186         return 0;
4187 }
4188
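/*
 * Align the adapter's promiscuous mode with the netdevice's IFF_PROMISC
 * flag. Nothing is sent when the cached card->info.promisc_mode already
 * matches; otherwise a SET_PROMISC_MODE adapter command is issued and
 * qeth_setadp_promisc_mode_cb() records the mode the adapter accepted
 * (falling back to OFF on error).
 */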
4189 void qeth_setadp_promisc_mode(struct qeth_card *card)
4190 {
4191         enum qeth_ipa_promisc_modes mode;
4192         struct net_device *dev = card->dev;
4193         struct qeth_cmd_buffer *iob;
4194         struct qeth_ipa_cmd *cmd;
4195
4196         QETH_CARD_TEXT(card, 4, "setprom");
4197
4198         if (((dev->flags & IFF_PROMISC) &&
4199              (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
4200             (!(dev->flags & IFF_PROMISC) &&
4201              (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
4202                 return;
4203         mode = SET_PROMISC_MODE_OFF;
4204         if (dev->flags & IFF_PROMISC)
4205                 mode = SET_PROMISC_MODE_ON;
4206         QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4207
4208         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4209                         sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
4210         if (!iob)
4211                 return;
4212         cmd = __ipa_cmd(iob);
4213         cmd->data.setadapterparms.data.mode = mode;
4214         qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4215 }
4216 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4217
4218 struct net_device_stats *qeth_get_stats(struct net_device *dev)
4219 {
4220         struct qeth_card *card;
4221
4222         card = dev->ml_priv;
4223
4224         QETH_CARD_TEXT(card, 5, "getstat");
4225
4226         return &card->stats;
4227 }
4228 EXPORT_SYMBOL_GPL(qeth_get_stats);
4229
4230 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4231                 struct qeth_reply *reply, unsigned long data)
4232 {
4233         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4234         struct qeth_ipacmd_setadpparms *adp_cmd;
4235
4236         QETH_CARD_TEXT(card, 4, "chgmaccb");
4237         if (qeth_setadpparms_inspect_rc(cmd))
4238                 return 0;
4239
4240         adp_cmd = &cmd->data.setadapterparms;
4241         if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4242             !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4243                 return 0;
4244
4245         ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4246         return 0;
4247 }
4248
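/*
 * Ask the adapter for its MAC address via the ALTER_MAC_ADDRESS
 * (READ_MAC) command. The callback above copies the returned address
 * into the netdevice, except for layer2 OSD devices outside a VM NIC
 * setup that do not report QETH_SETADP_FLAGS_VIRTUAL_MAC.
 */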
4249 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4250 {
4251         int rc;
4252         struct qeth_cmd_buffer *iob;
4253         struct qeth_ipa_cmd *cmd;
4254
4255         QETH_CARD_TEXT(card, 4, "chgmac");
4256
4257         iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4258                                    sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4259                                    sizeof(struct qeth_change_addr));
4260         if (!iob)
4261                 return -ENOMEM;
4262         cmd = __ipa_cmd(iob);
4263         cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4264         cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4265         ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4266                         card->dev->dev_addr);
4267         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4268                                NULL);
4269         return rc;
4270 }
4271 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4272
4273 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4274                 struct qeth_reply *reply, unsigned long data)
4275 {
4276         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4277         struct qeth_set_access_ctrl *access_ctrl_req;
4278         int fallback = *(int *)reply->param;
4279
4280         QETH_CARD_TEXT(card, 4, "setaccb");
4281         if (cmd->hdr.return_code)
4282                 return 0;
4283         qeth_setadpparms_inspect_rc(cmd);
4284
4285         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4286         QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4287         QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4288         QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
4289                 cmd->data.setadapterparms.hdr.return_code);
4290         if (cmd->data.setadapterparms.hdr.return_code !=
4291                                                 SET_ACCESS_CTRL_RC_SUCCESS)
4292                 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4293                                  access_ctrl_req->subcmd_code, CARD_DEVID(card),
4294                                  cmd->data.setadapterparms.hdr.return_code);
4295         switch (cmd->data.setadapterparms.hdr.return_code) {
4296         case SET_ACCESS_CTRL_RC_SUCCESS:
4297                 if (card->options.isolation == ISOLATION_MODE_NONE) {
4298                         dev_info(&card->gdev->dev,
4299                             "QDIO data connection isolation is deactivated\n");
4300                 } else {
4301                         dev_info(&card->gdev->dev,
4302                             "QDIO data connection isolation is activated\n");
4303                 }
4304                 break;
4305         case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4306                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4307                                  CARD_DEVID(card));
4308                 if (fallback)
4309                         card->options.isolation = card->options.prev_isolation;
4310                 break;
4311         case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4312                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4313                                  CARD_DEVID(card));
4314                 if (fallback)
4315                         card->options.isolation = card->options.prev_isolation;
4316                 break;
4317         case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4318                 dev_err(&card->gdev->dev, "Adapter does not "
4319                         "support QDIO data connection isolation\n");
4320                 break;
4321         case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4322                 dev_err(&card->gdev->dev,
4323                         "Adapter is dedicated. "
4324                         "QDIO data connection isolation not supported\n");
4325                 if (fallback)
4326                         card->options.isolation = card->options.prev_isolation;
4327                 break;
4328         case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4329                 dev_err(&card->gdev->dev,
4330                         "TSO does not permit QDIO data connection isolation\n");
4331                 if (fallback)
4332                         card->options.isolation = card->options.prev_isolation;
4333                 break;
4334         case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4335                 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4336                         "support reflective relay mode\n");
4337                 if (fallback)
4338                         card->options.isolation = card->options.prev_isolation;
4339                 break;
4340         case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4341                 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4342                                         "enabled at the adjacent switch port\n");
4343                 if (fallback)
4344                         card->options.isolation = card->options.prev_isolation;
4345                 break;
4346         case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4347                 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4348                                         "at the adjacent switch failed\n");
4349                 break;
4350         default:
4351                 /* this should never happen */
4352                 if (fallback)
4353                         card->options.isolation = card->options.prev_isolation;
4354                 break;
4355         }
4356         return 0;
4357 }
4358
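/*
 * Request the given QDIO data connection isolation mode from the
 * adapter. The callback above translates the adapter's return code into
 * log messages and, when 'fallback' is set, restores
 * card->options.isolation to its previous value for the error cases that
 * leave the old mode in effect.
 */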
4359 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4360                 enum qeth_ipa_isolation_modes isolation, int fallback)
4361 {
4362         int rc;
4363         struct qeth_cmd_buffer *iob;
4364         struct qeth_ipa_cmd *cmd;
4365         struct qeth_set_access_ctrl *access_ctrl_req;
4366
4367         QETH_CARD_TEXT(card, 4, "setacctl");
4368
4369         QETH_DBF_TEXT_(SETUP, 2, "setacctl");
4370         QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4371
4372         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4373                                    sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4374                                    sizeof(struct qeth_set_access_ctrl));
4375         if (!iob)
4376                 return -ENOMEM;
4377         cmd = __ipa_cmd(iob);
4378         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4379         access_ctrl_req->subcmd_code = isolation;
4380
4381         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4382                                &fallback);
4383         QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
4384         return rc;
4385 }
4386
4387 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4388 {
4389         int rc = 0;
4390
4391         QETH_CARD_TEXT(card, 4, "setactlo");
4392
4393         if ((card->info.type == QETH_CARD_TYPE_OSD ||
4394              card->info.type == QETH_CARD_TYPE_OSX) &&
4395              qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4396                 rc = qeth_setadpparms_set_access_ctrl(card,
4397                         card->options.isolation, fallback);
4398                 if (rc) {
4399                         QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL) on device %x failed, rc=%d\n",
4400                                          CARD_DEVID(card), rc);
4401                         rc = -EOPNOTSUPP;
4402                 }
4403         } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4404                 card->options.isolation = ISOLATION_MODE_NONE;
4405
4406                 dev_err(&card->gdev->dev, "Adapter does not "
4407                         "support QDIO data connection isolation\n");
4408                 rc = -EOPNOTSUPP;
4409         }
4410         return rc;
4411 }
4412 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4413
4414 void qeth_tx_timeout(struct net_device *dev)
4415 {
4416         struct qeth_card *card;
4417
4418         card = dev->ml_priv;
4419         QETH_CARD_TEXT(card, 4, "txtimeo");
4420         card->stats.tx_errors++;
4421         qeth_schedule_recovery(card);
4422 }
4423 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4424
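/*
 * Minimal MII register emulation: qeth devices have no real PHY, so
 * plausible static values are synthesized here (full duplex, link up,
 * autonegotiation complete, 100 Mbit/s unless the link type indicates a
 * faster OSA port).
 */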
4425 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4426 {
4427         struct qeth_card *card = dev->ml_priv;
4428         int rc = 0;
4429
4430         switch (regnum) {
4431         case MII_BMCR: /* Basic mode control register */
4432                 rc = BMCR_FULLDPLX;
4433                 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4434                     (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4435                     (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4436                     (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4437                         rc |= BMCR_SPEED100;
4438                 break;
4439         case MII_BMSR: /* Basic mode status register */
4440                 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4441                      BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4442                      BMSR_100BASE4;
4443                 break;
4444         case MII_PHYSID1: /* PHYS ID 1 */
4445                 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4446                      dev->dev_addr[2];
4447                 rc = (rc >> 5) & 0xFFFF;
4448                 break;
4449         case MII_PHYSID2: /* PHYS ID 2 */
4450                 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4451                 break;
4452         case MII_ADVERTISE: /* Advertisement control reg */
4453                 rc = ADVERTISE_ALL;
4454                 break;
4455         case MII_LPA: /* Link partner ability reg */
4456                 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4457                      LPA_100BASE4 | LPA_LPACK;
4458                 break;
4459         case MII_EXPANSION: /* Expansion register */
4460                 break;
4461         case MII_DCOUNTER: /* disconnect counter */
4462                 break;
4463         case MII_FCSCOUNTER: /* false carrier counter */
4464                 break;
4465         case MII_NWAYTEST: /* N-way auto-neg test register */
4466                 break;
4467         case MII_RERRCOUNTER: /* rx error counter */
4468                 rc = card->stats.rx_errors;
4469                 break;
4470         case MII_SREVISION: /* silicon revision */
4471                 break;
4472         case MII_RESV1: /* reserved 1 */
4473                 break;
4474         case MII_LBRERROR: /* loopback, rx, bypass error */
4475                 break;
4476         case MII_PHYADDR: /* physical address */
4477                 break;
4478         case MII_RESV2: /* reserved 2 */
4479                 break;
4480         case MII_TPISTATUS: /* TPI status for 10mbps */
4481                 break;
4482         case MII_NCONFIG: /* network interface config */
4483                 break;
4484         default:
4485                 break;
4486         }
4487         return rc;
4488 }
4489
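/*
 * Send an SNMP adapter command: adjust the total and per-PDU length
 * fields in the IPA PDU header to the variable-sized SNMP payload before
 * passing the buffer to qeth_send_control_data().
 */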
4490 static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
4491                 struct qeth_cmd_buffer *iob, int len,
4492                 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
4493                         unsigned long),
4494                 void *reply_param)
4495 {
4496         u16 s1, s2;
4497
4498         QETH_CARD_TEXT(card, 4, "sendsnmp");
4499
4500         /* adjust PDU length fields in IPA_PDU_HEADER */
4501         s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4502         s2 = (u32) len;
4503         memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4504         memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4505         memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4506         memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4507         return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4508                                       reply_cb, reply_param);
4509 }
4510
4511 static int qeth_snmp_command_cb(struct qeth_card *card,
4512                 struct qeth_reply *reply, unsigned long sdata)
4513 {
4514         struct qeth_ipa_cmd *cmd;
4515         struct qeth_arp_query_info *qinfo;
4516         struct qeth_snmp_cmd *snmp;
4517         unsigned char *data;
4518         __u16 data_len;
4519
4520         QETH_CARD_TEXT(card, 3, "snpcmdcb");
4521
4522         cmd = (struct qeth_ipa_cmd *) sdata;
4523         data = (unsigned char *)((char *)cmd - reply->offset);
4524         qinfo = (struct qeth_arp_query_info *) reply->param;
4525         snmp = &cmd->data.setadapterparms.data.snmp;
4526
4527         if (cmd->hdr.return_code) {
4528                 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4529                 return 0;
4530         }
4531         if (cmd->data.setadapterparms.hdr.return_code) {
4532                 cmd->hdr.return_code =
4533                         cmd->data.setadapterparms.hdr.return_code;
4534                 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4535                 return 0;
4536         }
4537         data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4538         if (cmd->data.setadapterparms.hdr.seq_no == 1)
4539                 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4540         else
4541                 data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
4542
4543         /* check if there is enough room in userspace */
4544         if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4545                 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
4546                 cmd->hdr.return_code = IPA_RC_ENOMEM;
4547                 return 0;
4548         }
4549         QETH_CARD_TEXT_(card, 4, "snore%i",
4550                        cmd->data.setadapterparms.hdr.used_total);
4551         QETH_CARD_TEXT_(card, 4, "sseqn%i",
4552                 cmd->data.setadapterparms.hdr.seq_no);
4553         /* copy entries to user buffer */
4554         if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4555                 memcpy(qinfo->udata + qinfo->udata_offset,
4556                        (char *)snmp,
4557                        data_len + offsetof(struct qeth_snmp_cmd, data));
4558                 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4559         } else {
4560                 memcpy(qinfo->udata + qinfo->udata_offset,
4561                        (char *)&snmp->request, data_len);
4562         }
4563         qinfo->udata_offset += data_len;
4564         /* check if all replies received ... */
4565         QETH_CARD_TEXT_(card, 4, "srtot%i",
4566                        cmd->data.setadapterparms.hdr.used_total);
4567         QETH_CARD_TEXT_(card, 4, "srseq%i",
4568                        cmd->data.setadapterparms.hdr.seq_no);
4569         if (cmd->data.setadapterparms.hdr.seq_no <
4570             cmd->data.setadapterparms.hdr.used_total)
4571                 return 1;
4572         return 0;
4573 }
4574
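/*
 * ioctl backend for SNMP requests: copy the request from user space,
 * wrap it in a SET_SNMP_CONTROL adapter command and let
 * qeth_snmp_command_cb() gather the (possibly multi-part) reply into
 * qinfo.udata before the whole buffer is copied back to the caller.
 */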
4575 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4576 {
4577         struct qeth_cmd_buffer *iob;
4578         struct qeth_ipa_cmd *cmd;
4579         struct qeth_snmp_ureq *ureq;
4580         unsigned int req_len;
4581         struct qeth_arp_query_info qinfo = {0, };
4582         int rc = 0;
4583
4584         QETH_CARD_TEXT(card, 3, "snmpcmd");
4585
4586         if (card->info.guestlan)
4587                 return -EOPNOTSUPP;
4588
4589         if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4590             IS_LAYER3(card))
4591                 return -EOPNOTSUPP;
4592
4593         /* skip 4 bytes (data_len struct member) to get req_len */
4594         if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4595                 return -EFAULT;
4596         if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
4597                        sizeof(struct qeth_ipacmd_hdr) -
4598                        sizeof(struct qeth_ipacmd_setadpparms_hdr)))
4599                 return -EINVAL;
4600         ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
4601         if (IS_ERR(ureq)) {
4602                 QETH_CARD_TEXT(card, 2, "snmpnome");
4603                 return PTR_ERR(ureq);
4604         }
4605         qinfo.udata_len = ureq->hdr.data_len;
4606         qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4607         if (!qinfo.udata) {
4608                 kfree(ureq);
4609                 return -ENOMEM;
4610         }
4611         qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4612
4613         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4614                                    QETH_SNMP_SETADP_CMDLENGTH + req_len);
4615         if (!iob) {
4616                 rc = -ENOMEM;
4617                 goto out;
4618         }
4619         cmd = __ipa_cmd(iob);
4620         memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4621         rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4622                                     qeth_snmp_command_cb, (void *)&qinfo);
4623         if (rc)
4624                 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4625                                  CARD_DEVID(card), rc);
4626         else {
4627                 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4628                         rc = -EFAULT;
4629         }
4630 out:
4631         kfree(ureq);
4632         kfree(qinfo.udata);
4633         return rc;
4634 }
4635
4636 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4637                 struct qeth_reply *reply, unsigned long data)
4638 {
4639         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4640         struct qeth_qoat_priv *priv;
4641         char *resdata;
4642         int resdatalen;
4643
4644         QETH_CARD_TEXT(card, 3, "qoatcb");
4645         if (qeth_setadpparms_inspect_rc(cmd))
4646                 return 0;
4647
4648         priv = (struct qeth_qoat_priv *)reply->param;
4649         resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4650         resdata = (char *)data + 28;
4651
4652         if (resdatalen > (priv->buffer_len - priv->response_len)) {
4653                 cmd->hdr.return_code = IPA_RC_FFFF;
4654                 return 0;
4655         }
4656
4657         memcpy((priv->buffer + priv->response_len), resdata,
4658                 resdatalen);
4659         priv->response_len += resdatalen;
4660
4661         if (cmd->data.setadapterparms.hdr.seq_no <
4662             cmd->data.setadapterparms.hdr.used_total)
4663                 return 1;
4664         return 0;
4665 }
4666
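/*
 * ioctl backend for QUERY OAT: the reply may arrive in several command
 * parts, so qeth_setadpparms_query_oat_cb() appends each part to a
 * vzalloc'ed buffer, which is then copied to the user-supplied
 * (compat-aware) address once the command has completed.
 */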
4667 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4668 {
4669         int rc = 0;
4670         struct qeth_cmd_buffer *iob;
4671         struct qeth_ipa_cmd *cmd;
4672         struct qeth_query_oat *oat_req;
4673         struct qeth_query_oat_data oat_data;
4674         struct qeth_qoat_priv priv;
4675         void __user *tmp;
4676
4677         QETH_CARD_TEXT(card, 3, "qoatcmd");
4678
4679         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4680                 rc = -EOPNOTSUPP;
4681                 goto out;
4682         }
4683
4684         if (copy_from_user(&oat_data, udata,
4685                            sizeof(struct qeth_query_oat_data))) {
4686                 rc = -EFAULT;
4687                 goto out;
4688         }
4689
4690         priv.buffer_len = oat_data.buffer_len;
4691         priv.response_len = 0;
4692         priv.buffer = vzalloc(oat_data.buffer_len);
4693         if (!priv.buffer) {
4694                 rc = -ENOMEM;
4695                 goto out;
4696         }
4697
4698         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4699                                    sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4700                                    sizeof(struct qeth_query_oat));
4701         if (!iob) {
4702                 rc = -ENOMEM;
4703                 goto out_free;
4704         }
4705         cmd = __ipa_cmd(iob);
4706         oat_req = &cmd->data.setadapterparms.data.query_oat;
4707         oat_req->subcmd_code = oat_data.command;
4708
4709         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4710                                &priv);
4711         if (!rc) {
4712                 if (is_compat_task())
4713                         tmp = compat_ptr(oat_data.ptr);
4714                 else
4715                         tmp = (void __user *)(unsigned long)oat_data.ptr;
4716
4717                 if (copy_to_user(tmp, priv.buffer,
4718                     priv.response_len)) {
4719                         rc = -EFAULT;
4720                         goto out_free;
4721                 }
4722
4723                 oat_data.response_len = priv.response_len;
4724
4725                 if (copy_to_user(udata, &oat_data,
4726                     sizeof(struct qeth_query_oat_data)))
4727                         rc = -EFAULT;
4728         } else if (rc == IPA_RC_FFFF) {
4729                 rc = -EFAULT;
4730         }
4731
4732 out_free:
4733         vfree(priv.buffer);
4734 out:
4735         return rc;
4736 }
4737
4738 static int qeth_query_card_info_cb(struct qeth_card *card,
4739                                    struct qeth_reply *reply, unsigned long data)
4740 {
4741         struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4742         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4743         struct qeth_query_card_info *card_info;
4744
4745         QETH_CARD_TEXT(card, 2, "qcrdincb");
4746         if (qeth_setadpparms_inspect_rc(cmd))
4747                 return 0;
4748
4749         card_info = &cmd->data.setadapterparms.data.card_info;
4750         carrier_info->card_type = card_info->card_type;
4751         carrier_info->port_mode = card_info->port_mode;
4752         carrier_info->port_speed = card_info->port_speed;
4753         return 0;
4754 }
4755
4756 static int qeth_query_card_info(struct qeth_card *card,
4757                                 struct carrier_info *carrier_info)
4758 {
4759         struct qeth_cmd_buffer *iob;
4760
4761         QETH_CARD_TEXT(card, 2, "qcrdinfo");
4762         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4763                 return -EOPNOTSUPP;
4764         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4765                 sizeof(struct qeth_ipacmd_setadpparms_hdr));
4766         if (!iob)
4767                 return -ENOMEM;
4768         return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4769                                         (void *)carrier_info);
4770 }
4771
4772 /**
4773  * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4774  * @card: pointer to a qeth_card
4775  *
4776  * Return:
4777  *      0, if a MAC address has been set for the card's netdevice
4778  *      a negative error code, for the various error conditions
4779  */
4780 int qeth_vm_request_mac(struct qeth_card *card)
4781 {
4782         struct diag26c_mac_resp *response;
4783         struct diag26c_mac_req *request;
4784         struct ccw_dev_id id;
4785         int rc;
4786
4787         QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
4788
4789         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4790         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4791         if (!request || !response) {
4792                 rc = -ENOMEM;
4793                 goto out;
4794         }
4795
4796         ccw_device_get_id(CARD_DDEV(card), &id);
4797         request->resp_buf_len = sizeof(*response);
4798         request->resp_version = DIAG26C_VERSION2;
4799         request->op_code = DIAG26C_GET_MAC;
4800         request->devno = id.devno;
4801
4802         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4803         rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4804         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4805         if (rc)
4806                 goto out;
4807         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4808
4809         if (request->resp_buf_len < sizeof(*response) ||
4810             response->version != request->resp_version) {
4811                 rc = -EIO;
4812                 QETH_DBF_TEXT(SETUP, 2, "badresp");
4813                 QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
4814                              sizeof(request->resp_buf_len));
4815         } else if (!is_valid_ether_addr(response->mac)) {
4816                 rc = -EINVAL;
4817                 QETH_DBF_TEXT(SETUP, 2, "badmac");
4818                 QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
4819         } else {
4820                 ether_addr_copy(card->dev->dev_addr, response->mac);
4821         }
4822
4823 out:
4824         kfree(response);
4825         kfree(request);
4826         return rc;
4827 }
4828 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4829
4830 static int qeth_get_qdio_q_format(struct qeth_card *card)
4831 {
4832         if (card->info.type == QETH_CARD_TYPE_IQD)
4833                 return QDIO_IQDIO_QFMT;
4834         else
4835                 return QDIO_QETH_QFMT;
4836 }
4837
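/*
 * Probe device capabilities during setup: temporarily bring the data
 * device online if necessary, read its configuration data record to set
 * the unit address (and, for a freshly onlined device, the default
 * blocking thresholds), and fetch the QDIO SSQD block to decide whether
 * a completion queue can be used.
 */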
4838 static void qeth_determine_capabilities(struct qeth_card *card)
4839 {
4840         int rc;
4841         int length;
4842         char *prcd;
4843         struct ccw_device *ddev;
4844         int ddev_offline = 0;
4845
4846         QETH_DBF_TEXT(SETUP, 2, "detcapab");
4847         ddev = CARD_DDEV(card);
4848         if (!ddev->online) {
4849                 ddev_offline = 1;
4850                 rc = ccw_device_set_online(ddev);
4851                 if (rc) {
4852                         QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4853                         goto out;
4854                 }
4855         }
4856
4857         rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4858         if (rc) {
4859                 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4860                                  CARD_DEVID(card), rc);
4861                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4862                 goto out_offline;
4863         }
4864         qeth_configure_unitaddr(card, prcd);
4865         if (ddev_offline)
4866                 qeth_configure_blkt_default(card, prcd);
4867         kfree(prcd);
4868
4869         rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4870         if (rc)
4871                 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4872
4873         QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
4874         QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
4875         QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
4876         QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
4877         QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
4878         if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
4879             (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
4880             (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
4881                 dev_info(&card->gdev->dev,
4882                         "Completion Queueing supported\n");
4883         } else {
4884                 card->options.cq = QETH_CQ_NOTAVAILABLE;
4885         }
4886
4887
4888 out_offline:
4889         if (ddev_offline == 1)
4890                 ccw_device_set_offline(ddev);
4891 out:
4892         return;
4893 }
4894
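/*
 * When a completion queue is enabled, append its SBAL addresses to the
 * last input-queue slot of the arrays handed to qdio; the CQ shares the
 * regular input processing, so its start-poll callback is explicitly
 * cleared.
 */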
4895 static void qeth_qdio_establish_cq(struct qeth_card *card,
4896                                    struct qdio_buffer **in_sbal_ptrs,
4897                                    void (**queue_start_poll)
4898                                         (struct ccw_device *, int,
4899                                          unsigned long))
4900 {
4901         int i;
4902
4903         if (card->options.cq == QETH_CQ_ENABLED) {
4904                 int offset = QDIO_MAX_BUFFERS_PER_Q *
4905                              (card->qdio.no_in_queues - 1);
4906                 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4907                         in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4908                                 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4909                 }
4910
4911                 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4912         }
4913 }
4914
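/*
 * Build the qdio_initialize descriptor for this card: physical addresses
 * of all input (and optional CQ) and output SBALs, the per-queue
 * start-poll callbacks and the QIB parameter field, then allocate and
 * establish the QDIO queues exactly once, guarded by the
 * ALLOCATED -> ESTABLISHED state transition.
 */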
4915 static int qeth_qdio_establish(struct qeth_card *card)
4916 {
4917         struct qdio_initialize init_data;
4918         char *qib_param_field;
4919         struct qdio_buffer **in_sbal_ptrs;
4920         void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4921         struct qdio_buffer **out_sbal_ptrs;
4922         int i, j, k;
4923         int rc = 0;
4924
4925         QETH_DBF_TEXT(SETUP, 2, "qdioest");
4926
4927         qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4928                                   GFP_KERNEL);
4929         if (!qib_param_field) {
4930                 rc = -ENOMEM;
4931                 goto out_free_nothing;
4932         }
4933
4934         qeth_create_qib_param_field(card, qib_param_field);
4935         qeth_create_qib_param_field_blkt(card, qib_param_field);
4936
4937         in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4938                                sizeof(void *),
4939                                GFP_KERNEL);
4940         if (!in_sbal_ptrs) {
4941                 rc = -ENOMEM;
4942                 goto out_free_qib_param;
4943         }
4944         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4945                 in_sbal_ptrs[i] = (struct qdio_buffer *)
4946                         virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4947         }
4948
4949         queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4950                                    GFP_KERNEL);
4951         if (!queue_start_poll) {
4952                 rc = -ENOMEM;
4953                 goto out_free_in_sbals;
4954         }
4955         for (i = 0; i < card->qdio.no_in_queues; ++i)
4956                 queue_start_poll[i] = qeth_qdio_start_poll;
4957
4958         qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4959
4960         out_sbal_ptrs =
4961                 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4962                         sizeof(void *),
4963                         GFP_KERNEL);
4964         if (!out_sbal_ptrs) {
4965                 rc = -ENOMEM;
4966                 goto out_free_queue_start_poll;
4967         }
4968         for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4969                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4970                         out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4971                                 card->qdio.out_qs[i]->bufs[j]->buffer);
4972                 }
4973
4974         memset(&init_data, 0, sizeof(struct qdio_initialize));
4975         init_data.cdev                   = CARD_DDEV(card);
4976         init_data.q_format               = qeth_get_qdio_q_format(card);
4977         init_data.qib_param_field_format = 0;
4978         init_data.qib_param_field        = qib_param_field;
4979         init_data.no_input_qs            = card->qdio.no_in_queues;
4980         init_data.no_output_qs           = card->qdio.no_out_queues;
4981         init_data.input_handler          = qeth_qdio_input_handler;
4982         init_data.output_handler         = qeth_qdio_output_handler;
4983         init_data.queue_start_poll_array = queue_start_poll;
4984         init_data.int_parm               = (unsigned long) card;
4985         init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
4986         init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
4987         init_data.output_sbal_state_array = card->qdio.out_bufstates;
4988         init_data.scan_threshold =
4989                 (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
4990
4991         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4992                 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4993                 rc = qdio_allocate(&init_data);
4994                 if (rc) {
4995                         atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4996                         goto out;
4997                 }
4998                 rc = qdio_establish(&init_data);
4999                 if (rc) {
5000                         atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5001                         qdio_free(CARD_DDEV(card));
5002                 }
5003         }
5004
5005         switch (card->options.cq) {
5006         case QETH_CQ_ENABLED:
5007                 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5008                 break;
5009         case QETH_CQ_DISABLED:
5010                 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5011                 break;
5012         default:
5013                 break;
5014         }
5015 out:
5016         kfree(out_sbal_ptrs);
5017 out_free_queue_start_poll:
5018         kfree(queue_start_poll);
5019 out_free_in_sbals:
5020         kfree(in_sbal_ptrs);
5021 out_free_qib_param:
5022         kfree(qib_param_field);
5023 out_free_nothing:
5024         return rc;
5025 }
5026
5027 static void qeth_core_free_card(struct qeth_card *card)
5028 {
5029         QETH_DBF_TEXT(SETUP, 2, "freecrd");
5030         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
5031         qeth_clean_channel(&card->read);
5032         qeth_clean_channel(&card->write);
5033         qeth_clean_channel(&card->data);
5034         qeth_free_qdio_buffers(card);
5035         unregister_service_level(&card->qeth_service_level);
5036         dev_set_drvdata(&card->gdev->dev, NULL);
5037         kfree(card);
5038 }
5039
5040 void qeth_trace_features(struct qeth_card *card)
5041 {
5042         QETH_CARD_TEXT(card, 2, "features");
5043         QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5044         QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5045         QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5046         QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5047                       sizeof(card->info.diagass_support));
5048 }
5049 EXPORT_SYMBOL_GPL(qeth_trace_features);
5050
5051 static struct ccw_device_id qeth_ids[] = {
5052         {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5053                                         .driver_info = QETH_CARD_TYPE_OSD},
5054         {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5055                                         .driver_info = QETH_CARD_TYPE_IQD},
5056         {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5057                                         .driver_info = QETH_CARD_TYPE_OSN},
5058         {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5059                                         .driver_info = QETH_CARD_TYPE_OSM},
5060         {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5061                                         .driver_info = QETH_CARD_TYPE_OSX},
5062         {},
5063 };
5064 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5065
5066 static struct ccw_driver qeth_ccw_driver = {
5067         .driver = {
5068                 .owner = THIS_MODULE,
5069                 .name = "qeth",
5070         },
5071         .ids = qeth_ids,
5072         .probe = ccwgroup_probe_ccwdev,
5073         .remove = ccwgroup_remove_ccwdev,
5074 };
5075
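/*
 * Bring the card's control path up from scratch: clear any stale QDIO
 * state, cycle the read/write/data CCW devices, run IDX activation on
 * the read and write channels, initialize the MPC connection, issue
 * STARTLAN and re-query the IPA assist and adapter parameter support.
 * Recoverable errors are retried up to three times before giving up.
 */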
5076 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5077 {
5078         int retries = 3;
5079         int rc;
5080
5081         QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
5082         atomic_set(&card->force_alloc_skb, 0);
5083         qeth_update_from_chp_desc(card);
5084 retry:
5085         if (retries < 3)
5086                 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5087                                  CARD_DEVID(card));
5088         rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
5089         ccw_device_set_offline(CARD_DDEV(card));
5090         ccw_device_set_offline(CARD_WDEV(card));
5091         ccw_device_set_offline(CARD_RDEV(card));
5092         qdio_free(CARD_DDEV(card));
5093         rc = ccw_device_set_online(CARD_RDEV(card));
5094         if (rc)
5095                 goto retriable;
5096         rc = ccw_device_set_online(CARD_WDEV(card));
5097         if (rc)
5098                 goto retriable;
5099         rc = ccw_device_set_online(CARD_DDEV(card));
5100         if (rc)
5101                 goto retriable;
5102 retriable:
5103         if (rc == -ERESTARTSYS) {
5104                 QETH_DBF_TEXT(SETUP, 2, "break1");
5105                 return rc;
5106         } else if (rc) {
5107                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
5108                 if (--retries < 0)
5109                         goto out;
5110                 else
5111                         goto retry;
5112         }
5113         qeth_determine_capabilities(card);
5114         qeth_init_tokens(card);
5115         qeth_init_func_level(card);
5116         rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
5117         if (rc == -ERESTARTSYS) {
5118                 QETH_DBF_TEXT(SETUP, 2, "break2");
5119                 return rc;
5120         } else if (rc) {
5121                 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
5122                 if (--retries < 0)
5123                         goto out;
5124                 else
5125                         goto retry;
5126         }
5127         rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
5128         if (rc == -ERESTARTSYS) {
5129                 QETH_DBF_TEXT(SETUP, 2, "break3");
5130                 return rc;
5131         } else if (rc) {
5132                 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
5133                 if (--retries < 0)
5134                         goto out;
5135                 else
5136                         goto retry;
5137         }
5138         card->read_or_write_problem = 0;
5139         rc = qeth_mpc_initialize(card);
5140         if (rc) {
5141                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
5142                 goto out;
5143         }
5144
5145         rc = qeth_send_startlan(card);
5146         if (rc) {
5147                 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5148                 if (rc == IPA_RC_LAN_OFFLINE) {
5149                         dev_warn(&card->gdev->dev,
5150                                 "The LAN is offline\n");
5151                         *carrier_ok = false;
5152                 } else {
5153                         rc = -ENODEV;
5154                         goto out;
5155                 }
5156         } else {
5157                 *carrier_ok = true;
5158         }
5159
5160         if (qeth_netdev_is_registered(card->dev)) {
5161                 if (*carrier_ok)
5162                         netif_carrier_on(card->dev);
5163                 else
5164                         netif_carrier_off(card->dev);
5165         }
5166
5167         card->options.ipa4.supported_funcs = 0;
5168         card->options.ipa6.supported_funcs = 0;
5169         card->options.adp.supported_funcs = 0;
5170         card->options.sbp.supported_funcs = 0;
5171         card->info.diagass_support = 0;
5172         rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5173         if (rc == -ENOMEM)
5174                 goto out;
5175         if (qeth_is_supported(card, IPA_IPV6)) {
5176                 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5177                 if (rc == -ENOMEM)
5178                         goto out;
5179         }
5180         if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5181                 rc = qeth_query_setadapterparms(card);
5182                 if (rc < 0) {
5183                         QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5184                         goto out;
5185                 }
5186         }
5187         if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5188                 rc = qeth_query_setdiagass(card);
5189                 if (rc < 0) {
5190                         QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
5191                         goto out;
5192                 }
5193         }
5194         return 0;
5195 out:
5196         dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5197                 "from an error on the device\n");
5198         QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5199                          CARD_DEVID(card), rc);
5200         return rc;
5201 }
5202 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5203
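/*
 * Attach data from a QDIO buffer element to an skb: the first chunk is
 * copied into the linear part (up to the available tailroom), anything
 * beyond that is added as a page fragment referencing the element's page
 * directly, avoiding a copy on the scatter-gather receive path.
 */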
5204 static void qeth_create_skb_frag(struct qdio_buffer_element *element,
5205                                  struct sk_buff *skb, int offset, int data_len)
5206 {
5207         struct page *page = virt_to_page(element->addr);
5208         unsigned int next_frag;
5209
5210         /* first fill the linear space */
5211         if (!skb->len) {
5212                 unsigned int linear = min(data_len, skb_tailroom(skb));
5213
5214                 skb_put_data(skb, element->addr + offset, linear);
5215                 data_len -= linear;
5216                 if (!data_len)
5217                         return;
5218                 offset += linear;
5219                 /* fall through to add page frag for remaining data */
5220         }
5221
5222         next_frag = skb_shinfo(skb)->nr_frags;
5223         get_page(page);
5224         skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
5225 }
5226
5227 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5228 {
5229         return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5230 }
5231
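/*
 * Extract the next packet from an inbound QDIO buffer. The qeth_hdr must
 * not cross a buffer element boundary, but the packet data may span
 * several elements. Depending on packet size and card settings the
 * payload is either copied into a freshly allocated skb or attached as
 * page fragments (rx_sg); *__element and *__offset are advanced to where
 * the next packet starts.
 */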
5232 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5233                 struct qeth_qdio_buffer *qethbuffer,
5234                 struct qdio_buffer_element **__element, int *__offset,
5235                 struct qeth_hdr **hdr)
5236 {
5237         struct qdio_buffer_element *element = *__element;
5238         struct qdio_buffer *buffer = qethbuffer->buffer;
5239         int offset = *__offset;
5240         struct sk_buff *skb;
5241         int skb_len = 0;
5242         void *data_ptr;
5243         int data_len;
5244         int headroom = 0;
5245         int use_rx_sg = 0;
5246
5247         /* qeth_hdr must not cross element boundaries */
5248         while (element->length < offset + sizeof(struct qeth_hdr)) {
5249                 if (qeth_is_last_sbale(element))
5250                         return NULL;
5251                 element++;
5252                 offset = 0;
5253         }
5254         *hdr = element->addr + offset;
5255
5256         offset += sizeof(struct qeth_hdr);
5257         switch ((*hdr)->hdr.l2.id) {
5258         case QETH_HEADER_TYPE_LAYER2:
5259                 skb_len = (*hdr)->hdr.l2.pkt_length;
5260                 break;
5261         case QETH_HEADER_TYPE_LAYER3:
5262                 skb_len = (*hdr)->hdr.l3.length;
5263                 headroom = ETH_HLEN;
5264                 break;
5265         case QETH_HEADER_TYPE_OSN:
5266                 skb_len = (*hdr)->hdr.osn.pdu_length;
5267                 headroom = sizeof(struct qeth_hdr);
5268                 break;
5269         default:
5270                 break;
5271         }
5272
5273         if (!skb_len)
5274                 return NULL;
5275
5276         if (((skb_len >= card->options.rx_sg_cb) &&
5277              (card->info.type != QETH_CARD_TYPE_OSN) &&
5278              (!atomic_read(&card->force_alloc_skb))) ||
5279             (card->options.cq == QETH_CQ_ENABLED))
5280                 use_rx_sg = 1;
5281
5282         if (use_rx_sg && qethbuffer->rx_skb) {
5283                 /* QETH_CQ_ENABLED only: */
5284                 skb = qethbuffer->rx_skb;
5285                 qethbuffer->rx_skb = NULL;
5286         } else {
5287                 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5288
5289                 skb = napi_alloc_skb(&card->napi, linear + headroom);
5290         }
5291         if (!skb)
5292                 goto no_mem;
5293         if (headroom)
5294                 skb_reserve(skb, headroom);
5295
5296         data_ptr = element->addr + offset;
5297         while (skb_len) {
5298                 data_len = min(skb_len, (int)(element->length - offset));
5299                 if (data_len) {
5300                         if (use_rx_sg)
5301                                 qeth_create_skb_frag(element, skb, offset,
5302                                                      data_len);
5303                         else
5304                                 skb_put_data(skb, data_ptr, data_len);
5305                 }
5306                 skb_len -= data_len;
5307                 if (skb_len) {
5308                         if (qeth_is_last_sbale(element)) {
5309                                 QETH_CARD_TEXT(card, 4, "unexeob");
5310                                 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5311                                 dev_kfree_skb_any(skb);
5312                                 card->stats.rx_errors++;
5313                                 return NULL;
5314                         }
5315                         element++;
5316                         offset = 0;
5317                         data_ptr = element->addr;
5318                 } else {
5319                         offset += data_len;
5320                 }
5321         }
5322         *__element = element;
5323         *__offset = offset;
5324         if (use_rx_sg && card->options.performance_stats) {
5325                 card->perf_stats.sg_skbs_rx++;
5326                 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
5327         }
5328         return skb;
5329 no_mem:
5330         if (net_ratelimit()) {
5331                 QETH_CARD_TEXT(card, 2, "noskbmem");
5332         }
5333         card->stats.rx_dropped++;
5334         return NULL;
5335 }
5336 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
5337
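/*
 * NAPI poll routine: fetch completed input buffers with
 * qdio_get_next_buffers(), let the discipline's process_rx_buffer()
 * consume them within the given budget, return drained buffers to the
 * input queue and re-arm the QDIO interrupt once no more work is
 * pending.
 */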
5338 int qeth_poll(struct napi_struct *napi, int budget)
5339 {
5340         struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5341         int work_done = 0;
5342         struct qeth_qdio_buffer *buffer;
5343         int done;
5344         int new_budget = budget;
5345
5346         if (card->options.performance_stats) {
5347                 card->perf_stats.inbound_cnt++;
5348                 card->perf_stats.inbound_start_time = qeth_get_micros();
5349         }
5350
5351         while (1) {
5352                 if (!card->rx.b_count) {
5353                         card->rx.qdio_err = 0;
5354                         card->rx.b_count = qdio_get_next_buffers(
5355                                 card->data.ccwdev, 0, &card->rx.b_index,
5356                                 &card->rx.qdio_err);
5357                         if (card->rx.b_count <= 0) {
5358                                 card->rx.b_count = 0;
5359                                 break;
5360                         }
5361                         card->rx.b_element =
5362                                 &card->qdio.in_q->bufs[card->rx.b_index]
5363                                 .buffer->element[0];
5364                         card->rx.e_offset = 0;
5365                 }
5366
5367                 while (card->rx.b_count) {
5368                         buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5369                         if (!(card->rx.qdio_err &&
5370                             qeth_check_qdio_errors(card, buffer->buffer,
5371                             card->rx.qdio_err, "qinerr")))
5372                                 work_done +=
5373                                         card->discipline->process_rx_buffer(
5374                                                 card, new_budget, &done);
5375                         else
5376                                 done = 1;
5377
5378                         if (done) {
5379                                 if (card->options.performance_stats)
5380                                         card->perf_stats.bufs_rec++;
5381                                 qeth_put_buffer_pool_entry(card,
5382                                         buffer->pool_entry);
5383                                 qeth_queue_input_buffer(card, card->rx.b_index);
5384                                 card->rx.b_count--;
5385                                 if (card->rx.b_count) {
5386                                         card->rx.b_index =
5387                                                 (card->rx.b_index + 1) %
5388                                                 QDIO_MAX_BUFFERS_PER_Q;
5389                                         card->rx.b_element =
5390                                                 &card->qdio.in_q
5391                                                 ->bufs[card->rx.b_index]
5392                                                 .buffer->element[0];
5393                                         card->rx.e_offset = 0;
5394                                 }
5395                         }
5396
5397                         if (work_done >= budget)
5398                                 goto out;
5399                         else
5400                                 new_budget = budget - work_done;
5401                 }
5402         }
5403
5404         napi_complete_done(napi, work_done);
5405         if (qdio_start_irq(card->data.ccwdev, 0))
5406                 napi_schedule(&card->napi);
5407 out:
5408         if (card->options.performance_stats)
5409                 card->perf_stats.inbound_time += qeth_get_micros() -
5410                         card->perf_stats.inbound_start_time;
5411         return work_done;
5412 }
5413 EXPORT_SYMBOL_GPL(qeth_poll);
5414
5415 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5416 {
5417         if (!cmd->hdr.return_code)
5418                 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5419         return cmd->hdr.return_code;
5420 }
5421
5422 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5423                                         struct qeth_reply *reply,
5424                                         unsigned long data)
5425 {
5426         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5427         struct qeth_ipa_caps *caps = reply->param;
5428
5429         if (qeth_setassparms_inspect_rc(cmd))
5430                 return 0;
5431
5432         caps->supported = cmd->data.setassparms.data.caps.supported;
5433         caps->enabled = cmd->data.setassparms.data.caps.enabled;
5434         return 0;
5435 }
5436
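/* Default SETASSPARMS reply callback: mirror the assist return code into the
 * IPA header and refresh the per-protocol mask of enabled assist functions.
 */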
5437 int qeth_setassparms_cb(struct qeth_card *card,
5438                         struct qeth_reply *reply, unsigned long data)
5439 {
5440         struct qeth_ipa_cmd *cmd;
5441
5442         QETH_CARD_TEXT(card, 4, "defadpcb");
5443
5444         cmd = (struct qeth_ipa_cmd *) data;
5445         if (cmd->hdr.return_code == 0) {
5446                 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5447                 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5448                         card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5449                 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5450                         card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5451         }
5452         return 0;
5453 }
5454 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5455
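/* Build a SETASSPARMS command buffer for the given assist. @len is the number
 * of payload bytes that follow the 8-byte setassparms header.
 */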
5456 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5457                                                  enum qeth_ipa_funcs ipa_func,
5458                                                  __u16 cmd_code, __u16 len,
5459                                                  enum qeth_prot_versions prot)
5460 {
5461         struct qeth_cmd_buffer *iob;
5462         struct qeth_ipa_cmd *cmd;
5463
5464         QETH_CARD_TEXT(card, 4, "getasscm");
5465         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
5466
5467         if (iob) {
5468                 cmd = __ipa_cmd(iob);
5469                 cmd->data.setassparms.hdr.assist_no = ipa_func;
5470                 cmd->data.setassparms.hdr.length = 8 + len;
5471                 cmd->data.setassparms.hdr.command_code = cmd_code;
5472         }
5473
5474         return iob;
5475 }
5476 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5477
5478 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5479                                       enum qeth_ipa_funcs ipa_func,
5480                                       u16 cmd_code, long data,
5481                                       enum qeth_prot_versions prot)
5482 {
5483         int length = 0;
5484         struct qeth_cmd_buffer *iob;
5485
5486         QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5487         if (data)
5488                 length = sizeof(__u32);
5489         iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5490         if (!iob)
5491                 return -ENOMEM;
5492
5493         __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
5494         return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5495 }
5496 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5497
5498 static void qeth_unregister_dbf_views(void)
5499 {
5500         int x;
5501         for (x = 0; x < QETH_DBF_INFOS; x++) {
5502                 debug_unregister(qeth_dbf[x].id);
5503                 qeth_dbf[x].id = NULL;
5504         }
5505 }
5506
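/* Format a message into a small stack buffer and hand it to the s390 debug
 * facility; anything longer than the buffer is silently truncated.
 */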
5507 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5508 {
5509         char dbf_txt_buf[32];
5510         va_list args;
5511
5512         if (!debug_level_enabled(id, level))
5513                 return;
5514         va_start(args, fmt);
5515         vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5516         va_end(args);
5517         debug_text_event(id, level, dbf_txt_buf);
5518 }
5519 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5520
5521 static int qeth_register_dbf_views(void)
5522 {
5523         int ret;
5524         int x;
5525
5526         for (x = 0; x < QETH_DBF_INFOS; x++) {
5527                 /* register the areas */
5528                 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5529                                                 qeth_dbf[x].pages,
5530                                                 qeth_dbf[x].areas,
5531                                                 qeth_dbf[x].len);
5532                 if (qeth_dbf[x].id == NULL) {
5533                         qeth_unregister_dbf_views();
5534                         return -ENOMEM;
5535                 }
5536
5537                 /* register a view */
5538                 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5539                 if (ret) {
5540                         qeth_unregister_dbf_views();
5541                         return ret;
5542                 }
5543
5544                 /* set the level at which messages are logged */
5545                 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5546         }
5547
5548         return 0;
5549 }
5550
5551 static DEFINE_MUTEX(qeth_mod_mutex);    /* for synchronized module loading */
5552
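/* Take a reference on the requested discipline module (loading it on demand
 * via request_module) and record the resulting layer in the card options.
 */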
5553 int qeth_core_load_discipline(struct qeth_card *card,
5554                 enum qeth_discipline_id discipline)
5555 {
5556         mutex_lock(&qeth_mod_mutex);
5557         switch (discipline) {
5558         case QETH_DISCIPLINE_LAYER3:
5559                 card->discipline = try_then_request_module(
5560                         symbol_get(qeth_l3_discipline), "qeth_l3");
5561                 break;
5562         case QETH_DISCIPLINE_LAYER2:
5563                 card->discipline = try_then_request_module(
5564                         symbol_get(qeth_l2_discipline), "qeth_l2");
5565                 break;
5566         default:
5567                 break;
5568         }
5569         mutex_unlock(&qeth_mod_mutex);
5570
5571         if (!card->discipline) {
5572                 dev_err(&card->gdev->dev, "There is no kernel module to "
5573                         "support discipline %d\n", discipline);
5574                 return -EINVAL;
5575         }
5576
5577         card->options.layer = discipline;
5578         return 0;
5579 }
5580
5581 void qeth_core_free_discipline(struct qeth_card *card)
5582 {
5583         if (IS_LAYER2(card))
5584                 symbol_put(qeth_l2_discipline);
5585         else
5586                 symbol_put(qeth_l3_discipline);
5587         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5588         card->discipline = NULL;
5589 }
5590
5591 const struct device_type qeth_generic_devtype = {
5592         .name = "qeth_generic",
5593         .groups = qeth_generic_attr_groups,
5594 };
5595 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5596
5597 static const struct device_type qeth_osn_devtype = {
5598         .name = "qeth_osn",
5599         .groups = qeth_osn_attr_groups,
5600 };
5601
5602 #define DBF_NAME_LEN    20
5603
5604 struct qeth_dbf_entry {
5605         char dbf_name[DBF_NAME_LEN];
5606         debug_info_t *dbf_info;
5607         struct list_head dbf_list;
5608 };
5609
5610 static LIST_HEAD(qeth_dbf_list);
5611 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5612
5613 static debug_info_t *qeth_get_dbf_entry(char *name)
5614 {
5615         struct qeth_dbf_entry *entry;
5616         debug_info_t *rc = NULL;
5617
5618         mutex_lock(&qeth_dbf_list_mutex);
5619         list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5620                 if (strcmp(entry->dbf_name, name) == 0) {
5621                         rc = entry->dbf_info;
5622                         break;
5623                 }
5624         }
5625         mutex_unlock(&qeth_dbf_list_mutex);
5626         return rc;
5627 }
5628
5629 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5630 {
5631         struct qeth_dbf_entry *new_entry;
5632
5633         card->debug = debug_register(name, 2, 1, 8);
5634         if (!card->debug) {
5635                 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5636                 goto err;
5637         }
5638         if (debug_register_view(card->debug, &debug_hex_ascii_view))
5639                 goto err_dbg;
5640         new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5641         if (!new_entry)
5642                 goto err_dbg;
5643         strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5644         new_entry->dbf_info = card->debug;
5645         mutex_lock(&qeth_dbf_list_mutex);
5646         list_add(&new_entry->dbf_list, &qeth_dbf_list);
5647         mutex_unlock(&qeth_dbf_list_mutex);
5648
5649         return 0;
5650
5651 err_dbg:
5652         debug_unregister(card->debug);
5653 err:
5654         return -ENOMEM;
5655 }
5656
5657 static void qeth_clear_dbf_list(void)
5658 {
5659         struct qeth_dbf_entry *entry, *tmp;
5660
5661         mutex_lock(&qeth_dbf_list_mutex);
5662         list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5663                 list_del(&entry->dbf_list);
5664                 debug_unregister(entry->dbf_info);
5665                 kfree(entry);
5666         }
5667         mutex_unlock(&qeth_dbf_list_mutex);
5668 }
5669
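/* Allocate the net_device that matches the card type: IQD and OSN cards get a
 * bare netdev set up as Ethernet, everything else a regular etherdev. The MTU
 * limits are left at 0 here and filled in once the device goes online.
 */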
5670 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5671 {
5672         struct net_device *dev;
5673
5674         switch (card->info.type) {
5675         case QETH_CARD_TYPE_IQD:
5676                 dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
5677                 break;
5678         case QETH_CARD_TYPE_OSN:
5679                 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5680                 break;
5681         default:
5682                 dev = alloc_etherdev(0);
5683         }
5684
5685         if (!dev)
5686                 return NULL;
5687
5688         dev->ml_priv = card;
5689         dev->watchdog_timeo = QETH_TX_TIMEOUT;
5690         dev->min_mtu = IS_OSN(card) ? 64 : 576;
5691         /* initialized when device first goes online: */
5692         dev->max_mtu = 0;
5693         dev->mtu = 0;
5694         SET_NETDEV_DEV(dev, &card->gdev->dev);
5695         netif_carrier_off(dev);
5696
5697         if (!IS_OSN(card)) {
5698                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5699                 dev->hw_features |= NETIF_F_SG;
5700                 dev->vlan_features |= NETIF_F_SG;
5701                 if (IS_IQD(card))
5702                         dev->features |= NETIF_F_SG;
5703         }
5704
5705         return dev;
5706 }
5707
5708 struct net_device *qeth_clone_netdev(struct net_device *orig)
5709 {
5710         struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5711
5712         if (!clone)
5713                 return NULL;
5714
5715         clone->dev_port = orig->dev_port;
5716         return clone;
5717 }
5718
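/* ccwgroup probe: allocate and set up the qeth_card, attach a per-card debug
 * area, allocate the net_device and, when qeth_enforce_discipline() mandates
 * a specific layer, load that discipline and run its setup routine.
 */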
5719 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5720 {
5721         struct qeth_card *card;
5722         struct device *dev;
5723         int rc;
5724         enum qeth_discipline_id enforced_disc;
5725         char dbf_name[DBF_NAME_LEN];
5726
5727         QETH_DBF_TEXT(SETUP, 2, "probedev");
5728
5729         dev = &gdev->dev;
5730         if (!get_device(dev))
5731                 return -ENODEV;
5732
5733         QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5734
5735         card = qeth_alloc_card(gdev);
5736         if (!card) {
5737                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5738                 rc = -ENOMEM;
5739                 goto err_dev;
5740         }
5741
5742         snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5743                 dev_name(&gdev->dev));
5744         card->debug = qeth_get_dbf_entry(dbf_name);
5745         if (!card->debug) {
5746                 rc = qeth_add_dbf_entry(card, dbf_name);
5747                 if (rc)
5748                         goto err_card;
5749         }
5750
5751         qeth_setup_card(card);
5752         qeth_update_from_chp_desc(card);
5753
5754         card->dev = qeth_alloc_netdev(card);
5755         if (!card->dev) {
5756                 rc = -ENOMEM;
5757                 goto err_card;
5758         }
5759
5760         qeth_determine_capabilities(card);
5761         enforced_disc = qeth_enforce_discipline(card);
5762         switch (enforced_disc) {
5763         case QETH_DISCIPLINE_UNDETERMINED:
5764                 gdev->dev.type = &qeth_generic_devtype;
5765                 break;
5766         default:
5767                 card->info.layer_enforced = true;
5768                 rc = qeth_core_load_discipline(card, enforced_disc);
5769                 if (rc)
5770                         goto err_load;
5771
5772                 gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
5773                                         ? card->discipline->devtype
5774                                         : &qeth_osn_devtype;
5775                 rc = card->discipline->setup(card->gdev);
5776                 if (rc)
5777                         goto err_disc;
5778                 break;
5779         }
5780
5781         return 0;
5782
5783 err_disc:
5784         qeth_core_free_discipline(card);
5785 err_load:
5786         free_netdev(card->dev);
5787 err_card:
5788         qeth_core_free_card(card);
5789 err_dev:
5790         put_device(dev);
5791         return rc;
5792 }
5793
5794 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5795 {
5796         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5797
5798         QETH_DBF_TEXT(SETUP, 2, "removedv");
5799
5800         if (card->discipline) {
5801                 card->discipline->remove(gdev);
5802                 qeth_core_free_discipline(card);
5803         }
5804
5805         free_netdev(card->dev);
5806         qeth_core_free_card(card);
5807         put_device(&gdev->dev);
5808 }
5809
5810 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5811 {
5812         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5813         int rc = 0;
5814         enum qeth_discipline_id def_discipline;
5815
5816         if (!card->discipline) {
5817                 if (card->info.type == QETH_CARD_TYPE_IQD)
5818                         def_discipline = QETH_DISCIPLINE_LAYER3;
5819                 else
5820                         def_discipline = QETH_DISCIPLINE_LAYER2;
5821                 rc = qeth_core_load_discipline(card, def_discipline);
5822                 if (rc)
5823                         goto err;
5824                 rc = card->discipline->setup(card->gdev);
5825                 if (rc) {
5826                         qeth_core_free_discipline(card);
5827                         goto err;
5828                 }
5829         }
5830         rc = card->discipline->set_online(gdev);
5831 err:
5832         return rc;
5833 }
5834
5835 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5836 {
5837         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5838         return card->discipline->set_offline(gdev);
5839 }
5840
5841 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5842 {
5843         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5844         qeth_set_allowed_threads(card, 0, 1);
5845         if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5846                 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5847         qeth_qdio_clear_card(card, 0);
5848         qeth_clear_qdio_buffers(card);
5849         qdio_free(CARD_DDEV(card));
5850 }
5851
5852 static int qeth_core_freeze(struct ccwgroup_device *gdev)
5853 {
5854         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5855         if (card->discipline && card->discipline->freeze)
5856                 return card->discipline->freeze(gdev);
5857         return 0;
5858 }
5859
5860 static int qeth_core_thaw(struct ccwgroup_device *gdev)
5861 {
5862         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5863         if (card->discipline && card->discipline->thaw)
5864                 return card->discipline->thaw(gdev);
5865         return 0;
5866 }
5867
5868 static int qeth_core_restore(struct ccwgroup_device *gdev)
5869 {
5870         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5871         if (card->discipline && card->discipline->restore)
5872                 return card->discipline->restore(gdev);
5873         return 0;
5874 }
5875
5876 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5877                            size_t count)
5878 {
5879         int err;
5880
5881         err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5882                                   buf);
5883
5884         return err ? err : count;
5885 }
5886 static DRIVER_ATTR_WO(group);
5887
5888 static struct attribute *qeth_drv_attrs[] = {
5889         &driver_attr_group.attr,
5890         NULL,
5891 };
5892 static struct attribute_group qeth_drv_attr_group = {
5893         .attrs = qeth_drv_attrs,
5894 };
5895 static const struct attribute_group *qeth_drv_attr_groups[] = {
5896         &qeth_drv_attr_group,
5897         NULL,
5898 };
5899
5900 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5901         .driver = {
5902                 .groups = qeth_drv_attr_groups,
5903                 .owner = THIS_MODULE,
5904                 .name = "qeth",
5905         },
5906         .ccw_driver = &qeth_ccw_driver,
5907         .setup = qeth_core_probe_device,
5908         .remove = qeth_core_remove_device,
5909         .set_online = qeth_core_set_online,
5910         .set_offline = qeth_core_set_offline,
5911         .shutdown = qeth_core_shutdown,
5912         .prepare = NULL,
5913         .complete = NULL,
5914         .freeze = qeth_core_freeze,
5915         .thaw = qeth_core_thaw,
5916         .restore = qeth_core_restore,
5917 };
5918
5919 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5920 {
5921         struct ccwgroup_device *gdev;
5922         struct qeth_card *card;
5923
5924         gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5925         if (!gdev)
5926                 return NULL;
5927
5928         card = dev_get_drvdata(&gdev->dev);
5929         put_device(&gdev->dev);
5930         return card;
5931 }
5932 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5933
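/* Common ioctl handler: serves the qeth-private SNMP/OAT/card-type ioctls and
 * a minimal MII read emulation, and defers anything else to the discipline's
 * do_ioctl callback when one is provided.
 */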
5934 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5935 {
5936         struct qeth_card *card = dev->ml_priv;
5937         struct mii_ioctl_data *mii_data;
5938         int rc = 0;
5939
5940         if (!card)
5941                 return -ENODEV;
5942
5943         if (!qeth_card_hw_is_reachable(card))
5944                 return -ENODEV;
5945
5946         if (card->info.type == QETH_CARD_TYPE_OSN)
5947                 return -EPERM;
5948
5949         switch (cmd) {
5950         case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5951                 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5952                 break;
5953         case SIOC_QETH_GET_CARD_TYPE:
5954                 if ((card->info.type == QETH_CARD_TYPE_OSD ||
5955                      card->info.type == QETH_CARD_TYPE_OSM ||
5956                      card->info.type == QETH_CARD_TYPE_OSX) &&
5957                     !card->info.guestlan)
5958                         return 1;
5959                 else
5960                         return 0;
5961         case SIOCGMIIPHY:
5962                 mii_data = if_mii(rq);
5963                 mii_data->phy_id = 0;
5964                 break;
5965         case SIOCGMIIREG:
5966                 mii_data = if_mii(rq);
5967                 if (mii_data->phy_id != 0)
5968                         rc = -EINVAL;
5969                 else
5970                         mii_data->val_out = qeth_mdio_read(dev,
5971                                 mii_data->phy_id, mii_data->reg_num);
5972                 break;
5973         case SIOC_QETH_QUERY_OAT:
5974                 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5975                 break;
5976         default:
5977                 if (card->discipline->do_ioctl)
5978                         rc = card->discipline->do_ioctl(dev, rq, cmd);
5979                 else
5980                         rc = -EOPNOTSUPP;
5981         }
5982         if (rc)
5983                 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5984         return rc;
5985 }
5986 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
5987
5988 static struct {
5989         const char str[ETH_GSTRING_LEN];
5990 } qeth_ethtool_stats_keys[] = {
5991 /*  0 */{"rx skbs"},
5992         {"rx buffers"},
5993         {"tx skbs"},
5994         {"tx buffers"},
5995         {"tx skbs no packing"},
5996         {"tx buffers no packing"},
5997         {"tx skbs packing"},
5998         {"tx buffers packing"},
5999         {"tx sg skbs"},
6000         {"tx buffer elements"},
6001 /* 10 */{"rx sg skbs"},
6002         {"rx sg frags"},
6003         {"rx sg page allocs"},
6004         {"tx large kbytes"},
6005         {"tx large count"},
6006         {"tx pk state ch n->p"},
6007         {"tx pk state ch p->n"},
6008         {"tx pk watermark low"},
6009         {"tx pk watermark high"},
6010         {"queue 0 buffer usage"},
6011 /* 20 */{"queue 1 buffer usage"},
6012         {"queue 2 buffer usage"},
6013         {"queue 3 buffer usage"},
6014         {"rx poll time"},
6015         {"rx poll count"},
6016         {"rx do_QDIO time"},
6017         {"rx do_QDIO count"},
6018         {"tx handler time"},
6019         {"tx handler count"},
6020         {"tx time"},
6021 /* 30 */{"tx count"},
6022         {"tx do_QDIO time"},
6023         {"tx do_QDIO count"},
6024         {"tx csum"},
6025         {"tx lin"},
6026         {"tx linfail"},
6027         {"cq handler count"},
6028         {"cq handler time"},
6029         {"rx csum"}
6030 };
6031
6032 int qeth_core_get_sset_count(struct net_device *dev, int stringset)
6033 {
6034         switch (stringset) {
6035         case ETH_SS_STATS:
6036                 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
6037         default:
6038                 return -EINVAL;
6039         }
6040 }
6041 EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
6042
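/* Fill the ethtool statistics array; the indices below must stay in step with
 * the qeth_ethtool_stats_keys table above.
 */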
6043 void qeth_core_get_ethtool_stats(struct net_device *dev,
6044                 struct ethtool_stats *stats, u64 *data)
6045 {
6046         struct qeth_card *card = dev->ml_priv;
6047         data[0] = card->stats.rx_packets -
6048                                 card->perf_stats.initial_rx_packets;
6049         data[1] = card->perf_stats.bufs_rec;
6050         data[2] = card->stats.tx_packets -
6051                                 card->perf_stats.initial_tx_packets;
6052         data[3] = card->perf_stats.bufs_sent;
6053         data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
6054                         - card->perf_stats.skbs_sent_pack;
6055         data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
6056         data[6] = card->perf_stats.skbs_sent_pack;
6057         data[7] = card->perf_stats.bufs_sent_pack;
6058         data[8] = card->perf_stats.sg_skbs_sent;
6059         data[9] = card->perf_stats.buf_elements_sent;
6060         data[10] = card->perf_stats.sg_skbs_rx;
6061         data[11] = card->perf_stats.sg_frags_rx;
6062         data[12] = card->perf_stats.sg_alloc_page_rx;
6063         data[13] = (card->perf_stats.large_send_bytes >> 10);
6064         data[14] = card->perf_stats.large_send_cnt;
6065         data[15] = card->perf_stats.sc_dp_p;
6066         data[16] = card->perf_stats.sc_p_dp;
6067         data[17] = QETH_LOW_WATERMARK_PACK;
6068         data[18] = QETH_HIGH_WATERMARK_PACK;
6069         data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
6070         data[20] = (card->qdio.no_out_queues > 1) ?
6071                         atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
6072         data[21] = (card->qdio.no_out_queues > 2) ?
6073                         atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
6074         data[22] = (card->qdio.no_out_queues > 3) ?
6075                         atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
6076         data[23] = card->perf_stats.inbound_time;
6077         data[24] = card->perf_stats.inbound_cnt;
6078         data[25] = card->perf_stats.inbound_do_qdio_time;
6079         data[26] = card->perf_stats.inbound_do_qdio_cnt;
6080         data[27] = card->perf_stats.outbound_handler_time;
6081         data[28] = card->perf_stats.outbound_handler_cnt;
6082         data[29] = card->perf_stats.outbound_time;
6083         data[30] = card->perf_stats.outbound_cnt;
6084         data[31] = card->perf_stats.outbound_do_qdio_time;
6085         data[32] = card->perf_stats.outbound_do_qdio_cnt;
6086         data[33] = card->perf_stats.tx_csum;
6087         data[34] = card->perf_stats.tx_lin;
6088         data[35] = card->perf_stats.tx_linfail;
6089         data[36] = card->perf_stats.cq_cnt;
6090         data[37] = card->perf_stats.cq_time;
6091         data[38] = card->perf_stats.rx_csum;
6092 }
6093 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
6094
6095 void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6096 {
6097         switch (stringset) {
6098         case ETH_SS_STATS:
6099                 memcpy(data, &qeth_ethtool_stats_keys,
6100                         sizeof(qeth_ethtool_stats_keys));
6101                 break;
6102         default:
6103                 WARN_ON(1);
6104                 break;
6105         }
6106 }
6107 EXPORT_SYMBOL_GPL(qeth_core_get_strings);
6108
6109 void qeth_core_get_drvinfo(struct net_device *dev,
6110                 struct ethtool_drvinfo *info)
6111 {
6112         struct qeth_card *card = dev->ml_priv;
6113
6114         strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
6115                 sizeof(info->driver));
6116         strlcpy(info->version, "1.0", sizeof(info->version));
6117         strlcpy(info->fw_version, card->info.mcl_level,
6118                 sizeof(info->fw_version));
6119         snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
6120                  CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
6121 }
6122 EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
6123
6124 /* Helper function to fill 'advertising' and 'supported' which are the same. */
6125 /* Autoneg and full-duplex are supported and advertised unconditionally.     */
6126 /* Always advertise and support all speeds up to the specified maximum, and  */
6127 /* only the one specified port type.                                         */
6128 static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
6129                                 int maxspeed, int porttype)
6130 {
6131         ethtool_link_ksettings_zero_link_mode(cmd, supported);
6132         ethtool_link_ksettings_zero_link_mode(cmd, advertising);
6133         ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
6134
6135         ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
6136         ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
6137
6138         switch (porttype) {
6139         case PORT_TP:
6140                 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6141                 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6142                 break;
6143         case PORT_FIBRE:
6144                 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
6145                 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
6146                 break;
6147         default:
6148                 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6149                 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6150                 WARN_ON_ONCE(1);
6151         }
6152
6153         /* fall through from high to low, to also select all lower speeds */
6154         switch (maxspeed) {
6155         case SPEED_25000:
6156                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6157                                                      25000baseSR_Full);
6158                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6159                                                      25000baseSR_Full);
6160                 break;
6161         case SPEED_10000:
6162                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6163                                                      10000baseT_Full);
6164                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6165                                                      10000baseT_Full);
6166         case SPEED_1000:
6167                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6168                                                      1000baseT_Full);
6169                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6170                                                      1000baseT_Full);
6171                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6172                                                      1000baseT_Half);
6173                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6174                                                      1000baseT_Half);
6175         case SPEED_100:
6176                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6177                                                      100baseT_Full);
6178                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6179                                                      100baseT_Full);
6180                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6181                                                      100baseT_Half);
6182                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6183                                                      100baseT_Half);
6184         case SPEED_10:
6185                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6186                                                      10baseT_Full);
6187                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6188                                                      10baseT_Full);
6189                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6190                                                      10baseT_Half);
6191                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6192                                                      10baseT_Half);
6193                 /* end fallthrough */
6194                 break;
6195         default:
6196                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6197                                                      10baseT_Full);
6198                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6199                                                      10baseT_Full);
6200                 ethtool_link_ksettings_add_link_mode(cmd, supported,
6201                                                      10baseT_Half);
6202                 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6203                                                      10baseT_Half);
6204                 WARN_ON_ONCE(1);
6205         }
6206 }
6207
6208 int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
6209                 struct ethtool_link_ksettings *cmd)
6210 {
6211         struct qeth_card *card = netdev->ml_priv;
6212         enum qeth_link_types link_type;
6213         struct carrier_info carrier_info;
6214         int rc;
6215
6216         if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
6217                 link_type = QETH_LINK_TYPE_10GBIT_ETH;
6218         else
6219                 link_type = card->info.link_type;
6220
6221         cmd->base.duplex = DUPLEX_FULL;
6222         cmd->base.autoneg = AUTONEG_ENABLE;
6223         cmd->base.phy_address = 0;
6224         cmd->base.mdio_support = 0;
6225         cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
6226         cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6227
6228         switch (link_type) {
6229         case QETH_LINK_TYPE_FAST_ETH:
6230         case QETH_LINK_TYPE_LANE_ETH100:
6231                 cmd->base.speed = SPEED_100;
6232                 cmd->base.port = PORT_TP;
6233                 break;
6234         case QETH_LINK_TYPE_GBIT_ETH:
6235         case QETH_LINK_TYPE_LANE_ETH1000:
6236                 cmd->base.speed = SPEED_1000;
6237                 cmd->base.port = PORT_FIBRE;
6238                 break;
6239         case QETH_LINK_TYPE_10GBIT_ETH:
6240                 cmd->base.speed = SPEED_10000;
6241                 cmd->base.port = PORT_FIBRE;
6242                 break;
6243         case QETH_LINK_TYPE_25GBIT_ETH:
6244                 cmd->base.speed = SPEED_25000;
6245                 cmd->base.port = PORT_FIBRE;
6246                 break;
6247         default:
6248                 cmd->base.speed = SPEED_10;
6249                 cmd->base.port = PORT_TP;
6250         }
6251         qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
6252
6253         /* Check if we can obtain more accurate information.     */
6254         /* If QUERY_CARD_INFO command is not supported or fails, */
6255         /* just return the heuristic values filled in above.     */
6256         if (!qeth_card_hw_is_reachable(card))
6257                 return -ENODEV;
6258         rc = qeth_query_card_info(card, &carrier_info);
6259         if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
6260                 return 0;
6261         if (rc) /* report error from the hardware operation */
6262                 return rc;
6263         /* on success, fill in the information obtained from the hardware */
6264
6265         netdev_dbg(netdev,
6266         "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
6267                         carrier_info.card_type,
6268                         carrier_info.port_mode,
6269                         carrier_info.port_speed);
6270
6271         /* Update attributes for which we've obtained more authoritative */
6272         /* information, leave the rest the way they were filled above.   */
6273         switch (carrier_info.card_type) {
6274         case CARD_INFO_TYPE_1G_COPPER_A:
6275         case CARD_INFO_TYPE_1G_COPPER_B:
6276                 cmd->base.port = PORT_TP;
6277                 qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6278                 break;
6279         case CARD_INFO_TYPE_1G_FIBRE_A:
6280         case CARD_INFO_TYPE_1G_FIBRE_B:
6281                 cmd->base.port = PORT_FIBRE;
6282                 qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6283                 break;
6284         case CARD_INFO_TYPE_10G_FIBRE_A:
6285         case CARD_INFO_TYPE_10G_FIBRE_B:
6286                 cmd->base.port = PORT_FIBRE;
6287                 qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
6288                 break;
6289         }
6290
6291         switch (carrier_info.port_mode) {
6292         case CARD_INFO_PORTM_FULLDUPLEX:
6293                 cmd->base.duplex = DUPLEX_FULL;
6294                 break;
6295         case CARD_INFO_PORTM_HALFDUPLEX:
6296                 cmd->base.duplex = DUPLEX_HALF;
6297                 break;
6298         }
6299
6300         switch (carrier_info.port_speed) {
6301         case CARD_INFO_PORTS_10M:
6302                 cmd->base.speed = SPEED_10;
6303                 break;
6304         case CARD_INFO_PORTS_100M:
6305                 cmd->base.speed = SPEED_100;
6306                 break;
6307         case CARD_INFO_PORTS_1G:
6308                 cmd->base.speed = SPEED_1000;
6309                 break;
6310         case CARD_INFO_PORTS_10G:
6311                 cmd->base.speed = SPEED_10000;
6312                 break;
6313         case CARD_INFO_PORTS_25G:
6314                 cmd->base.speed = SPEED_25000;
6315                 break;
6316         }
6317
6318         return 0;
6319 }
6320 EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
6321
6322 /* Callback to handle checksum offload command reply from OSA card.
6323  * Verify that required features have been enabled on the card.
6324  * Return error in hdr->return_code as this value is checked by caller.
6325  *
6326  * Always returns zero to indicate no further messages from the OSA card.
6327  */
6328 static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
6329                                         struct qeth_reply *reply,
6330                                         unsigned long data)
6331 {
6332         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6333         struct qeth_checksum_cmd *chksum_cb =
6334                                 (struct qeth_checksum_cmd *)reply->param;
6335
6336         QETH_CARD_TEXT(card, 4, "chkdoccb");
6337         if (qeth_setassparms_inspect_rc(cmd))
6338                 return 0;
6339
6340         memset(chksum_cb, 0, sizeof(*chksum_cb));
6341         if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6342                 chksum_cb->supported =
6343                                 cmd->data.setassparms.data.chksum.supported;
6344                 QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
6345         }
6346         if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
6347                 chksum_cb->supported =
6348                                 cmd->data.setassparms.data.chksum.supported;
6349                 chksum_cb->enabled =
6350                                 cmd->data.setassparms.data.chksum.enabled;
6351                 QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
6352                 QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
6353         }
6354         return 0;
6355 }
6356
6357 /* Send command to OSA card and check results. */
6358 static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
6359                                      enum qeth_ipa_funcs ipa_func,
6360                                      __u16 cmd_code, long data,
6361                                      struct qeth_checksum_cmd *chksum_cb,
6362                                      enum qeth_prot_versions prot)
6363 {
6364         struct qeth_cmd_buffer *iob;
6365
6366         QETH_CARD_TEXT(card, 4, "chkdocmd");
6367         iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6368                                        sizeof(__u32), prot);
6369         if (!iob)
6370                 return -ENOMEM;
6371
6372         __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
6373         return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb,
6374                                  chksum_cb);
6375 }
6376
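/* Turn a checksum assist on with the usual two-step handshake: START to query
 * the supported flags, then ENABLE with those flags. Fail (and stop the assist
 * again) if the card does not offer TCP/UDP - and for IPv4 also IP header -
 * checksumming.
 */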
6377 static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
6378                                  enum qeth_prot_versions prot)
6379 {
6380         u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6381         struct qeth_checksum_cmd chksum_cb;
6382         int rc;
6383
6384         if (prot == QETH_PROT_IPV4)
6385                 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6386         rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6387                                        &chksum_cb, prot);
6388         if (!rc) {
6389                 if ((required_features & chksum_cb.supported) !=
6390                     required_features)
6391                         rc = -EIO;
6392                 else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
6393                          cstype == IPA_INBOUND_CHECKSUM)
6394                         dev_warn(&card->gdev->dev,
6395                                  "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
6396                                  QETH_CARD_IFNAME(card));
6397         }
6398         if (rc) {
6399                 qeth_send_simple_setassparms_prot(card, cstype,
6400                                                   IPA_CMD_ASS_STOP, 0, prot);
6401                 dev_warn(&card->gdev->dev,
6402                          "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
6403                          prot, QETH_CARD_IFNAME(card));
6404                 return rc;
6405         }
6406         rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6407                                        chksum_cb.supported, &chksum_cb,
6408                                        prot);
6409         if (!rc) {
6410                 if ((required_features & chksum_cb.enabled) !=
6411                     required_features)
6412                         rc = -EIO;
6413         }
6414         if (rc) {
6415                 qeth_send_simple_setassparms_prot(card, cstype,
6416                                                   IPA_CMD_ASS_STOP, 0, prot);
6417                 dev_warn(&card->gdev->dev,
6418                          "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
6419                          prot, QETH_CARD_IFNAME(card));
6420                 return rc;
6421         }
6422
6423         dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6424                  cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6425         return 0;
6426 }
6427
6428 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6429                              enum qeth_prot_versions prot)
6430 {
6431         int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
6432                       : qeth_send_simple_setassparms_prot(card, cstype,
6433                                                           IPA_CMD_ASS_STOP, 0,
6434                                                           prot);
6435         return rc ? -EIO : 0;
6436 }
6437
6438 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6439                              unsigned long data)
6440 {
6441         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6442         struct qeth_tso_start_data *tso_data = reply->param;
6443
6444         if (qeth_setassparms_inspect_rc(cmd))
6445                 return 0;
6446
6447         tso_data->mss = cmd->data.setassparms.data.tso.mss;
6448         tso_data->supported = cmd->data.setassparms.data.tso.supported;
6449         return 0;
6450 }
6451
6452 static int qeth_set_tso_off(struct qeth_card *card,
6453                             enum qeth_prot_versions prot)
6454 {
6455         return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6456                                                  IPA_CMD_ASS_STOP, 0, prot);
6457 }
6458
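/* Enable TSO for one IP version: START reports the card's MSS and supported
 * modes, then ENABLE requests QETH_IPA_LARGE_SEND_TCP and the reply caps are
 * verified. Any failure rolls the assist back via qeth_set_tso_off().
 */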
6459 static int qeth_set_tso_on(struct qeth_card *card,
6460                            enum qeth_prot_versions prot)
6461 {
6462         struct qeth_tso_start_data tso_data;
6463         struct qeth_cmd_buffer *iob;
6464         struct qeth_ipa_caps caps;
6465         int rc;
6466
6467         iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6468                                        IPA_CMD_ASS_START, 0, prot);
6469         if (!iob)
6470                 return -ENOMEM;
6471
6472         rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6473         if (rc)
6474                 return rc;
6475
6476         if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6477                 qeth_set_tso_off(card, prot);
6478                 return -EOPNOTSUPP;
6479         }
6480
6481         iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6482                                        IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
6483         if (!iob) {
6484                 qeth_set_tso_off(card, prot);
6485                 return -ENOMEM;
6486         }
6487
6488         /* enable TSO capability */
6489         __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6490                 QETH_IPA_LARGE_SEND_TCP;
6491         rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6492         if (rc) {
6493                 qeth_set_tso_off(card, prot);
6494                 return rc;
6495         }
6496
6497         if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6498             !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6499                 qeth_set_tso_off(card, prot);
6500                 return -EOPNOTSUPP;
6501         }
6502
6503         dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6504                  tso_data.mss);
6505         return 0;
6506 }
6507
6508 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6509                             enum qeth_prot_versions prot)
6510 {
6511         int rc = on ? qeth_set_tso_on(card, prot) :
6512                       qeth_set_tso_off(card, prot);
6513
6514         return rc ? -EIO : 0;
6515 }
6516
6517 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6518 {
6519         int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6520         int rc_ipv6;
6521
6522         if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6523                 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6524                                             QETH_PROT_IPV4);
6525         if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6526                 /* at most the IPv4 Assist is available, so its rc is the result */
6527                 return rc_ipv4;
6528
6529         rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6530                                     QETH_PROT_IPV6);
6531
6532         if (on)
6533                 /* enable: success if any Assist is active */
6534                 return (rc_ipv6) ? rc_ipv4 : 0;
6535
6536         /* disable: failure if any Assist is still active */
6537         return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6538 }
6539
6540 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6541                           NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
6542 /**
6543  * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6544  * @dev:        a net_device
6545  */
6546 void qeth_enable_hw_features(struct net_device *dev)
6547 {
6548         struct qeth_card *card = dev->ml_priv;
6549         netdev_features_t features;
6550
6551         rtnl_lock();
6552         features = dev->features;
6553         /* force-off any feature that needs an IPA sequence.
6554          * netdev_update_features() will restart them.
6555          */
6556         dev->features &= ~QETH_HW_FEATURES;
6557         netdev_update_features(dev);
6558         if (features != dev->features)
6559                 dev_warn(&card->gdev->dev,
6560                          "Device recovery failed to restore all offload features\n");
6561         rtnl_unlock();
6562 }
6563 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6564
6565 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6566 {
6567         struct qeth_card *card = dev->ml_priv;
6568         netdev_features_t changed = dev->features ^ features;
6569         int rc = 0;
6570
6571         QETH_DBF_TEXT(SETUP, 2, "setfeat");
6572         QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6573
6574         if ((changed & NETIF_F_IP_CSUM)) {
6575                 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6576                                        IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6577                 if (rc)
6578                         changed ^= NETIF_F_IP_CSUM;
6579         }
6580         if (changed & NETIF_F_IPV6_CSUM) {
6581                 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6582                                        IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6583                 if (rc)
6584                         changed ^= NETIF_F_IPV6_CSUM;
6585         }
6586         if (changed & NETIF_F_RXCSUM) {
6587                 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6588                 if (rc)
6589                         changed ^= NETIF_F_RXCSUM;
6590         }
6591         if (changed & NETIF_F_TSO) {
6592                 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6593                                       QETH_PROT_IPV4);
6594                 if (rc)
6595                         changed ^= NETIF_F_TSO;
6596         }
6597         if (changed & NETIF_F_TSO6) {
6598                 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6599                                       QETH_PROT_IPV6);
6600                 if (rc)
6601                         changed ^= NETIF_F_TSO6;
6602         }
6603
6604         /* everything changed successfully? */
6605         if ((dev->features ^ features) == changed)
6606                 return 0;
6607         /* something went wrong: keep only the successful changes and return an error */
6608         dev->features ^= changed;
6609         return -EIO;
6610 }
6611 EXPORT_SYMBOL_GPL(qeth_set_features);
6612
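/* Drop requested features that the card's assists do not support, and mask
 * all hardware offloads while the card is down or recovering.
 */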
6613 netdev_features_t qeth_fix_features(struct net_device *dev,
6614                                     netdev_features_t features)
6615 {
6616         struct qeth_card *card = dev->ml_priv;
6617
6618         QETH_DBF_TEXT(SETUP, 2, "fixfeat");
6619         if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6620                 features &= ~NETIF_F_IP_CSUM;
6621         if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6622                 features &= ~NETIF_F_IPV6_CSUM;
6623         if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6624             !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6625                 features &= ~NETIF_F_RXCSUM;
6626         if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6627                 features &= ~NETIF_F_TSO;
6628         if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6629                 features &= ~NETIF_F_TSO6;
6630         /* if the card isn't up, remove features that require hw changes */
6631         if (card->state == CARD_STATE_DOWN ||
6632             card->state == CARD_STATE_RECOVER)
6633                 features &= ~QETH_HW_FEATURES;
6634         QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6635         return features;
6636 }
6637 EXPORT_SYMBOL_GPL(qeth_fix_features);
6638
6639 netdev_features_t qeth_features_check(struct sk_buff *skb,
6640                                       struct net_device *dev,
6641                                       netdev_features_t features)
6642 {
6643         /* GSO segmentation builds skbs with
6644          *      a (small) linear part for the headers, and
6645          *      page frags for the data.
6646          * Compared to a linear skb, the header-only part consumes an
6647          * additional buffer element. This reduces buffer utilization, and
6648          * hurts throughput. So compress small segments into one element.
6649          */
6650         if (netif_needs_gso(skb, features)) {
6651                 /* match skb_segment(): */
6652                 unsigned int doffset = skb->data - skb_mac_header(skb);
6653                 unsigned int hsize = skb_shinfo(skb)->gso_size;
6654                 unsigned int hroom = skb_headroom(skb);
6655
6656                 /* linearize only if resulting skb allocations are order-0: */
6657                 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6658                         features &= ~NETIF_F_SG;
6659         }
6660
6661         return vlan_features_check(skb, features);
6662 }
6663 EXPORT_SYMBOL_GPL(qeth_features_check);
6664
6665 static int __init qeth_core_init(void)
6666 {
6667         int rc;
6668
6669         pr_info("loading core functions\n");
6670
6671         qeth_wq = create_singlethread_workqueue("qeth_wq");
6672         if (!qeth_wq) {
6673                 rc = -ENOMEM;
6674                 goto out_err;
6675         }
6676
6677         rc = qeth_register_dbf_views();
6678         if (rc)
6679                 goto dbf_err;
6680         qeth_core_root_dev = root_device_register("qeth");
6681         rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6682         if (rc)
6683                 goto register_err;
6684         qeth_core_header_cache =
6685                 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6686                                   roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6687                                   0, NULL);
6688         if (!qeth_core_header_cache) {
6689                 rc = -ENOMEM;
6690                 goto slab_err;
6691         }
6692         qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6693                         sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6694         if (!qeth_qdio_outbuf_cache) {
6695                 rc = -ENOMEM;
6696                 goto cqslab_err;
6697         }
6698         rc = ccw_driver_register(&qeth_ccw_driver);
6699         if (rc)
6700                 goto ccw_err;
6701         rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6702         if (rc)
6703                 goto ccwgroup_err;
6704
6705         return 0;
6706
6707 ccwgroup_err:
6708         ccw_driver_unregister(&qeth_ccw_driver);
6709 ccw_err:
6710         kmem_cache_destroy(qeth_qdio_outbuf_cache);
6711 cqslab_err:
6712         kmem_cache_destroy(qeth_core_header_cache);
6713 slab_err:
6714         root_device_unregister(qeth_core_root_dev);
6715 register_err:
6716         qeth_unregister_dbf_views();
6717 dbf_err:
6718         destroy_workqueue(qeth_wq);
6719 out_err:
6720         pr_err("Initializing the qeth device driver failed\n");
6721         return rc;
6722 }
6723
6724 static void __exit qeth_core_exit(void)
6725 {
6726         qeth_clear_dbf_list();
6727         destroy_workqueue(qeth_wq);
6728         ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6729         ccw_driver_unregister(&qeth_ccw_driver);
6730         kmem_cache_destroy(qeth_qdio_outbuf_cache);
6731         kmem_cache_destroy(qeth_core_header_cache);
6732         root_device_unregister(qeth_core_root_dev);
6733         qeth_unregister_dbf_views();
6734         pr_info("core functions removed\n");
6735 }
6736
6737 module_init(qeth_core_init);
6738 module_exit(qeth_core_exit);
6739 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6740 MODULE_DESCRIPTION("qeth core functions");
6741 MODULE_LICENSE("GPL");