[linux-2.6-microblaze.git] drivers/scsi/qedi/qedi_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic iSCSI Offload Driver
4  * Copyright (c) 2016 Cavium Inc.
5  */
6
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/kernel.h>
10 #include <linux/if_arp.h>
11 #include <scsi/iscsi_if.h>
12 #include <linux/inet.h>
13 #include <net/arp.h>
14 #include <linux/list.h>
15 #include <linux/kthread.h>
16 #include <linux/mm.h>
17 #include <linux/if_vlan.h>
18 #include <linux/cpu.h>
19 #include <linux/iscsi_boot_sysfs.h>
20
21 #include <scsi/scsi_cmnd.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_eh.h>
24 #include <scsi/scsi_host.h>
25 #include <scsi/scsi.h>
26
27 #include "qedi.h"
28 #include "qedi_gbl.h"
29 #include "qedi_iscsi.h"
30
31 static uint qedi_qed_debug;
32 module_param(qedi_qed_debug, uint, 0644);
33 MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)");
34
35 static uint qedi_fw_debug;
36 module_param(qedi_fw_debug, uint, 0644);
37 MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
38
39 uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
40 module_param(qedi_dbg_log, uint, 0644);
41 MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
42
43 uint qedi_io_tracing;
44 module_param(qedi_io_tracing, uint, 0644);
45 MODULE_PARM_DESC(qedi_io_tracing,
46                  " Enable logging of SCSI requests/completions into trace buffer. (default off).");
47
48 static uint qedi_ll2_buf_size = 0x400;
49 module_param(qedi_ll2_buf_size, uint, 0644);
50 MODULE_PARM_DESC(qedi_ll2_buf_size,
51                  "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
52
53 static uint qedi_flags_override;
54 module_param(qedi_flags_override, uint, 0644);
55 MODULE_PARM_DESC(qedi_flags_override, "Disable/Enable MFW error flags bits action.");
56
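/*
 * For example, the parameters above can typically be set at module load
 * time ("modprobe qedi qedi_fw_debug=3 qedi_ll2_buf_size=0x2400") or
 * adjusted at runtime via /sys/module/qedi/parameters/, since they are
 * registered with 0644 permissions.
 */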
57 const struct qed_iscsi_ops *qedi_ops;
58 static struct scsi_transport_template *qedi_scsi_transport;
59 static struct pci_driver qedi_pci_driver;
60 static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
61 static LIST_HEAD(qedi_udev_list);
62 /* Static function declarations */
63 static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
64 static void qedi_free_global_queues(struct qedi_ctx *qedi);
65 static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
66 static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
67 static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
68 static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
69 static void qedi_recovery_handler(struct work_struct *work);
70 static void qedi_schedule_hw_err_handler(void *dev,
71                                          enum qed_hw_err_type err_type);
72
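/*
 * Firmware event callback: look up the endpoint for the event's icid,
 * wake up any offload/terminate waiters, and hand iSCSI/TCP error events
 * to the error-processing helpers.
 */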
73 static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
74 {
75         struct qedi_ctx *qedi;
76         struct qedi_endpoint *qedi_ep;
77         struct iscsi_eqe_data *data;
78         int rval = 0;
79
80         if (!context || !fw_handle) {
81                 QEDI_ERR(NULL, "Recv event with ctx NULL\n");
82                 return -EINVAL;
83         }
84
85         qedi = (struct qedi_ctx *)context;
86         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
87                   "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
88
89         data = (struct iscsi_eqe_data *)fw_handle;
90         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
91                   "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
92                    data->icid, data->conn_id, data->error_code,
93                    data->error_pdu_opcode_reserved);
94
95         qedi_ep = qedi->ep_tbl[data->icid];
96
97         if (!qedi_ep) {
98                 QEDI_WARN(&qedi->dbg_ctx,
99                           "Cannot process event, ep already disconnected, cid=0x%x\n",
100                            data->icid);
101                 WARN_ON(1);
102                 return -ENODEV;
103         }
104
105         switch (fw_event_code) {
106         case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
107                 if (qedi_ep->state == EP_STATE_OFLDCONN_START)
108                         qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
109
110                 wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
111                 break;
112         case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
113                 qedi_ep->state = EP_STATE_DISCONN_COMPL;
114                 wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
115                 break;
116         case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
117                 qedi_process_iscsi_error(qedi_ep, data);
118                 break;
119         case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
120         case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
121         case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
122         case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
123         case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
124         case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
125         case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
126                 qedi_process_tcp_error(qedi_ep, data);
127                 break;
128         default:
129                 QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
130                          fw_event_code);
131         }
132
133         return rval;
134 }
135
136 static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
137 {
138         struct qedi_uio_dev *udev = uinfo->priv;
139         struct qedi_ctx *qedi = udev->qedi;
140
141         if (!capable(CAP_NET_ADMIN))
142                 return -EPERM;
143
144         if (udev->uio_dev != -1)
145                 return -EBUSY;
146
147         rtnl_lock();
148         udev->uio_dev = iminor(inode);
149         qedi_reset_uio_rings(udev);
150         set_bit(UIO_DEV_OPENED, &qedi->flags);
151         rtnl_unlock();
152
153         return 0;
154 }
155
156 static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
157 {
158         struct qedi_uio_dev *udev = uinfo->priv;
159         struct qedi_ctx *qedi = udev->qedi;
160
161         udev->uio_dev = -1;
162         clear_bit(UIO_DEV_OPENED, &qedi->flags);
163         qedi_ll2_free_skbs(qedi);
164         return 0;
165 }
166
167 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
168 {
169         if (udev->uctrl) {
170                 free_page((unsigned long)udev->uctrl);
171                 udev->uctrl = NULL;
172         }
173
174         if (udev->ll2_ring) {
175                 free_page((unsigned long)udev->ll2_ring);
176                 udev->ll2_ring = NULL;
177         }
178
179         if (udev->ll2_buf) {
180                 free_pages((unsigned long)udev->ll2_buf, 2);
181                 udev->ll2_buf = NULL;
182         }
183 }
184
185 static void __qedi_free_uio(struct qedi_uio_dev *udev)
186 {
187         uio_unregister_device(&udev->qedi_uinfo);
188
189         __qedi_free_uio_rings(udev);
190
191         pci_dev_put(udev->pdev);
192         kfree(udev);
193 }
194
195 static void qedi_free_uio(struct qedi_uio_dev *udev)
196 {
197         if (!udev)
198                 return;
199
200         list_del_init(&udev->list);
201         __qedi_free_uio(udev);
202 }
203
204 static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
205 {
206         struct qedi_ctx *qedi = NULL;
207         struct qedi_uio_ctrl *uctrl = NULL;
208
209         qedi = udev->qedi;
210         uctrl = udev->uctrl;
211
212         spin_lock_bh(&qedi->ll2_lock);
213         uctrl->host_rx_cons = 0;
214         uctrl->hw_rx_prod = 0;
215         uctrl->hw_rx_bd_prod = 0;
216         uctrl->host_rx_bd_cons = 0;
217
218         memset(udev->ll2_ring, 0, udev->ll2_ring_size);
219         memset(udev->ll2_buf, 0, udev->ll2_buf_size);
220         spin_unlock_bh(&qedi->ll2_lock);
221 }
222
223 static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
224 {
225         int rc = 0;
226
227         if (udev->ll2_ring || udev->ll2_buf)
228                 return rc;
229
230         /* Memory for control area.  */
231         udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
232         if (!udev->uctrl)
233                 return -ENOMEM;
234
235         /* Allocating memory for LL2 ring  */
236         udev->ll2_ring_size = QEDI_PAGE_SIZE;
237         udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
238         if (!udev->ll2_ring) {
239                 rc = -ENOMEM;
240                 goto exit_alloc_ring;
241         }
242
243         /* Allocating memory for Tx/Rx pkt buffer */
244         udev->ll2_buf_size = TX_RX_RING * qedi_ll2_buf_size;
245         udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
246         udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
247                                                  __GFP_ZERO, 2);
248         if (!udev->ll2_buf) {
249                 rc = -ENOMEM;
250                 goto exit_alloc_buf;
251         }
252         return rc;
253
254 exit_alloc_buf:
255         free_page((unsigned long)udev->ll2_ring);
256         udev->ll2_ring = NULL;
257 exit_alloc_ring:
258         return rc;
259 }
260
261 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
262 {
263         struct qedi_uio_dev *udev = NULL;
264         int rc = 0;
265
266         list_for_each_entry(udev, &qedi_udev_list, list) {
267                 if (udev->pdev == qedi->pdev) {
268                         udev->qedi = qedi;
269                         if (__qedi_alloc_uio_rings(udev)) {
270                                 udev->qedi = NULL;
271                                 return -ENOMEM;
272                         }
273                         qedi->udev = udev;
274                         return 0;
275                 }
276         }
277
278         udev = kzalloc(sizeof(*udev), GFP_KERNEL);
279         if (!udev) {
280                 rc = -ENOMEM;
281                 goto err_udev;
282         }
283
284         udev->uio_dev = -1;
285
286         udev->qedi = qedi;
287         udev->pdev = qedi->pdev;
288
289         rc = __qedi_alloc_uio_rings(udev);
290         if (rc)
291                 goto err_uctrl;
292
293         list_add(&udev->list, &qedi_udev_list);
294
295         pci_dev_get(udev->pdev);
296         qedi->udev = udev;
297
298         udev->tx_pkt = udev->ll2_buf;
299         udev->rx_pkt = udev->ll2_buf + qedi_ll2_buf_size;
300         return 0;
301
302  err_uctrl:
303         kfree(udev);
304  err_udev:
305         return -ENOMEM;
306 }
307
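/*
 * Register the UIO device that exposes the control area, the LL2 ring and
 * the LL2 packet buffer to the iscsiuio userspace daemon as memory
 * regions 0, 1 and 2.
 */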
308 static int qedi_init_uio(struct qedi_ctx *qedi)
309 {
310         struct qedi_uio_dev *udev = qedi->udev;
311         struct uio_info *uinfo;
312         int ret = 0;
313
314         if (!udev)
315                 return -ENOMEM;
316
317         uinfo = &udev->qedi_uinfo;
318
319         uinfo->mem[0].addr = (unsigned long)udev->uctrl;
320         uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
321         uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
322
323         uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
324         uinfo->mem[1].size = udev->ll2_ring_size;
325         uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
326
327         uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
328         uinfo->mem[2].size = udev->ll2_buf_size;
329         uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
330
331         uinfo->name = "qedi_uio";
332         uinfo->version = QEDI_MODULE_VERSION;
333         uinfo->irq = UIO_IRQ_CUSTOM;
334
335         uinfo->open = qedi_uio_open;
336         uinfo->release = qedi_uio_close;
337
338         if (udev->uio_dev == -1) {
339                 if (!uinfo->priv) {
340                         uinfo->priv = udev;
341
342                         ret = uio_register_device(&udev->pdev->dev, uinfo);
343                         if (ret) {
344                                 QEDI_ERR(&qedi->dbg_ctx,
345                                          "UIO registration failed\n");
346                         }
347                 }
348         }
349
350         return ret;
351 }
352
353 static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
354                                   struct qed_sb_info *sb_info, u16 sb_id)
355 {
356         struct status_block_e4 *sb_virt;
357         dma_addr_t sb_phys;
358         int ret;
359
360         sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
361                                      sizeof(struct status_block_e4), &sb_phys,
362                                      GFP_KERNEL);
363         if (!sb_virt) {
364                 QEDI_ERR(&qedi->dbg_ctx,
365                          "Status block allocation failed for id = %d.\n",
366                           sb_id);
367                 return -ENOMEM;
368         }
369
370         ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
371                                        sb_id, QED_SB_TYPE_STORAGE);
372         if (ret) {
373                 QEDI_ERR(&qedi->dbg_ctx,
374                          "Status block initialization failed for id = %d.\n",
375                           sb_id);
376                 return ret;
377         }
378
379         return 0;
380 }
381
382 static void qedi_free_sb(struct qedi_ctx *qedi)
383 {
384         struct qed_sb_info *sb_info;
385         int id;
386
387         for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
388                 sb_info = &qedi->sb_array[id];
389                 if (sb_info->sb_virt)
390                         dma_free_coherent(&qedi->pdev->dev,
391                                           sizeof(*sb_info->sb_virt),
392                                           (void *)sb_info->sb_virt,
393                                           sb_info->sb_phys);
394         }
395 }
396
397 static void qedi_free_fp(struct qedi_ctx *qedi)
398 {
399         kfree(qedi->fp_array);
400         kfree(qedi->sb_array);
401 }
402
403 static void qedi_destroy_fp(struct qedi_ctx *qedi)
404 {
405         qedi_free_sb(qedi);
406         qedi_free_fp(qedi);
407 }
408
409 static int qedi_alloc_fp(struct qedi_ctx *qedi)
410 {
411         int ret = 0;
412
413         qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
414                                  sizeof(struct qedi_fastpath), GFP_KERNEL);
415         if (!qedi->fp_array) {
416                 QEDI_ERR(&qedi->dbg_ctx,
417                          "fastpath fp array allocation failed.\n");
418                 return -ENOMEM;
419         }
420
421         qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
422                                  sizeof(struct qed_sb_info), GFP_KERNEL);
423         if (!qedi->sb_array) {
424                 QEDI_ERR(&qedi->dbg_ctx,
425                          "fastpath sb array allocation failed.\n");
426                 ret = -ENOMEM;
427                 goto free_fp;
428         }
429
430         return ret;
431
432 free_fp:
433         qedi_free_fp(qedi);
434         return ret;
435 }
436
437 static void qedi_int_fp(struct qedi_ctx *qedi)
438 {
439         struct qedi_fastpath *fp;
440         int id;
441
442         memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
443                sizeof(*qedi->fp_array));
444         memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
445                sizeof(*qedi->sb_array));
446
447         for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
448                 fp = &qedi->fp_array[id];
449                 fp->sb_info = &qedi->sb_array[id];
450                 fp->sb_id = id;
451                 fp->qedi = qedi;
452                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
453                          "qedi", id);
454
455                 /* fp_array[i] is passed as the IRQ cookie, so
456                  * initialize data needed in interrupt context here.
457                  */
458         }
459 }
460
461 static int qedi_prepare_fp(struct qedi_ctx *qedi)
462 {
463         struct qedi_fastpath *fp;
464         int id, ret = 0;
465
466         ret = qedi_alloc_fp(qedi);
467         if (ret)
468                 goto err;
469
470         qedi_int_fp(qedi);
471
472         for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
473                 fp = &qedi->fp_array[id];
474                 ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
475                 if (ret) {
476                         QEDI_ERR(&qedi->dbg_ctx,
477                                  "SB allocation and initialization failed.\n");
478                         ret = -EIO;
479                         goto err_init;
480                 }
481         }
482
483         return 0;
484
485 err_init:
486         qedi_free_sb(qedi);
487         qedi_free_fp(qedi);
488 err:
489         return ret;
490 }
491
492 static int qedi_setup_cid_que(struct qedi_ctx *qedi)
493 {
494         int i;
495
496         qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
497                                                    sizeof(u32), GFP_KERNEL);
498         if (!qedi->cid_que.cid_que_base)
499                 return -ENOMEM;
500
501         qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
502                                                    sizeof(struct qedi_conn *),
503                                                    GFP_KERNEL);
504         if (!qedi->cid_que.conn_cid_tbl) {
505                 kfree(qedi->cid_que.cid_que_base);
506                 qedi->cid_que.cid_que_base = NULL;
507                 return -ENOMEM;
508         }
509
510         qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
511         qedi->cid_que.cid_q_prod_idx = 0;
512         qedi->cid_que.cid_q_cons_idx = 0;
513         qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
514         qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
515
516         for (i = 0; i < qedi->max_active_conns; i++) {
517                 qedi->cid_que.cid_que[i] = i;
518                 qedi->cid_que.conn_cid_tbl[i] = NULL;
519         }
520
521         return 0;
522 }
523
524 static void qedi_release_cid_que(struct qedi_ctx *qedi)
525 {
526         kfree(qedi->cid_que.cid_que_base);
527         qedi->cid_que.cid_que_base = NULL;
528
529         kfree(qedi->cid_que.conn_cid_tbl);
530         qedi->cid_que.conn_cid_tbl = NULL;
531 }
532
533 static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
534                             u16 start_id, u16 next)
535 {
536         id_tbl->start = start_id;
537         id_tbl->max = size;
538         id_tbl->next = next;
539         spin_lock_init(&id_tbl->lock);
540         id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
541         if (!id_tbl->table)
542                 return -ENOMEM;
543
544         return 0;
545 }
546
547 static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
548 {
549         kfree(id_tbl->table);
550         id_tbl->table = NULL;
551 }
552
553 int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
554 {
555         int ret = -1;
556
557         id -= id_tbl->start;
558         if (id >= id_tbl->max)
559                 return ret;
560
561         spin_lock(&id_tbl->lock);
562         if (!test_bit(id, id_tbl->table)) {
563                 set_bit(id, id_tbl->table);
564                 ret = 0;
565         }
566         spin_unlock(&id_tbl->lock);
567         return ret;
568 }
569
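/*
 * Allocate the next free local port id: scan forward from id_tbl->next,
 * wrap around to the start of the bitmap if needed, and return
 * QEDI_LOCAL_PORT_INVALID when the table is exhausted.
 */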
570 u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
571 {
572         u16 id;
573
574         spin_lock(&id_tbl->lock);
575         id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
576         if (id >= id_tbl->max) {
577                 id = QEDI_LOCAL_PORT_INVALID;
578                 if (id_tbl->next != 0) {
579                         id = find_first_zero_bit(id_tbl->table, id_tbl->next);
580                         if (id >= id_tbl->next)
581                                 id = QEDI_LOCAL_PORT_INVALID;
582                 }
583         }
584
585         if (id < id_tbl->max) {
586                 set_bit(id, id_tbl->table);
587                 id_tbl->next = (id + 1) & (id_tbl->max - 1);
588                 id += id_tbl->start;
589         }
590
591         spin_unlock(&id_tbl->lock);
592
593         return id;
594 }
595
596 void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
597 {
598         if (id == QEDI_LOCAL_PORT_INVALID)
599                 return;
600
601         id -= id_tbl->start;
602         if (id >= id_tbl->max)
603                 return;
604
605         clear_bit(id, id_tbl->table);
606 }
607
608 static void qedi_cm_free_mem(struct qedi_ctx *qedi)
609 {
610         kfree(qedi->ep_tbl);
611         qedi->ep_tbl = NULL;
612         qedi_free_id_tbl(&qedi->lcl_port_tbl);
613 }
614
615 static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
616 {
617         u16 port_id;
618
619         qedi->ep_tbl = kzalloc((qedi->max_active_conns *
620                                 sizeof(struct qedi_endpoint *)), GFP_KERNEL);
621         if (!qedi->ep_tbl)
622                 return -ENOMEM;
623         port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
624         if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
625                              QEDI_LOCAL_PORT_MIN, port_id)) {
626                 qedi_cm_free_mem(qedi);
627                 return -ENOMEM;
628         }
629
630         return 0;
631 }
632
633 static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
634 {
635         struct Scsi_Host *shost;
636         struct qedi_ctx *qedi = NULL;
637
638         shost = iscsi_host_alloc(&qedi_host_template,
639                                  sizeof(struct qedi_ctx), 0);
640         if (!shost) {
641                 QEDI_ERR(NULL, "Could not allocate shost\n");
642                 goto exit_setup_shost;
643         }
644
645         shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
646         shost->max_channel = 0;
647         shost->max_lun = ~0;
648         shost->max_cmd_len = 16;
649         shost->transportt = qedi_scsi_transport;
650
651         qedi = iscsi_host_priv(shost);
652         memset(qedi, 0, sizeof(*qedi));
653         qedi->shost = shost;
654         qedi->dbg_ctx.host_no = shost->host_no;
655         qedi->pdev = pdev;
656         qedi->dbg_ctx.pdev = pdev;
657         qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
658         qedi->max_sqes = QEDI_SQ_SIZE;
659
660         shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
661
662         pci_set_drvdata(pdev, qedi);
663
664 exit_setup_shost:
665         return qedi;
666 }
667
668 static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
669 {
670         struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
671         struct skb_work_list *work;
672         struct ethhdr *eh;
673
674         if (!qedi) {
675                 QEDI_ERR(NULL, "qedi is NULL\n");
676                 return -1;
677         }
678
679         if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
680                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
681                           "UIO DEV is not opened\n");
682                 kfree_skb(skb);
683                 return 0;
684         }
685
686         eh = (struct ethhdr *)skb->data;
687         /* Undo VLAN encapsulation */
688         if (eh->h_proto == htons(ETH_P_8021Q)) {
689                 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
690                 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
691                 skb_reset_mac_header(skb);
692         }
693
694         /* Drop non-ARP/IPv4/IPv6 frames here to free them faster */
695         if (eh->h_proto != htons(ETH_P_ARP) &&
696             eh->h_proto != htons(ETH_P_IP) &&
697             eh->h_proto != htons(ETH_P_IPV6)) {
698                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
699                           "Dropping frame ethertype [0x%x] len [0x%x].\n",
700                           eh->h_proto, skb->len);
701                 kfree_skb(skb);
702                 return 0;
703         }
704
705         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
706                   "Allowed frame ethertype [0x%x] len [0x%x].\n",
707                   eh->h_proto, skb->len);
708
709         work = kzalloc(sizeof(*work), GFP_ATOMIC);
710         if (!work) {
711                 QEDI_WARN(&qedi->dbg_ctx,
712                           "Could not allocate work so dropping frame.\n");
713                 kfree_skb(skb);
714                 return 0;
715         }
716
717         INIT_LIST_HEAD(&work->list);
718         work->skb = skb;
719
720         if (skb_vlan_tag_present(skb))
721                 work->vlan_id = skb_vlan_tag_get(skb);
722
723         if (work->vlan_id)
724                 __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
725
726         spin_lock_bh(&qedi->ll2_lock);
727         list_add_tail(&work->list, &qedi->ll2_skb_list);
728         spin_unlock_bh(&qedi->ll2_lock);
729
730         wake_up_process(qedi->ll2_recv_thread);
731
732         return 0;
733 }
734
735 /* Copy this skb into the iscsiuio mmapped region */
736 static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
737                                 u16 vlan_id)
738 {
739         struct qedi_uio_dev *udev = NULL;
740         struct qedi_uio_ctrl *uctrl = NULL;
741         struct qedi_rx_bd rxbd;
742         struct qedi_rx_bd *p_rxbd;
743         u32 rx_bd_prod;
744         void *pkt;
745         int len = 0;
746         u32 prod;
747
748         if (!qedi) {
749                 QEDI_ERR(NULL, "qedi is NULL\n");
750                 return -1;
751         }
752
753         udev = qedi->udev;
754         uctrl = udev->uctrl;
755
756         ++uctrl->hw_rx_prod_cnt;
757         prod = (uctrl->hw_rx_prod + 1) % RX_RING;
758
759         pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size);
760         len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size);
761         memcpy(pkt, skb->data, len);
762
763         memset(&rxbd, 0, sizeof(rxbd));
764         rxbd.rx_pkt_index = prod;
765         rxbd.rx_pkt_len = len;
766         rxbd.vlan_id = vlan_id;
767
768         uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
769         rx_bd_prod = uctrl->hw_rx_bd_prod;
770         p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
771         p_rxbd += rx_bd_prod;
772
773         memcpy(p_rxbd, &rxbd, sizeof(rxbd));
774
775         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
776                   "hw_rx_prod [%d] prod [%d] hw_rx_bd_prod [%d] rx_pkt_idx [%d] rx_len [%d].\n",
777                   uctrl->hw_rx_prod, prod, uctrl->hw_rx_bd_prod,
778                   rxbd.rx_pkt_index, rxbd.rx_pkt_len);
779         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
780                   "host_rx_cons [%d] hw_rx_bd_cons [%d].\n",
781                   uctrl->host_rx_cons, uctrl->host_rx_bd_cons);
782
783         uctrl->hw_rx_prod = prod;
784
785         /* notify the iscsiuio about new packet */
786         uio_event_notify(&udev->qedi_uinfo);
787
788         return 0;
789 }
790
791 static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
792 {
793         struct skb_work_list *work, *work_tmp;
794
795         spin_lock_bh(&qedi->ll2_lock);
796         list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
797                 list_del(&work->list);
798                 kfree_skb(work->skb);
799                 kfree(work);
800         }
801         spin_unlock_bh(&qedi->ll2_lock);
802 }
803
804 static int qedi_ll2_recv_thread(void *arg)
805 {
806         struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
807         struct skb_work_list *work, *work_tmp;
808
809         set_user_nice(current, -20);
810
811         while (!kthread_should_stop()) {
812                 spin_lock_bh(&qedi->ll2_lock);
813                 list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
814                                          list) {
815                         list_del(&work->list);
816                         qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
817                         kfree_skb(work->skb);
818                         kfree(work);
819                 }
820                 set_current_state(TASK_INTERRUPTIBLE);
821                 spin_unlock_bh(&qedi->ll2_lock);
822                 schedule();
823         }
824
825         __set_current_state(TASK_RUNNING);
826         return 0;
827 }
828
829 static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
830 {
831         u8 num_sq_pages;
832         u32 log_page_size;
833         int rval = 0;
834
835
836         num_sq_pages = (MAX_OUTSTANDING_TASKS_PER_CON * 8) / QEDI_PAGE_SIZE;
837
838         qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
839
840         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
841                   "Number of CQ count is %d\n", qedi->num_queues);
842
843         memset(&qedi->pf_params.iscsi_pf_params, 0,
844                sizeof(qedi->pf_params.iscsi_pf_params));
845
846         qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
847                         qedi->num_queues * sizeof(struct qedi_glbl_q_params),
848                         &qedi->hw_p_cpuq, GFP_KERNEL);
849         if (!qedi->p_cpuq) {
850                 QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
851                 rval = -1;
852                 goto err_alloc_mem;
853         }
854
855         rval = qedi_alloc_global_queues(qedi);
856         if (rval) {
857                 QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
858                 rval = -1;
859                 goto err_alloc_mem;
860         }
861
862         qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
863         qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
864         qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
865         qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
866         qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
867         qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
868         qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
869         qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
870         qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
871         qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
872
873         for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
874                 if ((1 << log_page_size) == QEDI_PAGE_SIZE)
875                         break;
876         }
877         qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
878
879         qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
880                                                            (u64)qedi->hw_p_cpuq;
881
882         /* RQ BDQ initializations.
883          * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
884          * rqe_log_size: 8 for 256B RQE
885          */
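        /*
         * With the values used here: 2^rqe_log_size = 2^8 = 256 bytes per
         * RQE, and 16 RQEs * 256 bytes gives the 4KB RQ mentioned above.
         */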
886         qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
887         /* BDQ address and size */
888         qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
889                                                         qedi->bdq_pbl_list_dma;
890         qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
891                                                 qedi->bdq_pbl_list_num_entries;
892         qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
893
894         /* cq_num_entries: num_tasks + rq_num_entries */
895         qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
896
897         qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
898         qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
899
900 err_alloc_mem:
901         return rval;
902 }
903
904 /* Free DMA coherent memory for array of queue pointers we pass to qed */
905 static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
906 {
907         size_t size = 0;
908
909         if (qedi->p_cpuq) {
910                 size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
911                 dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
912                                     qedi->hw_p_cpuq);
913         }
914
915         qedi_free_global_queues(qedi);
916
917         kfree(qedi->global_queues);
918 }
919
920 static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
921                                    struct qedi_boot_target *tgt, u8 index)
922 {
923         u32 ipv6_en;
924
925         ipv6_en = !!(block->generic.ctrl_flags &
926                      NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
927
928         snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s",
929                  block->target[index].target_name.byte);
930
931         tgt->ipv6_en = ipv6_en;
932
933         if (ipv6_en)
934                 snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
935                          block->target[index].ipv6_addr.byte);
936         else
937                 snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
938                          block->target[index].ipv4_addr.byte);
939 }
940
941 static int qedi_find_boot_info(struct qedi_ctx *qedi,
942                                struct qed_mfw_tlv_iscsi *iscsi,
943                                struct nvm_iscsi_block *block)
944 {
945         struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL;
946         u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0;
947         struct iscsi_cls_session *cls_sess;
948         struct iscsi_cls_conn *cls_conn;
949         struct qedi_conn *qedi_conn;
950         struct iscsi_session *sess;
951         struct iscsi_conn *conn;
952         char ep_ip_addr[64];
953         int i, ret = 0;
954
955         pri_ctrl_flags = !!(block->target[0].ctrl_flags &
956                                         NVM_ISCSI_CFG_TARGET_ENABLED);
957         if (pri_ctrl_flags) {
958                 pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL);
959                 if (!pri_tgt)
960                         return -1;
961                 qedi_get_boot_tgt_info(block, pri_tgt, 0);
962         }
963
964         sec_ctrl_flags = !!(block->target[1].ctrl_flags &
965                                         NVM_ISCSI_CFG_TARGET_ENABLED);
966         if (sec_ctrl_flags) {
967                 sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL);
968                 if (!sec_tgt) {
969                         ret = -1;
970                         goto free_tgt;
971                 }
972                 qedi_get_boot_tgt_info(block, sec_tgt, 1);
973         }
974
975         for (i = 0; i < qedi->max_active_conns; i++) {
976                 qedi_conn = qedi_get_conn_from_id(qedi, i);
977                 if (!qedi_conn)
978                         continue;
979
980                 if (qedi_conn->ep->ip_type == TCP_IPV4)
981                         snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n",
982                                  qedi_conn->ep->dst_addr);
983                 else
984                         snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n",
985                                  qedi_conn->ep->dst_addr);
986
987                 cls_conn = qedi_conn->cls_conn;
988                 conn = cls_conn->dd_data;
989                 cls_sess = iscsi_conn_to_session(cls_conn);
990                 sess = cls_sess->dd_data;
991
992                 if (!iscsi_is_session_online(cls_sess))
993                         continue;
994
995                 if (!sess->targetname)
996                         continue;
997
998                 if (pri_ctrl_flags) {
999                         if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
1000                             !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
1001                                 found = 1;
1002                                 break;
1003                         }
1004                 }
1005
1006                 if (sec_ctrl_flags) {
1007                         if (!strcmp(sec_tgt->iscsi_name, sess->targetname) &&
1008                             !strcmp(sec_tgt->ip_addr, ep_ip_addr)) {
1009                                 found = 1;
1010                                 break;
1011                         }
1012                 }
1013         }
1014
1015         if (found) {
1016                 if (conn->hdrdgst_en) {
1017                         iscsi->header_digest_set = true;
1018                         iscsi->header_digest = 1;
1019                 }
1020
1021                 if (conn->datadgst_en) {
1022                         iscsi->data_digest_set = true;
1023                         iscsi->data_digest = 1;
1024                 }
1025                 iscsi->boot_taget_portal_set = true;
1026                 iscsi->boot_taget_portal = sess->tpgt;
1027
1028         } else {
1029                 ret = -1;
1030         }
1031
1032         if (sec_ctrl_flags)
1033                 kfree(sec_tgt);
1034 free_tgt:
1035         if (pri_ctrl_flags)
1036                 kfree(pri_tgt);
1037
1038         return ret;
1039 }
1040
1041 static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
1042 {
1043         struct qedi_ctx *qedi;
1044
1045         if (!dev) {
1046                 QEDI_INFO(NULL, QEDI_LOG_EVT,
1047                           "dev is NULL so ignoring get_generic_tlv_data request.\n");
1048                 return;
1049         }
1050         qedi = (struct qedi_ctx *)dev;
1051
1052         memset(data, 0, sizeof(struct qed_generic_tlvs));
1053         ether_addr_copy(data->mac[0], qedi->mac);
1054 }
1055
1056 /*
1057  * Protocol TLV handler
1058  */
1059 static void qedi_get_protocol_tlv_data(void *dev, void *data)
1060 {
1061         struct qed_mfw_tlv_iscsi *iscsi = data;
1062         struct qed_iscsi_stats *fw_iscsi_stats;
1063         struct nvm_iscsi_block *block = NULL;
1064         u32 chap_en = 0, mchap_en = 0;
1065         struct qedi_ctx *qedi = dev;
1066         int rval = 0;
1067
1068         fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL);
1069         if (!fw_iscsi_stats) {
1070                 QEDI_ERR(&qedi->dbg_ctx,
1071                          "Could not allocate memory for fw_iscsi_stats.\n");
1072                 goto exit_get_data;
1073         }
1074
1075         mutex_lock(&qedi->stats_lock);
1076         /* Query firmware for offload stats */
1077         qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats);
1078         mutex_unlock(&qedi->stats_lock);
1079
1080         iscsi->rx_frames_set = true;
1081         iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt;
1082         iscsi->rx_bytes_set = true;
1083         iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt;
1084         iscsi->tx_frames_set = true;
1085         iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt;
1086         iscsi->tx_bytes_set = true;
1087         iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt;
1088         iscsi->frame_size_set = true;
1089         iscsi->frame_size = qedi->ll2_mtu;
1090         block = qedi_get_nvram_block(qedi);
1091         if (block) {
1092                 chap_en = !!(block->generic.ctrl_flags &
1093                              NVM_ISCSI_CFG_GEN_CHAP_ENABLED);
1094                 mchap_en = !!(block->generic.ctrl_flags &
1095                               NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED);
1096
1097                 iscsi->auth_method_set = (chap_en || mchap_en) ? true : false;
1098                 iscsi->auth_method = 1;
1099                 if (chap_en)
1100                         iscsi->auth_method = 2;
1101                 if (mchap_en)
1102                         iscsi->auth_method = 3;
1103
1104                 iscsi->tx_desc_size_set = true;
1105                 iscsi->tx_desc_size = QEDI_SQ_SIZE;
1106                 iscsi->rx_desc_size_set = true;
1107                 iscsi->rx_desc_size = QEDI_CQ_SIZE;
1108
1109                 /* tpgt, hdr digest, data digest */
1110                 rval = qedi_find_boot_info(qedi, iscsi, block);
1111                 if (rval)
1112                         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1113                                   "Boot target not set");
1114         }
1115
1116         kfree(fw_iscsi_stats);
1117 exit_get_data:
1118         return;
1119 }
1120
1121 static void qedi_schedule_hw_err_handler(void *dev,
1122                                          enum qed_hw_err_type err_type)
1123 {
1124         struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
1125         unsigned long override_flags = qedi_flags_override;
1126
1127         if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags))
1128                 qedi->qedi_err_flags = qedi_flags_override;
1129
1130         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1131                   "HW error handler scheduled, err=%d err_flags=0x%x\n",
1132                   err_type, qedi->qedi_err_flags);
1133
1134         switch (err_type) {
1135         case QED_HW_ERR_FAN_FAIL:
1136                 schedule_delayed_work(&qedi->board_disable_work, 0);
1137                 break;
1138         case QED_HW_ERR_MFW_RESP_FAIL:
1139         case QED_HW_ERR_HW_ATTN:
1140         case QED_HW_ERR_DMAE_FAIL:
1141         case QED_HW_ERR_RAMROD_FAIL:
1142         case QED_HW_ERR_FW_ASSERT:
1143                 /* Prevent HW attentions from being reasserted */
1144                 if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags))
1145                         qedi_ops->common->attn_clr_enable(qedi->cdev, true);
1146
1147                 if (err_type == QED_HW_ERR_RAMROD_FAIL &&
1148                     test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags))
1149                         qedi_ops->common->recovery_process(qedi->cdev);
1150
1151                 break;
1152         default:
1153                 break;
1154         }
1155 }
1156
1157 static void qedi_schedule_recovery_handler(void *dev)
1158 {
1159         struct qedi_ctx *qedi = dev;
1160
1161         QEDI_ERR(&qedi->dbg_ctx, "Recovery handler scheduled.\n");
1162
1163         if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags))
1164                 return;
1165
1166         atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
1167
1168         schedule_delayed_work(&qedi->recovery_work, 0);
1169 }
1170
1171 static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
1172 {
1173         struct iscsi_session *session = cls_session->dd_data;
1174         struct iscsi_conn *conn = session->leadconn;
1175         struct qedi_conn *qedi_conn = conn->dd_data;
1176
1177         qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
1178 }
1179
1180 static void qedi_link_update(void *dev, struct qed_link_output *link)
1181 {
1182         struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
1183
1184         if (link->link_up) {
1185                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
1186                 atomic_set(&qedi->link_state, QEDI_LINK_UP);
1187         } else {
1188                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1189                           "Link Down event.\n");
1190                 atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
1191                 iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
1192         }
1193 }
1194
1195 static struct qed_iscsi_cb_ops qedi_cb_ops = {
1196         {
1197                 .link_update =          qedi_link_update,
1198                 .schedule_recovery_handler = qedi_schedule_recovery_handler,
1199                 .schedule_hw_err_handler = qedi_schedule_hw_err_handler,
1200                 .get_protocol_tlv_data = qedi_get_protocol_tlv_data,
1201                 .get_generic_tlv_data = qedi_get_generic_tlv_data,
1202         }
1203 };
1204
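/*
 * Queue a CQE for deferred handling on the per-CPU I/O thread: solicited
 * completions reuse the work item embedded in the command, while
 * unsolicited/dummy/cleanup completions get a freshly allocated qedi_work.
 */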
1205 static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
1206                           u16 que_idx, struct qedi_percpu_s *p)
1207 {
1208         struct qedi_work *qedi_work;
1209         struct qedi_conn *q_conn;
1210         struct qedi_cmd *qedi_cmd;
1211         u32 iscsi_cid;
1212         int rc = 0;
1213
1214         iscsi_cid  = cqe->cqe_common.conn_id;
1215         q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
1216         if (!q_conn) {
1217                 QEDI_WARN(&qedi->dbg_ctx,
1218                           "Session no longer exists for cid=0x%x!!\n",
1219                           iscsi_cid);
1220                 return -1;
1221         }
1222
1223         switch (cqe->cqe_common.cqe_type) {
1224         case ISCSI_CQE_TYPE_SOLICITED:
1225         case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
1226                 qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
1227                 if (!qedi_cmd) {
1228                         rc = -1;
1229                         break;
1230                 }
1231                 INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
1232                 qedi_cmd->cqe_work.qedi = qedi;
1233                 memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
1234                 qedi_cmd->cqe_work.que_idx = que_idx;
1235                 qedi_cmd->cqe_work.is_solicited = true;
1236                 list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
1237                 break;
1238         case ISCSI_CQE_TYPE_UNSOLICITED:
1239         case ISCSI_CQE_TYPE_DUMMY:
1240         case ISCSI_CQE_TYPE_TASK_CLEANUP:
1241                 qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
1242                 if (!qedi_work) {
1243                         rc = -1;
1244                         break;
1245                 }
1246                 INIT_LIST_HEAD(&qedi_work->list);
1247                 qedi_work->qedi = qedi;
1248                 memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
1249                 qedi_work->que_idx = que_idx;
1250                 qedi_work->is_solicited = false;
1251                 list_add_tail(&qedi_work->list, &p->work_list);
1252                 break;
1253         default:
1254                 rc = -1;
1255                 QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
1256         }
1257         return rc;
1258 }
1259
1260 static bool qedi_process_completions(struct qedi_fastpath *fp)
1261 {
1262         struct qedi_ctx *qedi = fp->qedi;
1263         struct qed_sb_info *sb_info = fp->sb_info;
1264         struct status_block_e4 *sb = sb_info->sb_virt;
1265         struct qedi_percpu_s *p = NULL;
1266         struct global_queue *que;
1267         u16 prod_idx;
1268         unsigned long flags;
1269         union iscsi_cqe *cqe;
1270         int cpu;
1271         int ret;
1272
1273         /* Get the current firmware producer index */
1274         prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
1275
1276         if (prod_idx >= QEDI_CQ_SIZE)
1277                 prod_idx = prod_idx % QEDI_CQ_SIZE;
1278
1279         que = qedi->global_queues[fp->sb_id];
1280         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
1281                   "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
1282                   que, prod_idx, que->cq_cons_idx, fp->sb_id);
1283
1284         qedi->intr_cpu = fp->sb_id;
1285         cpu = smp_processor_id();
1286         p = &per_cpu(qedi_percpu, cpu);
1287
1288         if (unlikely(!p->iothread))
1289                 WARN_ON(1);
1290
1291         spin_lock_irqsave(&p->p_work_lock, flags);
1292         while (que->cq_cons_idx != prod_idx) {
1293                 cqe = &que->cq[que->cq_cons_idx];
1294
1295                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
1296                           "cqe=%p prod_idx=%d cons_idx=%d.\n",
1297                           cqe, prod_idx, que->cq_cons_idx);
1298
1299                 ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
1300                 if (ret)
1301                         QEDI_WARN(&qedi->dbg_ctx,
1302                                   "Dropping CQE 0x%x for cid=0x%x.\n",
1303                                   que->cq_cons_idx, cqe->cqe_common.conn_id);
1304
1305                 que->cq_cons_idx++;
1306                 if (que->cq_cons_idx == QEDI_CQ_SIZE)
1307                         que->cq_cons_idx = 0;
1308         }
1309         wake_up_process(p->iothread);
1310         spin_unlock_irqrestore(&p->p_work_lock, flags);
1311
1312         return true;
1313 }
1314
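/*
 * Return true when the firmware producer index in the status block differs
 * from our consumer index, i.e. there are CQEs still waiting to be
 * processed.
 */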
1315 static bool qedi_fp_has_work(struct qedi_fastpath *fp)
1316 {
1317         struct qedi_ctx *qedi = fp->qedi;
1318         struct global_queue *que;
1319         struct qed_sb_info *sb_info = fp->sb_info;
1320         struct status_block_e4 *sb = sb_info->sb_virt;
1321         u16 prod_idx;
1322
1323         barrier();
1324
1325         /* Get the current firmware producer index */
1326         prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
1327
1328         /* Get the pointer to the global CQ this completion is on */
1329         que = qedi->global_queues[fp->sb_id];
1330
1331         /* handle producer index wrap-around (u16) */
1332         if (prod_idx >= QEDI_CQ_SIZE)
1333                 prod_idx = prod_idx % QEDI_CQ_SIZE;
1334
1335         return (que->cq_cons_idx != prod_idx);
1336 }
1337
1338 /* MSI-X fastpath handler code */
1339 static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
1340 {
1341         struct qedi_fastpath *fp = dev_id;
1342         struct qedi_ctx *qedi = fp->qedi;
1343         bool wake_io_thread = true;
1344
1345         qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
1346
1347 process_again:
1348         wake_io_thread = qedi_process_completions(fp);
1349         if (wake_io_thread) {
1350                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1351                           "process already running\n");
1352         }
1353
1354         if (!qedi_fp_has_work(fp))
1355                 qed_sb_update_sb_idx(fp->sb_info);
1356
1357         /* Check for more work */
1358         rmb();
1359
1360         if (!qedi_fp_has_work(fp))
1361                 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1362         else
1363                 goto process_again;
1364
1365         return IRQ_HANDLED;
1366 }
1367
1368 /* simd handler for MSI/INTa */
1369 static void qedi_simd_int_handler(void *cookie)
1370 {
1371         /* Cookie is qedi_ctx struct */
1372         struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
1373
1374         QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
1375 }
1376
1377 #define QEDI_SIMD_HANDLER_NUM           0
1378 static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
1379 {
1380         int i;
1381         u16 idx;
1382
1383         if (qedi->int_info.msix_cnt) {
1384                 for (i = 0; i < qedi->int_info.used_cnt; i++) {
1385                         idx = i * qedi->dev_info.common.num_hwfns +
1386                         qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
1387
1388                         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1389                                   "Freeing IRQ #%d vector_idx=%d.\n", i, idx);
1390
1391                         synchronize_irq(qedi->int_info.msix[idx].vector);
1392                         irq_set_affinity_hint(qedi->int_info.msix[idx].vector,
1393                                               NULL);
1394                         free_irq(qedi->int_info.msix[idx].vector,
1395                                  &qedi->fp_array[i]);
1396                 }
1397         } else {
1398                 qedi_ops->common->simd_handler_clean(qedi->cdev,
1399                                                      QEDI_SIMD_HANDLER_NUM);
1400         }
1401
1402         qedi->int_info.used_cnt = 0;
1403         qedi_ops->common->set_fp_int(qedi->cdev, 0);
1404 }
1405
1406 static int qedi_request_msix_irq(struct qedi_ctx *qedi)
1407 {
1408         int i, rc, cpu;
1409         u16 idx;
1410
1411         cpu = cpumask_first(cpu_online_mask);
1412         for (i = 0; i < qedi->msix_count; i++) {
1413                 idx = i * qedi->dev_info.common.num_hwfns +
1414                           qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
1415
1416                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1417                           "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
1418                           qedi->dev_info.common.num_hwfns,
1419                           qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
1420
1421                 rc = request_irq(qedi->int_info.msix[idx].vector,
1422                                  qedi_msix_handler, 0, "qedi",
1423                                  &qedi->fp_array[i]);
1424                 if (rc) {
1425                         QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
1426                         qedi_sync_free_irqs(qedi);
1427                         return rc;
1428                 }
1429                 qedi->int_info.used_cnt++;
1430                 rc = irq_set_affinity_hint(qedi->int_info.msix[idx].vector,
1431                                            get_cpu_mask(cpu));
1432                 cpu = cpumask_next(cpu, cpu_online_mask);
1433         }
1434
1435         return 0;
1436 }
1437
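/*
 * Enable fastpath interrupts: request one MSI-X vector per queue when
 * MSI-X is available, otherwise fall back to the qed SIMD handler for
 * MSI/INTa operation.
 */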
1438 static int qedi_setup_int(struct qedi_ctx *qedi)
1439 {
1440         int rc = 0;
1441
1442         rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues);
1443         if (rc < 0)
1444                 goto exit_setup_int;
1445
1446         qedi->msix_count = rc;
1447
1448         rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
1449         if (rc)
1450                 goto exit_setup_int;
1451
1452         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1453                   "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
1454                    qedi->int_info.msix_cnt, num_online_cpus());
1455
1456         if (qedi->int_info.msix_cnt) {
1457                 rc = qedi_request_msix_irq(qedi);
1458                 goto exit_setup_int;
1459         } else {
1460                 qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
1461                                                       QEDI_SIMD_HANDLER_NUM,
1462                                                       qedi_simd_int_handler);
1463                 qedi->int_info.used_cnt = 1;
1464         }
1465
1466 exit_setup_int:
1467         return rc;
1468 }
1469
1470 static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1471 {
1472         if (qedi->iscsi_image)
1473                 dma_free_coherent(&qedi->pdev->dev,
1474                                   sizeof(struct qedi_nvm_iscsi_image),
1475                                   qedi->iscsi_image, qedi->nvm_buf_dma);
1476 }
1477
1478 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1479 {
1480         qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
1481                                                sizeof(struct qedi_nvm_iscsi_image),
1482                                                &qedi->nvm_buf_dma, GFP_KERNEL);
1483         if (!qedi->iscsi_image) {
1484                 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1485                 return -ENOMEM;
1486         }
1487         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1488                   "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
1489                   qedi->nvm_buf_dma);
1490
1491         return 0;
1492 }
1493
1494 static void qedi_free_bdq(struct qedi_ctx *qedi)
1495 {
1496         int i;
1497
1498         if (qedi->bdq_pbl_list)
1499                 dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
1500                                   qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
1501
1502         if (qedi->bdq_pbl)
1503                 dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
1504                                   qedi->bdq_pbl, qedi->bdq_pbl_dma);
1505
1506         for (i = 0; i < QEDI_BDQ_NUM; i++) {
1507                 if (qedi->bdq[i].buf_addr) {
1508                         dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
1509                                           qedi->bdq[i].buf_addr,
1510                                           qedi->bdq[i].buf_dma);
1511                 }
1512         }
1513 }
1514
1515 static void qedi_free_global_queues(struct qedi_ctx *qedi)
1516 {
1517         int i;
1518         struct global_queue **gl = qedi->global_queues;
1519
1520         for (i = 0; i < qedi->num_queues; i++) {
1521                 if (!gl[i])
1522                         continue;
1523
1524                 if (gl[i]->cq)
1525                         dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
1526                                           gl[i]->cq, gl[i]->cq_dma);
1527                 if (gl[i]->cq_pbl)
1528                         dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
1529                                           gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
1530
1531                 kfree(gl[i]);
1532         }
1533         qedi_free_bdq(qedi);
1534         qedi_free_nvm_iscsi_cfg(qedi);
1535 }
1536
1537 static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1538 {
1539         int i;
1540         struct scsi_bd *pbl;
1541         u64 *list;
1542         dma_addr_t page;
1543
1544         /* Alloc dma memory for BDQ buffers */
1545         for (i = 0; i < QEDI_BDQ_NUM; i++) {
1546                 qedi->bdq[i].buf_addr =
1547                                 dma_alloc_coherent(&qedi->pdev->dev,
1548                                                    QEDI_BDQ_BUF_SIZE,
1549                                                    &qedi->bdq[i].buf_dma,
1550                                                    GFP_KERNEL);
1551                 if (!qedi->bdq[i].buf_addr) {
1552                         QEDI_ERR(&qedi->dbg_ctx,
1553                                  "Could not allocate BDQ buffer %d.\n", i);
1554                         return -ENOMEM;
1555                 }
1556         }
1557
1558         /* Alloc dma memory for BDQ page buffer list */
1559         qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
1560         qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, QEDI_PAGE_SIZE);
1561         qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
1562
1563         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
1564                   qedi->rq_num_entries);
1565
1566         qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
1567                                            qedi->bdq_pbl_mem_size,
1568                                            &qedi->bdq_pbl_dma, GFP_KERNEL);
1569         if (!qedi->bdq_pbl) {
1570                 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
1571                 return -ENOMEM;
1572         }
1573
1574         /*
1575          * Populate BDQ PBL with physical and virtual address of individual
1576          * BDQ buffers
1577          */
1578         pbl = (struct scsi_bd *)qedi->bdq_pbl;
1579         for (i = 0; i < QEDI_BDQ_NUM; i++) {
1580                 pbl->address.hi =
1581                                 cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
1582                 pbl->address.lo =
1583                                 cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
1584                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1585                           "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
1586                           pbl, pbl->address.hi, pbl->address.lo, i);
1587                 pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
1588                 pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
1589                 pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
1590                 pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
1591                 pbl++;
1592         }
1593
1594         /* Allocate list of PBL pages */
1595         qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
1596                                                 QEDI_PAGE_SIZE,
1597                                                 &qedi->bdq_pbl_list_dma,
1598                                                 GFP_KERNEL);
1599         if (!qedi->bdq_pbl_list) {
1600                 QEDI_ERR(&qedi->dbg_ctx,
1601                          "Could not allocate list of PBL pages.\n");
1602                 return -ENOMEM;
1603         }
1604
1605         /*
1606          * Now populate PBL list with pages that contain pointers to the
1607          * individual buffers.
1608          */
1609         qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
1610                                          QEDI_PAGE_SIZE;
1611         list = (u64 *)qedi->bdq_pbl_list;
1612         page = qedi->bdq_pbl_list_dma;
1613         for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
1614                 *list = qedi->bdq_pbl_dma;
1615                 list++;
1616                 page += QEDI_PAGE_SIZE;
1617         }
1618
1619         return 0;
1620 }
1621
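/*
 * Allocate the global queues: the BDQ, the NVM iSCSI config buffer, and one
 * CQ plus its PBL per MSI-X vector.  The physical addresses of the per-queue
 * PBLs are then written into the p_cpuq list handed to the firmware; the RQ
 * PBL entries are left as zero.
 */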
1622 static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1623 {
1624         u32 *list;
1625         int i;
1626         int status = 0, rc;
1627         u32 *pbl;
1628         dma_addr_t page;
1629         int num_pages;
1630
1631         /*
1632          * Number of global queues (CQ / RQ). This should
1633          * be <= number of available MSIX vectors for the PF
1634          */
1635         if (!qedi->num_queues) {
1636                 QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
1637                 return 1;
1638         }
1639
1640         /* Make sure we allocated the PBL that will contain the physical
1641          * addresses of our queues
1642          */
1643         if (!qedi->p_cpuq) {
1644                 status = 1;
1645                 goto mem_alloc_failure;
1646         }
1647
1648         qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
1649                                        qedi->num_queues), GFP_KERNEL);
1650         if (!qedi->global_queues) {
1651                 QEDI_ERR(&qedi->dbg_ctx,
1652                          "Unable to allocate global queues array ptr memory\n");
1653                 return -ENOMEM;
1654         }
1655         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1656                   "qedi->global_queues=%p.\n", qedi->global_queues);
1657
1658         /* Allocate DMA coherent buffers for BDQ */
1659         rc = qedi_alloc_bdq(qedi);
1660         if (rc)
1661                 goto mem_alloc_failure;
1662
1663         /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
1664         rc = qedi_alloc_nvm_iscsi_cfg(qedi);
1665         if (rc)
1666                 goto mem_alloc_failure;
1667
1668         /* Allocate a CQ and an associated PBL for each MSI-X
1669          * vector.
1670          */
1671         for (i = 0; i < qedi->num_queues; i++) {
1672                 qedi->global_queues[i] =
1673                                         kzalloc(sizeof(*qedi->global_queues[0]),
1674                                                 GFP_KERNEL);
1675                 if (!qedi->global_queues[i]) {
1676                         QEDI_ERR(&qedi->dbg_ctx,
1677                                  "Unable to allocate global queue %d.\n", i);
1678                         goto mem_alloc_failure;
1679                 }
1680
1681                 qedi->global_queues[i]->cq_mem_size =
1682                     (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
1683                 qedi->global_queues[i]->cq_mem_size =
1684                     (qedi->global_queues[i]->cq_mem_size +
1685                     (QEDI_PAGE_SIZE - 1));
1686
1687                 qedi->global_queues[i]->cq_pbl_size =
1688                     (qedi->global_queues[i]->cq_mem_size /
1689                     QEDI_PAGE_SIZE) * sizeof(void *);
1690                 qedi->global_queues[i]->cq_pbl_size =
1691                     (qedi->global_queues[i]->cq_pbl_size +
1692                     (QEDI_PAGE_SIZE - 1));
1693
1694                 qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
1695                                                                 qedi->global_queues[i]->cq_mem_size,
1696                                                                 &qedi->global_queues[i]->cq_dma,
1697                                                                 GFP_KERNEL);
1698
1699                 if (!qedi->global_queues[i]->cq) {
1700                         QEDI_WARN(&qedi->dbg_ctx,
1701                                   "Could not allocate cq.\n");
1702                         status = -ENOMEM;
1703                         goto mem_alloc_failure;
1704                 }
1705                 qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
1706                                                                     qedi->global_queues[i]->cq_pbl_size,
1707                                                                     &qedi->global_queues[i]->cq_pbl_dma,
1708                                                                     GFP_KERNEL);
1709
1710                 if (!qedi->global_queues[i]->cq_pbl) {
1711                         QEDI_WARN(&qedi->dbg_ctx,
1712                                   "Could not allocate cq PBL.\n");
1713                         status = -ENOMEM;
1714                         goto mem_alloc_failure;
1715                 }
1716
1717                 /* Create PBL */
1718                 num_pages = qedi->global_queues[i]->cq_mem_size /
1719                     QEDI_PAGE_SIZE;
1720                 page = qedi->global_queues[i]->cq_dma;
1721                 pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
1722
1723                 while (num_pages--) {
1724                         *pbl = (u32)page;
1725                         pbl++;
1726                         *pbl = (u32)((u64)page >> 32);
1727                         pbl++;
1728                         page += QEDI_PAGE_SIZE;
1729                 }
1730         }
1731
1732         list = (u32 *)qedi->p_cpuq;
1733
1734         /*
1735          * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
1736          * CQ#1 PBL pointer, RQ#1 PBL pointer, etc.  Each PBL pointer points
1737          * to the physical address which contains an array of pointers to the
1738          * physical addresses of the specific queue pages.
1739          */
1740         for (i = 0; i < qedi->num_queues; i++) {
1741                 *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
1742                 list++;
1743                 *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
1744                 list++;
1745
1746                 *list = (u32)0;
1747                 list++;
1748                 *list = (u32)((u64)0 >> 32);
1749                 list++;
1750         }
1751
1752         return 0;
1753
1754 mem_alloc_failure:
1755         qedi_free_global_queues(qedi);
1756         return status;
1757 }
1758
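/*
 * Allocate the per-endpoint send queue (SQ) and the PBL describing it; the
 * PBL is filled with the physical address of each SQ page.
 */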
1759 int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1760 {
1761         int rval = 0;
1762         u32 *pbl;
1763         dma_addr_t page;
1764         int num_pages;
1765
1766         if (!ep)
1767                 return -EIO;
1768
1769         /* Calculate appropriate queue and PBL sizes */
1770         ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
1771         ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
1772
1773         ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
1774         ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
1775
1776         ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
1777                                     &ep->sq_dma, GFP_KERNEL);
1778         if (!ep->sq) {
1779                 QEDI_WARN(&qedi->dbg_ctx,
1780                           "Could not allocate send queue.\n");
1781                 rval = -ENOMEM;
1782                 goto out;
1783         }
1784         ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
1785                                         &ep->sq_pbl_dma, GFP_KERNEL);
1786         if (!ep->sq_pbl) {
1787                 QEDI_WARN(&qedi->dbg_ctx,
1788                           "Could not allocate send queue PBL.\n");
1789                 rval = -ENOMEM;
1790                 goto out_free_sq;
1791         }
1792
1793         /* Create PBL */
1794         num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
1795         page = ep->sq_dma;
1796         pbl = (u32 *)ep->sq_pbl;
1797
1798         while (num_pages--) {
1799                 *pbl = (u32)page;
1800                 pbl++;
1801                 *pbl = (u32)((u64)page >> 32);
1802                 pbl++;
1803                 page += QEDI_PAGE_SIZE;
1804         }
1805
1806         return rval;
1807
1808 out_free_sq:
1809         dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
1810                           ep->sq_dma);
1811 out:
1812         return rval;
1813 }
1814
1815 void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1816 {
1817         if (ep->sq_pbl)
1818                 dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
1819                                   ep->sq_pbl_dma);
1820         if (ep->sq)
1821                 dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
1822                                   ep->sq_dma);
1823 }
1824
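/*
 * Claim a free firmware task index from the task_idx_map bitmap.  The
 * find/test-and-set pair is retried on races; -1 is returned when the pool
 * is exhausted.
 */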
1825 int qedi_get_task_idx(struct qedi_ctx *qedi)
1826 {
1827         s16 tmp_idx;
1828
1829 again:
1830         tmp_idx = find_first_zero_bit(qedi->task_idx_map,
1831                                       MAX_ISCSI_TASK_ENTRIES);
1832
1833         if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
1834                 QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
1835                 tmp_idx = -1;
1836                 goto err_idx;
1837         }
1838
1839         if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
1840                 goto again;
1841
1842 err_idx:
1843         return tmp_idx;
1844 }
1845
1846 void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
1847 {
1848         if (!test_and_clear_bit(idx, qedi->task_idx_map))
1849                 QEDI_ERR(&qedi->dbg_ctx,
1850                          "FW task context already cleared, tid=0x%x\n", idx);
1851 }
1852
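/*
 * Record the protocol ITT and owning command for a firmware task id (tid) so
 * completions can later be mapped back via qedi_get_task_tid() and
 * qedi_get_cmd_from_tid().
 */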
1853 void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
1854                          struct qedi_cmd *cmd)
1855 {
1856         qedi->itt_map[tid].itt = proto_itt;
1857         qedi->itt_map[tid].p_cmd = cmd;
1858
1859         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1860                   "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
1861                   qedi->itt_map[tid].itt);
1862 }
1863
1864 void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
1865 {
1866         u16 i;
1867
1868         for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
1869                 if (qedi->itt_map[i].itt == itt) {
1870                         *tid = i;
1871                         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1872                                   "Ref itt=0x%x, found at tid=0x%x\n",
1873                                   itt, *tid);
1874                         return;
1875                 }
1876         }
1877
1878         WARN_ON(1);
1879 }
1880
1881 void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
1882 {
1883         *proto_itt = qedi->itt_map[tid].itt;
1884         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1885                   "Get itt map tid [0x%x] with proto itt [0x%x]\n",
1886                   tid, *proto_itt);
1887 }
1888
1889 struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
1890 {
1891         struct qedi_cmd *cmd = NULL;
1892
1893         if (tid >= MAX_ISCSI_TASK_ENTRIES)
1894                 return NULL;
1895
1896         cmd = qedi->itt_map[tid].p_cmd;
1897         if (!cmd || cmd->task_id != tid)
1898                 return NULL;
1899
1900         qedi->itt_map[tid].p_cmd = NULL;
1901
1902         return cmd;
1903 }
1904
1905 static int qedi_alloc_itt(struct qedi_ctx *qedi)
1906 {
1907         qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
1908                                 sizeof(struct qedi_itt_map), GFP_KERNEL);
1909         if (!qedi->itt_map) {
1910                 QEDI_ERR(&qedi->dbg_ctx,
1911                          "Unable to allocate itt map array memory\n");
1912                 return -ENOMEM;
1913         }
1914         return 0;
1915 }
1916
1917 static void qedi_free_itt(struct qedi_ctx *qedi)
1918 {
1919         kfree(qedi->itt_map);
1920 }
1921
1922 static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
1923         .rx_cb = qedi_ll2_rx,
1924         .tx_cb = NULL,
1925 };
1926
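/*
 * Per-CPU I/O kthread: splice the CPU's work list under p_work_lock, process
 * each queued entry with qedi_fp_process_cqes() (freeing unsolicited work
 * items), then sleep until more work is queued.
 */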
1927 static int qedi_percpu_io_thread(void *arg)
1928 {
1929         struct qedi_percpu_s *p = arg;
1930         struct qedi_work *work, *tmp;
1931         unsigned long flags;
1932         LIST_HEAD(work_list);
1933
1934         set_user_nice(current, -20);
1935
1936         while (!kthread_should_stop()) {
1937                 spin_lock_irqsave(&p->p_work_lock, flags);
1938                 while (!list_empty(&p->work_list)) {
1939                         list_splice_init(&p->work_list, &work_list);
1940                         spin_unlock_irqrestore(&p->p_work_lock, flags);
1941
1942                         list_for_each_entry_safe(work, tmp, &work_list, list) {
1943                                 list_del_init(&work->list);
1944                                 qedi_fp_process_cqes(work);
1945                                 if (!work->is_solicited)
1946                                         kfree(work);
1947                         }
1948                         cond_resched();
1949                         spin_lock_irqsave(&p->p_work_lock, flags);
1950                 }
1951                 set_current_state(TASK_INTERRUPTIBLE);
1952                 spin_unlock_irqrestore(&p->p_work_lock, flags);
1953                 schedule();
1954         }
1955         __set_current_state(TASK_RUNNING);
1956
1957         return 0;
1958 }
1959
1960 static int qedi_cpu_online(unsigned int cpu)
1961 {
1962         struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
1963         struct task_struct *thread;
1964
1965         thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
1966                                         cpu_to_node(cpu),
1967                                         "qedi_thread/%d", cpu);
1968         if (IS_ERR(thread))
1969                 return PTR_ERR(thread);
1970
1971         kthread_bind(thread, cpu);
1972         p->iothread = thread;
1973         wake_up_process(thread);
1974         return 0;
1975 }
1976
1977 static int qedi_cpu_offline(unsigned int cpu)
1978 {
1979         struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
1980         struct qedi_work *work, *tmp;
1981         struct task_struct *thread;
1982
1983         spin_lock_bh(&p->p_work_lock);
1984         thread = p->iothread;
1985         p->iothread = NULL;
1986
1987         list_for_each_entry_safe(work, tmp, &p->work_list, list) {
1988                 list_del_init(&work->list);
1989                 qedi_fp_process_cqes(work);
1990                 if (!work->is_solicited)
1991                         kfree(work);
1992         }
1993
1994         spin_unlock_bh(&p->p_work_lock);
1995         if (thread)
1996                 kthread_stop(thread);
1997         return 0;
1998 }
1999
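/*
 * Apply a new path MTU to the LL2 (light L2) interface: recover all offloaded
 * connections, stop LL2 and free its skbs, then restart LL2 with parameters
 * sized for the new MTU.
 */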
2000 void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
2001 {
2002         struct qed_ll2_params params;
2003
2004         qedi_recover_all_conns(qedi);
2005
2006         qedi_ops->ll2->stop(qedi->cdev);
2007         qedi_ll2_free_skbs(qedi);
2008
2009         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
2010                   qedi->ll2_mtu, mtu);
2011         memset(&params, 0, sizeof(params));
2012         qedi->ll2_mtu = mtu;
2013         params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
2014         params.drop_ttl0_packets = 0;
2015         params.rx_vlan_stripping = 1;
2016         ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
2017         qedi_ops->ll2->start(qedi->cdev, &params);
2018 }
2019
2020 /*
2021  * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting
2022  * for gaps) for the matching absolute-pf-id of the QEDI device.
2023  */
2024 static struct nvm_iscsi_block *
2025 qedi_get_nvram_block(struct qedi_ctx *qedi)
2026 {
2027         int i;
2028         u8 pf;
2029         u32 flags;
2030         struct nvm_iscsi_block *block;
2031
2032         pf = qedi->dev_info.common.abs_pf_id;
2033         block = &qedi->iscsi_image->iscsi_cfg.block[0];
2034         for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
2035                 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
2036                         NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
2037                 if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
2038                                 NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
2039                         (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
2040                                 >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
2041                         return block;
2042         }
2043         return NULL;
2044 }
2045
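/*
 * iscsi_boot sysfs 'ethernet' show handler: report the initiator IP, subnet,
 * gateway, VLAN, MAC and boot flags taken from the NVM iSCSI block, or
 * zeroed addresses when DHCP is enabled.
 */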
2046 static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
2047 {
2048         struct qedi_ctx *qedi = data;
2049         struct nvm_iscsi_initiator *initiator;
2050         int rc = 1;
2051         u32 ipv6_en, dhcp_en, ip_len;
2052         struct nvm_iscsi_block *block;
2053         char *fmt, *ip, *sub, *gw;
2054
2055         block = qedi_get_nvram_block(qedi);
2056         if (!block)
2057                 return 0;
2058
2059         initiator = &block->initiator;
2060         ipv6_en = block->generic.ctrl_flags &
2061                   NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
2062         dhcp_en = block->generic.ctrl_flags &
2063                   NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
2064         /* Static IP assignments. */
2065         fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
2066         ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
2067         ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
2068         sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
2069               initiator->ipv4.subnet_mask.byte;
2070         gw = ipv6_en ? initiator->ipv6.gateway.byte :
2071              initiator->ipv4.gateway.byte;
2072         /* DHCP IP adjustments. */
2073         fmt = dhcp_en ? "%s\n" : fmt;
2074         if (dhcp_en) {
2075                 ip = ipv6_en ? "0::0" : "0.0.0.0";
2076                 sub = ip;
2077                 gw = ip;
2078                 ip_len = ipv6_en ? 5 : 8;
2079         }
2080
2081         switch (type) {
2082         case ISCSI_BOOT_ETH_IP_ADDR:
2083                 rc = snprintf(buf, ip_len, fmt, ip);
2084                 break;
2085         case ISCSI_BOOT_ETH_SUBNET_MASK:
2086                 rc = snprintf(buf, ip_len, fmt, sub);
2087                 break;
2088         case ISCSI_BOOT_ETH_GATEWAY:
2089                 rc = snprintf(buf, ip_len, fmt, gw);
2090                 break;
2091         case ISCSI_BOOT_ETH_FLAGS:
2092                 rc = snprintf(buf, 3, "%hhd\n",
2093                               SYSFS_FLAG_FW_SEL_BOOT);
2094                 break;
2095         case ISCSI_BOOT_ETH_INDEX:
2096                 rc = snprintf(buf, 3, "0\n");
2097                 break;
2098         case ISCSI_BOOT_ETH_MAC:
2099                 rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
2100                 break;
2101         case ISCSI_BOOT_ETH_VLAN:
2102                 rc = snprintf(buf, 12, "%d\n",
2103                               GET_FIELD2(initiator->generic_cont0,
2104                                          NVM_ISCSI_CFG_INITIATOR_VLAN));
2105                 break;
2106         case ISCSI_BOOT_ETH_ORIGIN:
2107                 if (dhcp_en)
2108                         rc = snprintf(buf, 3, "3\n");
2109                 break;
2110         default:
2111                 rc = 0;
2112                 break;
2113         }
2114
2115         return rc;
2116 }
2117
2118 static umode_t qedi_eth_get_attr_visibility(void *data, int type)
2119 {
2120         int rc = 1;
2121
2122         switch (type) {
2123         case ISCSI_BOOT_ETH_FLAGS:
2124         case ISCSI_BOOT_ETH_MAC:
2125         case ISCSI_BOOT_ETH_INDEX:
2126         case ISCSI_BOOT_ETH_IP_ADDR:
2127         case ISCSI_BOOT_ETH_SUBNET_MASK:
2128         case ISCSI_BOOT_ETH_GATEWAY:
2129         case ISCSI_BOOT_ETH_ORIGIN:
2130         case ISCSI_BOOT_ETH_VLAN:
2131                 rc = 0444;
2132                 break;
2133         default:
2134                 rc = 0;
2135                 break;
2136         }
2137         return rc;
2138 }
2139
2140 static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
2141 {
2142         struct qedi_ctx *qedi = data;
2143         struct nvm_iscsi_initiator *initiator;
2144         int rc;
2145         struct nvm_iscsi_block *block;
2146
2147         block = qedi_get_nvram_block(qedi);
2148         if (!block)
2149                 return 0;
2150
2151         initiator = &block->initiator;
2152
2153         switch (type) {
2154         case ISCSI_BOOT_INI_INITIATOR_NAME:
2155                 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
2156                              initiator->initiator_name.byte);
2157                 break;
2158         default:
2159                 rc = 0;
2160                 break;
2161         }
2162         return rc;
2163 }
2164
2165 static umode_t qedi_ini_get_attr_visibility(void *data, int type)
2166 {
2167         int rc;
2168
2169         switch (type) {
2170         case ISCSI_BOOT_INI_INITIATOR_NAME:
2171                 rc = 0444;
2172                 break;
2173         default:
2174                 rc = 0;
2175                 break;
2176         }
2177         return rc;
2178 }
2179
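/*
 * iscsi_boot sysfs 'target' show handler: report name, IP address, TCP port,
 * LUN, CHAP credentials and boot flags for the primary or secondary NVM boot
 * target selected by idx.
 */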
2180 static ssize_t
2181 qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
2182                         char *buf, enum qedi_nvm_tgts idx)
2183 {
2184         int rc = 1;
2185         u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
2186         struct nvm_iscsi_block *block;
2187         char *chap_name, *chap_secret;
2188         char *mchap_name, *mchap_secret;
2189
2190         block = qedi_get_nvram_block(qedi);
2191         if (!block)
2192                 goto exit_show_tgt_info;
2193
2194         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
2195                   "Port:%d, tgt_idx:%d\n",
2196                   GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
2197
2198         ctrl_flags = block->target[idx].ctrl_flags &
2199                      NVM_ISCSI_CFG_TARGET_ENABLED;
2200
2201         if (!ctrl_flags) {
2202                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
2203                           "Target disabled\n");
2204                 goto exit_show_tgt_info;
2205         }
2206
2207         ipv6_en = block->generic.ctrl_flags &
2208                   NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
2209         ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
2210         chap_en = block->generic.ctrl_flags &
2211                   NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
2212         chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
2213         chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
2214
2215         mchap_en = block->generic.ctrl_flags &
2216                   NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
2217         mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
2218         mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
2219
2220         switch (type) {
2221         case ISCSI_BOOT_TGT_NAME:
2222                 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
2223                              block->target[idx].target_name.byte);
2224                 break;
2225         case ISCSI_BOOT_TGT_IP_ADDR:
2226                 if (ipv6_en)
2227                         rc = snprintf(buf, ip_len, "%pI6\n",
2228                                       block->target[idx].ipv6_addr.byte);
2229                 else
2230                         rc = snprintf(buf, ip_len, "%pI4\n",
2231                                       block->target[idx].ipv4_addr.byte);
2232                 break;
2233         case ISCSI_BOOT_TGT_PORT:
2234                 rc = snprintf(buf, 12, "%d\n",
2235                               GET_FIELD2(block->target[idx].generic_cont0,
2236                                          NVM_ISCSI_CFG_TARGET_TCP_PORT));
2237                 break;
2238         case ISCSI_BOOT_TGT_LUN:
2239                 rc = snprintf(buf, 22, "%.*d\n",
2240                               block->target[idx].lun.value[1],
2241                               block->target[idx].lun.value[0]);
2242                 break;
2243         case ISCSI_BOOT_TGT_CHAP_NAME:
2244                 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
2245                              chap_name);
2246                 break;
2247         case ISCSI_BOOT_TGT_CHAP_SECRET:
2248                 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
2249                              chap_secret);
2250                 break;
2251         case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2252                 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
2253                              mchap_name);
2254                 break;
2255         case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2256                 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
2257                              mchap_secret);
2258                 break;
2259         case ISCSI_BOOT_TGT_FLAGS:
2260                 rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
2261                 break;
2262         case ISCSI_BOOT_TGT_NIC_ASSOC:
2263                 rc = snprintf(buf, 3, "0\n");
2264                 break;
2265         default:
2266                 rc = 0;
2267                 break;
2268         }
2269
2270 exit_show_tgt_info:
2271         return rc;
2272 }
2273
2274 static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
2275 {
2276         struct qedi_ctx *qedi = data;
2277
2278         return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
2279 }
2280
2281 static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
2282 {
2283         struct qedi_ctx *qedi = data;
2284
2285         return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
2286 }
2287
2288 static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
2289 {
2290         int rc;
2291
2292         switch (type) {
2293         case ISCSI_BOOT_TGT_NAME:
2294         case ISCSI_BOOT_TGT_IP_ADDR:
2295         case ISCSI_BOOT_TGT_PORT:
2296         case ISCSI_BOOT_TGT_LUN:
2297         case ISCSI_BOOT_TGT_CHAP_NAME:
2298         case ISCSI_BOOT_TGT_CHAP_SECRET:
2299         case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2300         case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2301         case ISCSI_BOOT_TGT_NIC_ASSOC:
2302         case ISCSI_BOOT_TGT_FLAGS:
2303                 rc = 0444;
2304                 break;
2305         default:
2306                 rc = 0;
2307                 break;
2308         }
2309         return rc;
2310 }
2311
2312 static void qedi_boot_release(void *data)
2313 {
2314         struct qedi_ctx *qedi = data;
2315
2316         scsi_host_put(qedi->shost);
2317 }
2318
2319 static int qedi_get_boot_info(struct qedi_ctx *qedi)
2320 {
2321         int ret = 1;
2322
2323         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2324                   "Get NVM iSCSI CFG image\n");
2325         ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2326                                               QED_NVM_IMAGE_ISCSI_CFG,
2327                                               (char *)qedi->iscsi_image,
2328                                               sizeof(struct qedi_nvm_iscsi_image));
2329         if (ret)
2330                 QEDI_ERR(&qedi->dbg_ctx,
2331                          "Could not get NVM image. ret = %d\n", ret);
2332
2333         return ret;
2334 }
2335
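/*
 * Fetch the NVM iSCSI config image and expose it through the iscsi_boot
 * sysfs kset: two target kobjects (primary and secondary), one initiator and
 * one ethernet kobject.  Each kobject holds a reference on the Scsi_Host
 * that is dropped by qedi_boot_release().
 */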
2336 static int qedi_setup_boot_info(struct qedi_ctx *qedi)
2337 {
2338         struct iscsi_boot_kobj *boot_kobj;
2339
2340         if (qedi_get_boot_info(qedi))
2341                 return -EPERM;
2342
2343         qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
2344         if (!qedi->boot_kset)
2345                 goto kset_free;
2346
2347         if (!scsi_host_get(qedi->shost))
2348                 goto kset_free;
2349
2350         boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
2351                                              qedi_show_boot_tgt_pri_info,
2352                                              qedi_tgt_get_attr_visibility,
2353                                              qedi_boot_release);
2354         if (!boot_kobj)
2355                 goto put_host;
2356
2357         if (!scsi_host_get(qedi->shost))
2358                 goto kset_free;
2359
2360         boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
2361                                              qedi_show_boot_tgt_sec_info,
2362                                              qedi_tgt_get_attr_visibility,
2363                                              qedi_boot_release);
2364         if (!boot_kobj)
2365                 goto put_host;
2366
2367         if (!scsi_host_get(qedi->shost))
2368                 goto kset_free;
2369
2370         boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
2371                                                 qedi_show_boot_ini_info,
2372                                                 qedi_ini_get_attr_visibility,
2373                                                 qedi_boot_release);
2374         if (!boot_kobj)
2375                 goto put_host;
2376
2377         if (!scsi_host_get(qedi->shost))
2378                 goto kset_free;
2379
2380         boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
2381                                                qedi_show_boot_eth_info,
2382                                                qedi_eth_get_attr_visibility,
2383                                                qedi_boot_release);
2384         if (!boot_kobj)
2385                 goto put_host;
2386
2387         return 0;
2388
2389 put_host:
2390         scsi_host_put(qedi->shost);
2391 kset_free:
2392         iscsi_boot_destroy_kset(qedi->boot_kset);
2393         return -ENOMEM;
2394 }
2395
2396 static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev,
2397                                                pci_channel_state_t state)
2398 {
2399         struct qedi_ctx *qedi = pci_get_drvdata(pdev);
2400
2401         QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n",
2402                  __func__, state);
2403
2404         if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
2405                 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2406                           "Recovery already in progress.\n");
2407                 return PCI_ERS_RESULT_NONE;
2408         }
2409
2410         qedi_ops->common->recovery_process(qedi->cdev);
2411
2412         return PCI_ERS_RESULT_CAN_RECOVER;
2413 }
2414
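/*
 * Common teardown path shared by PCI remove, shutdown and error recovery.
 * @mode selects how much state is released: recovery keeps the host, UIO and
 * connection resources so __qedi_probe() can reuse them.
 */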
2415 static void __qedi_remove(struct pci_dev *pdev, int mode)
2416 {
2417         struct qedi_ctx *qedi = pci_get_drvdata(pdev);
2418         int rval;
2419         u16 retry = 10;
2420
2421         if (mode == QEDI_MODE_SHUTDOWN)
2422                 iscsi_host_for_each_session(qedi->shost,
2423                                             qedi_clear_session_ctx);
2424
2425         if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
2426                 if (qedi->tmf_thread) {
2427                         flush_workqueue(qedi->tmf_thread);
2428                         destroy_workqueue(qedi->tmf_thread);
2429                         qedi->tmf_thread = NULL;
2430                 }
2431
2432                 if (qedi->offload_thread) {
2433                         flush_workqueue(qedi->offload_thread);
2434                         destroy_workqueue(qedi->offload_thread);
2435                         qedi->offload_thread = NULL;
2436                 }
2437         }
2438
2439 #ifdef CONFIG_DEBUG_FS
2440         qedi_dbg_host_exit(&qedi->dbg_ctx);
2441 #endif
2442         if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
2443                 qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
2444
2445         qedi_sync_free_irqs(qedi);
2446
2447         if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
2448                 while (retry--) {
2449                         rval = qedi_ops->stop(qedi->cdev);
2450                         if (rval < 0)
2451                                 msleep(1000);
2452                         else
2453                                 break;
2454                 }
2455                 qedi_ops->ll2->stop(qedi->cdev);
2456         }
2457
2458         qedi_free_iscsi_pf_param(qedi);
2459
2460         rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
2461         if (rval)
2462                 QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
2463
2464         if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
2465                 qedi_ops->common->slowpath_stop(qedi->cdev);
2466                 qedi_ops->common->remove(qedi->cdev);
2467         }
2468
2469         qedi_destroy_fp(qedi);
2470
2471         if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
2472                 qedi_release_cid_que(qedi);
2473                 qedi_cm_free_mem(qedi);
2474                 qedi_free_uio(qedi->udev);
2475                 qedi_free_itt(qedi);
2476
2477                 if (qedi->ll2_recv_thread) {
2478                         kthread_stop(qedi->ll2_recv_thread);
2479                         qedi->ll2_recv_thread = NULL;
2480                 }
2481                 qedi_ll2_free_skbs(qedi);
2482
2483                 if (qedi->boot_kset)
2484                         iscsi_boot_destroy_kset(qedi->boot_kset);
2485
2486                 iscsi_host_remove(qedi->shost);
2487                 iscsi_host_free(qedi->shost);
2488         }
2489 }
2490
2491 static void qedi_board_disable_work(struct work_struct *work)
2492 {
2493         struct qedi_ctx *qedi =
2494                         container_of(work, struct qedi_ctx,
2495                                      board_disable_work.work);
2496
2497         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2498                   "Fan failure, unloading firmware context.\n");
2499
2500         if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
2501                 return;
2502
2503         __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
2504 }
2505
2506 static void qedi_shutdown(struct pci_dev *pdev)
2507 {
2508         struct qedi_ctx *qedi = pci_get_drvdata(pdev);
2509
2510         QEDI_ERR(&qedi->dbg_ctx, "%s: Shutdown qedi\n", __func__);
2511         if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
2512                 return;
2513         __qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
2514 }
2515
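/*
 * Common bring-up path shared by normal PCI probe and error recovery.  In
 * recovery mode the existing qedi_ctx is reused, host/UIO/boot setup is
 * skipped, and the hardware probe is retried before giving up.
 */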
2516 static int __qedi_probe(struct pci_dev *pdev, int mode)
2517 {
2518         struct qedi_ctx *qedi;
2519         struct qed_ll2_params params;
2520         u8 dp_level = 0;
2521         bool is_vf = false;
2522         char host_buf[16];
2523         struct qed_link_params link_params;
2524         struct qed_slowpath_params sp_params;
2525         struct qed_probe_params qed_params;
2526         void *task_start, *task_end;
2527         int rc;
2528         u16 retry = 10;
2529
2530         if (mode != QEDI_MODE_RECOVERY) {
2531                 qedi = qedi_host_alloc(pdev);
2532                 if (!qedi) {
2533                         rc = -ENOMEM;
2534                         goto exit_probe;
2535                 }
2536         } else {
2537                 qedi = pci_get_drvdata(pdev);
2538         }
2539
2540 retry_probe:
2541         if (mode == QEDI_MODE_RECOVERY)
2542                 msleep(2000);
2543
2544         memset(&qed_params, 0, sizeof(qed_params));
2545         qed_params.protocol = QED_PROTOCOL_ISCSI;
2546         qed_params.dp_module = qedi_qed_debug;
2547         qed_params.dp_level = dp_level;
2548         qed_params.is_vf = is_vf;
2549         qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
2550         if (!qedi->cdev) {
2551                 if (mode == QEDI_MODE_RECOVERY && retry) {
2552                         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2553                                   "Retry %d initialize hardware\n", retry);
2554                         retry--;
2555                         goto retry_probe;
2556                 }
2557
2558                 rc = -ENODEV;
2559                 QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
2560                 goto free_host;
2561         }
2562
2563         set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags);
2564         set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags);
2565         atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
2566
2567         rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
2568         if (rc)
2569                 goto free_host;
2570
2571         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2572                   "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
2573                   qedi->dev_info.common.num_hwfns,
2574                   qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
2575
2576         rc = qedi_set_iscsi_pf_param(qedi);
2577         if (rc) {
2578                 rc = -ENOMEM;
2579                 QEDI_ERR(&qedi->dbg_ctx,
2580                          "Set iSCSI pf param fail\n");
2581                 goto free_host;
2582         }
2583
2584         qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
2585
2586         rc = qedi_prepare_fp(qedi);
2587         if (rc) {
2588                 QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
2589                 goto free_pf_params;
2590         }
2591
2592         /* Start the Slowpath-process */
2593         memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
2594         sp_params.int_mode = QED_INT_MODE_MSIX;
2595         sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
2596         sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
2597         sp_params.drv_rev = QEDI_DRIVER_REV_VER;
2598         sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
2599         strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
2600         rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
2601         if (rc) {
2602                 QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
2603                 goto stop_hw;
2604         }
2605
2606         /* update_pf_params needs to be called before and after slowpath
2607          * start
2608          */
2609         qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
2610
2611         rc = qedi_setup_int(qedi);
2612         if (rc)
2613                 goto stop_iscsi_func;
2614
2615         qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
2616
2617         /* Learn information crucial for qedi to progress */
2618         rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
2619         if (rc)
2620                 goto stop_iscsi_func;
2621
2622         /* Record BDQ producer doorbell addresses */
2623         qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
2624         qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
2625         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
2626                   "BDQ primary_prod=%p secondary_prod=%p.\n",
2627                   qedi->bdq_primary_prod,
2628                   qedi->bdq_secondary_prod);
2629
2630         /*
2631          * We need to write the number of BDs in the BDQ we've preallocated so
2632          * the f/w will do a prefetch and we'll get an unsolicited CQE when a
2633          * packet arrives.
2634          */
2635         qedi->bdq_prod_idx = QEDI_BDQ_NUM;
2636         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
2637                   "Writing %d to primary and secondary BDQ doorbell registers.\n",
2638                   qedi->bdq_prod_idx);
2639         writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
2640         readw(qedi->bdq_primary_prod);
2641         writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
2642         readw(qedi->bdq_secondary_prod);
2643
2644         ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
2645         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
2646                   qedi->mac);
2647
2648         snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no);
2649         qedi_ops->common->set_name(qedi->cdev, host_buf);
2650
2651         qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
2652
2653         memset(&params, 0, sizeof(params));
2654         params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
2655         qedi->ll2_mtu = DEF_PATH_MTU;
2656         params.drop_ttl0_packets = 0;
2657         params.rx_vlan_stripping = 1;
2658         ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
2659
2660         if (mode != QEDI_MODE_RECOVERY) {
2661                 /* set up rx path */
2662                 INIT_LIST_HEAD(&qedi->ll2_skb_list);
2663                 spin_lock_init(&qedi->ll2_lock);
2664                 /* start qedi context */
2665                 spin_lock_init(&qedi->hba_lock);
2666                 spin_lock_init(&qedi->task_idx_lock);
2667                 mutex_init(&qedi->stats_lock);
2668         }
2669         qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
2670         qedi_ops->ll2->start(qedi->cdev, &params);
2671
2672         if (mode != QEDI_MODE_RECOVERY) {
2673                 qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
2674                                                     (void *)qedi,
2675                                                     "qedi_ll2_thread");
2676         }
2677
2678         rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
2679                              qedi, qedi_iscsi_event_cb);
2680         if (rc) {
2681                 rc = -ENODEV;
2682                 QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
2683                 goto stop_slowpath;
2684         }
2685
2686         task_start = qedi_get_task_mem(&qedi->tasks, 0);
2687         task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
2688         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
2689                   "Task context start=%p, end=%p block_size=%u.\n",
2690                    task_start, task_end, qedi->tasks.size);
2691
2692         memset(&link_params, 0, sizeof(link_params));
2693         link_params.link_up = true;
2694         rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
2695         if (rc) {
2696                 QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
2697                 atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
2698         }
2699
2700 #ifdef CONFIG_DEBUG_FS
2701         qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops,
2702                            qedi_dbg_fops);
2703 #endif
2704         QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2705                   "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
2706                   QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
2707                   FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
2708
2709         if (mode == QEDI_MODE_NORMAL) {
2710                 if (iscsi_host_add(qedi->shost, &pdev->dev)) {
2711                         QEDI_ERR(&qedi->dbg_ctx,
2712                                  "Could not add iscsi host\n");
2713                         rc = -ENOMEM;
2714                         goto remove_host;
2715                 }
2716
2717                 /* Allocate uio buffers */
2718                 rc = qedi_alloc_uio_rings(qedi);
2719                 if (rc) {
2720                         QEDI_ERR(&qedi->dbg_ctx,
2721                                  "UIO alloc ring failed err=%d\n", rc);
2722                         goto remove_host;
2723                 }
2724
2725                 rc = qedi_init_uio(qedi);
2726                 if (rc) {
2727                         QEDI_ERR(&qedi->dbg_ctx,
2728                                  "UIO init failed, err=%d\n", rc);
2729                         goto free_uio;
2730                 }
2731
2732                 /* Set up the connection id (cid) queue */
2733                 rc = qedi_setup_cid_que(qedi);
2734                 if (rc) {
2735                         QEDI_ERR(&qedi->dbg_ctx,
2736                                  "Could not setup cid que\n");
2737                         goto free_uio;
2738                 }
2739
2740                 rc = qedi_cm_alloc_mem(qedi);
2741                 if (rc) {
2742                         QEDI_ERR(&qedi->dbg_ctx,
2743                                  "Could not alloc cm memory\n");
2744                         goto free_cid_que;
2745                 }
2746
2747                 rc = qedi_alloc_itt(qedi);
2748                 if (rc) {
2749                         QEDI_ERR(&qedi->dbg_ctx,
2750                                  "Could not alloc itt memory\n");
2751                         goto free_cid_que;
2752                 }
2753
2754                 sprintf(host_buf, "host_%d", qedi->shost->host_no);
2755                 qedi->tmf_thread = create_singlethread_workqueue(host_buf);
2756                 if (!qedi->tmf_thread) {
2757                         QEDI_ERR(&qedi->dbg_ctx,
2758                                  "Unable to start tmf thread!\n");
2759                         rc = -ENODEV;
2760                         goto free_cid_que;
2761                 }
2762
2763                 sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
2764                 qedi->offload_thread = create_workqueue(host_buf);
2765                 if (!qedi->offload_thread) {
2766                         QEDI_ERR(&qedi->dbg_ctx,
2767                                  "Unable to start offload thread!\n");
2768                         rc = -ENODEV;
2769                         goto free_cid_que;
2770                 }
2771
2772                 INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
2773                 INIT_DELAYED_WORK(&qedi->board_disable_work,
2774                                   qedi_board_disable_work);
2775
2776                 /* F/w needs 1st task context memory entry for performance */
2777                 set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
2778                 atomic_set(&qedi->num_offloads, 0);
2779
2780                 if (qedi_setup_boot_info(qedi))
2781                         QEDI_ERR(&qedi->dbg_ctx,
2782                                  "No iSCSI boot target configured\n");
2783
2784                 rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
2785                 if (rc)
2786                         QEDI_ERR(&qedi->dbg_ctx,
2787                                  "Failed to send drv state to MFW\n");
2788
2789         }
2790
2791         return 0;
2792
2793 free_cid_que:
2794         qedi_release_cid_que(qedi);
2795 free_uio:
2796         qedi_free_uio(qedi->udev);
2797 remove_host:
2798 #ifdef CONFIG_DEBUG_FS
2799         qedi_dbg_host_exit(&qedi->dbg_ctx);
2800 #endif
2801         iscsi_host_remove(qedi->shost);
2802 stop_iscsi_func:
2803         qedi_ops->stop(qedi->cdev);
2804 stop_slowpath:
2805         qedi_ops->common->slowpath_stop(qedi->cdev);
2806 stop_hw:
2807         qedi_ops->common->remove(qedi->cdev);
2808 free_pf_params:
2809         qedi_free_iscsi_pf_param(qedi);
2810 free_host:
2811         iscsi_host_free(qedi->shost);
2812 exit_probe:
2813         return rc;
2814 }
2815
2816 static void qedi_mark_conn_recovery(struct iscsi_cls_session *cls_session)
2817 {
2818         struct iscsi_session *session = cls_session->dd_data;
2819         struct iscsi_conn *conn = session->leadconn;
2820         struct qedi_conn *qedi_conn = conn->dd_data;
2821
2822         iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED);
2823 }
2824
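/*
 * Recovery work: fail every session's lead connection, let the MFW quiesce
 * any PCI transactions via recovery_prolog, then tear down and re-probe the
 * function in QEDI_MODE_RECOVERY.
 */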
2825 static void qedi_recovery_handler(struct work_struct *work)
2826 {
2827         struct qedi_ctx *qedi =
2828                         container_of(work, struct qedi_ctx, recovery_work.work);
2829
2830         iscsi_host_for_each_session(qedi->shost, qedi_mark_conn_recovery);
2831
2832         /* Call common_ops->recovery_prolog to allow the MFW to quiesce
2833          * any PCI transactions.
2834          */
2835         qedi_ops->common->recovery_prolog(qedi->cdev);
2836
2837         __qedi_remove(qedi->pdev, QEDI_MODE_RECOVERY);
2838         __qedi_probe(qedi->pdev, QEDI_MODE_RECOVERY);
2839         clear_bit(QEDI_IN_RECOVERY, &qedi->flags);
2840 }
2841
2842 static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2843 {
2844         return __qedi_probe(pdev, QEDI_MODE_NORMAL);
2845 }
2846
2847 static void qedi_remove(struct pci_dev *pdev)
2848 {
2849         __qedi_remove(pdev, QEDI_MODE_NORMAL);
2850 }
2851
2852 static struct pci_device_id qedi_pci_tbl[] = {
2853         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
2854         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
2855         { 0 },
2856 };
2857 MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
2858
2859 static enum cpuhp_state qedi_cpuhp_state;
2860
2861 static struct pci_error_handlers qedi_err_handler = {
2862         .error_detected = qedi_io_error_detected,
2863 };
2864
2865 static struct pci_driver qedi_pci_driver = {
2866         .name = QEDI_MODULE_NAME,
2867         .id_table = qedi_pci_tbl,
2868         .probe = qedi_probe,
2869         .remove = qedi_remove,
2870         .shutdown = qedi_shutdown,
2871         .err_handler = &qedi_err_handler,
2872 };
2873
2874 static int __init qedi_init(void)
2875 {
2876         struct qedi_percpu_s *p;
2877         int cpu, rc = 0;
2878
2879         qedi_ops = qed_get_iscsi_ops();
2880         if (!qedi_ops) {
2881                 QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
2882                 return -EINVAL;
2883         }
2884
2885 #ifdef CONFIG_DEBUG_FS
2886         qedi_dbg_init("qedi");
2887 #endif
2888
2889         qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
2890         if (!qedi_scsi_transport) {
2891                 QEDI_ERR(NULL, "Could not register qedi transport\n");
2892                 rc = -ENOMEM;
2893                 goto exit_qedi_init_1;
2894         }
2895
2896         for_each_possible_cpu(cpu) {
2897                 p = &per_cpu(qedi_percpu, cpu);
2898                 INIT_LIST_HEAD(&p->work_list);
2899                 spin_lock_init(&p->p_work_lock);
2900                 p->iothread = NULL;
2901         }
2902
2903         rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
2904                                qedi_cpu_online, qedi_cpu_offline);
2905         if (rc < 0)
2906                 goto exit_qedi_init_2;
2907         qedi_cpuhp_state = rc;
2908
2909         rc = pci_register_driver(&qedi_pci_driver);
2910         if (rc) {
2911                 QEDI_ERR(NULL, "Failed to register driver\n");
2912                 goto exit_qedi_hp;
2913         }
2914
2915         return 0;
2916
2917 exit_qedi_hp:
2918         cpuhp_remove_state(qedi_cpuhp_state);
2919 exit_qedi_init_2:
2920         iscsi_unregister_transport(&qedi_iscsi_transport);
2921 exit_qedi_init_1:
2922 #ifdef CONFIG_DEBUG_FS
2923         qedi_dbg_exit();
2924 #endif
2925         qed_put_iscsi_ops();
2926         return rc;
2927 }
2928
2929 static void __exit qedi_cleanup(void)
2930 {
2931         pci_unregister_driver(&qedi_pci_driver);
2932         cpuhp_remove_state(qedi_cpuhp_state);
2933         iscsi_unregister_transport(&qedi_iscsi_transport);
2934
2935 #ifdef CONFIG_DEBUG_FS
2936         qedi_dbg_exit();
2937 #endif
2938         qed_put_iscsi_ops();
2939 }
2940
2941 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
2942 MODULE_LICENSE("GPL");
2943 MODULE_AUTHOR("QLogic Corporation");
2944 MODULE_VERSION(QEDI_MODULE_VERSION);
2945 module_init(qedi_init);
2946 module_exit(qedi_cleanup);