drivers/target/iscsi/cxgbit/cxgbit_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);

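/*
 * kref release callback: free the DDP page-pod manager and the device
 * structure itself.
 */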
void _cxgbit_free_cdev(struct kref *kref)
{
        struct cxgbit_device *cdev;

        cdev = container_of(kref, struct cxgbit_device, kref);

        cxgbi_ppm_release(cdev2ppm(cdev));
        kfree(cdev);
}

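/*
 * Compute the maximum data segment length (MDSL): the smaller of the
 * adapter's iSCSI I/O length and the ULP2 packet limit (each less the
 * non-payload PDU overhead), further capped at 8192 bytes and at what
 * fits in an skb's frag array.
 */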
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
        mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
                     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
        mdsl = min_t(u32, mdsl, 8192);
        mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

        cdev->mdsl = mdsl;
}

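/*
 * ULD "add" callback: allocate a cxgbit_device for the adapter (T4 is
 * not supported), set up DDP, flag ISO support based on the firmware
 * version, and link the device into the global cdev list.
 */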
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
        struct cxgbit_device *cdev;

        if (is_t4(lldi->adapter_type))
                return ERR_PTR(-ENODEV);

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return ERR_PTR(-ENOMEM);

        kref_init(&cdev->kref);
        spin_lock_init(&cdev->np_lock);

        cdev->lldi = *lldi;

        cxgbit_set_mdsl(cdev);

        if (cxgbit_ddp_init(cdev) < 0) {
                kfree(cdev);
                return ERR_PTR(-EINVAL);
        }

        if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
                pr_info("cdev %s ddp init failed\n",
                        pci_name(lldi->pdev));

        if (lldi->fw_vers >= 0x10d2b00)
                set_bit(CDEV_ISO_ENABLE, &cdev->flags);

        spin_lock_init(&cdev->cskq.lock);
        INIT_LIST_HEAD(&cdev->cskq.list);

        mutex_lock(&cdev_list_lock);
        list_add_tail(&cdev->list, &cdev_list_head);
        mutex_unlock(&cdev_list_lock);

        pr_info("cdev %s added for iSCSI target transport\n",
                pci_name(lldi->pdev));

        return cdev;
}

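/*
 * Queue an empty skb on every connection's rx queue and wake the rx
 * thread, so that each connection notices the changed device state.
 */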
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock_bh(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock_bh(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

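/*
 * If no connections remain, unlink the device from the global list and
 * drop its reference; otherwise wake the active connections first.
 */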
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
        bool free_cdev = false;

        spin_lock_bh(&cdev->cskq.lock);
        if (list_empty(&cdev->cskq.list))
                free_cdev = true;
        spin_unlock_bh(&cdev->cskq.lock);

        if (free_cdev) {
                mutex_lock(&cdev_list_lock);
                list_del(&cdev->list);
                mutex_unlock(&cdev_list_lock);

                cxgbit_put_cdev(cdev);
        } else {
                cxgbit_close_conn(cdev);
        }
}

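/* ULD state-change callback: track adapter UP/RECOVERY/DOWN/DETACH. */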
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct cxgbit_device *cdev = handle;

        switch (state) {
        case CXGB4_STATE_UP:
                set_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_START_RECOVERY:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                cxgbit_close_conn(cdev);
                pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DOWN:
                pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DETACH:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
                cxgbit_detach_cdev(cdev);
                break;
        default:
                pr_info("cdev %s unknown state %d.\n",
                        pci_name(cdev->lldi.pdev), state);
                break;
        }
        return 0;
}

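/*
 * Decode the hardware DDP validity word: flag header/data digest
 * errors, log pad errors, and mark the payload as directly placed
 * (DDP'd) when no inline data was received for this PDU.
 */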
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
                      u32 ddpvld)
{
        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
                pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);

        if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
            (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
                pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
        }
}

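/*
 * Fold a CPL_RX_ISCSI_DDP completion into the current LRO PDU: record
 * status, data digest and PDU length; the PDU is complete once its
 * header has also been seen.
 */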
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

        cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));

        pdu_cb->flags |= PDUCBF_RX_STATUS;
        pdu_cb->ddigest = ntohl(cpl->ulp_crc);
        pdu_cb->pdulen = ntohs(cpl->len);

        if (pdu_cb->flags & PDUCBF_RX_HDR)
                pdu_cb->complete = true;

        lro_cb->pdu_totallen += pdu_cb->pdulen;
        lro_cb->complete = true;
        lro_cb->pdu_idx++;
}

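/*
 * Append the gather list's pages to the skb frag array, skipping
 * 'offset' bytes of CPL header in the first fragment.
 */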
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
                  unsigned int offset)
{
        u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
        u8 i;

        /* usually there's just one frag */
        __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
                             gl->frags[0].offset + offset,
                             gl->frags[0].size - offset);
        for (i = 1; i < gl->nfrags; i++)
                __skb_fill_page_desc(skb, skb_frag_idx + i,
                                     gl->frags[i].page,
                                     gl->frags[i].offset,
                                     gl->frags[i].size);

        skb_shinfo(skb)->nr_frags += gl->nfrags;

        /* get a reference to the last page, we don't own it */
        get_page(gl->frags[gl->nfrags - 1].page);
}

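/*
 * Merge one ingress CPL into the LRO skb: record header state for
 * CPL_ISCSI_HDR, payload state for CPL_ISCSI_DATA, or header plus
 * completion status for CPL_RX_ISCSI_CMP, then attach the payload
 * pages and update the skb length accounting.
 */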
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        u32 len, offset;

        if (op == CPL_ISCSI_HDR) {
                struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

                offset = sizeof(struct cpl_iscsi_hdr);
                pdu_cb->flags |= PDUCBF_RX_HDR;
                pdu_cb->seq = ntohl(cpl->seq);
                len = ntohs(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                lro_cb->complete = false;
        } else if (op == CPL_ISCSI_DATA) {
                struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

                offset = sizeof(struct cpl_iscsi_data);
                pdu_cb->flags |= PDUCBF_RX_DATA;
                len = ntohs(cpl->len);
                pdu_cb->dlen = len;
                pdu_cb->doffset = lro_cb->offset;
                pdu_cb->nr_dfrags = gl->nfrags;
                pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
                lro_cb->complete = false;
        } else {
                struct cpl_rx_iscsi_cmp *cpl;

                cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
                offset = sizeof(struct cpl_rx_iscsi_cmp);
                pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
                len = be16_to_cpu(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
                pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
                pdu_cb->pdulen = ntohs(cpl->len);

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
                                      be32_to_cpu(cpl->ddpvld));

                if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
                        pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
                        pdu_cb->complete = true;
                } else if (pdu_cb->flags & PDUCBF_RX_DATA) {
                        pdu_cb->complete = true;
                }

                lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
                lro_cb->complete = true;
                lro_cb->pdu_idx++;
        }

        cxgbit_copy_frags(skb, gl, offset);

        pdu_cb->frags += gl->nfrags;
        lro_cb->offset += len;
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
}

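/*
 * Open a new LRO session: allocate an skb whose zeroed headroom holds
 * the LRO control blocks and take a socket reference for the lifetime
 * of the session.
 */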
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
                    const __be64 *rsp, struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);

        if (unlikely(!skb))
                return NULL;

        memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

        cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

        lro_cb = cxgbit_skb_lro_cb(skb);

        cxgbit_get_csk(csk);

        lro_cb->csk = csk;

        return skb;
}

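/*
 * Deliver a flushed LRO skb to the connection's rx queue, waking the
 * rx thread if the queue was previously empty.
 */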
static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        bool wakeup_thread = false;

        spin_lock(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        if (skb_queue_len(&csk->rxq) == 1)
                wakeup_thread = true;
        spin_unlock(&csk->rxq.lock);

        if (wakeup_thread)
                wake_up(&csk->waitq);
}

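/*
 * Close an LRO session: unlink the skb from the LRO manager, hand it
 * to the owning connection and drop the session's socket reference.
 */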
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_sock *csk = lro_cb->csk;

        csk->lro_skb = NULL;

        __skb_unlink(skb, &lro_mgr->lroq);
        cxgbit_queue_lro_skb(csk, skb);

        cxgbit_put_csk(csk);

        lro_mgr->lro_pkts++;
        lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
        struct sk_buff *skb;

        while ((skb = skb_peek(&lro_mgr->lroq)))
                cxgbit_lro_flush(lro_mgr, skb);
}

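/*
 * Coalesce one CPL into the connection's current LRO skb, opening a
 * new session if needed (flushing all sessions once MAX_LRO_SESSIONS
 * is reached) and flushing first when frag, PDU-count or total-length
 * limits would be exceeded.
 */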
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
                   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                   struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        if (!csk) {
                pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
                goto out;
        }

        if (csk->lro_skb)
                goto add_packet;

start_lro:
        if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
                cxgbit_uld_lro_flush(lro_mgr);
                goto start_lro;
        }

        skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
        if (unlikely(!skb))
                goto out;

        csk->lro_skb = skb;

        __skb_queue_tail(&lro_mgr->lroq, skb);
        lro_mgr->lro_session_cnt++;

add_packet:
        skb = csk->lro_skb;
        lro_cb = cxgbit_skb_lro_cb(skb);

        if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
            MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
            (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
                cxgbit_lro_flush(lro_mgr, skb);
                goto start_lro;
        }

        if (gl)
                cxgbit_lro_add_packet_gl(skb, op, gl);
        else
                cxgbit_lro_add_packet_rsp(skb, op, rsp);

        lro_mgr->lro_merged++;

        return 0;

out:
        return -1;
}

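/*
 * Main rx entry point: resolve the CPL to a connection by tid, try
 * LRO coalescing for the iSCSI fast-path CPLs, otherwise build a
 * linear skb and dispatch it to the per-opcode handler table.
 */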
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
                          const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                          struct napi_struct *napi)
{
        struct cxgbit_device *cdev = hndl;
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct cpl_tx_data *rpl = NULL;
        struct cxgbit_sock *csk = NULL;
        unsigned int tid = 0;
        struct sk_buff *skb;
        unsigned int op = *(u8 *)rsp;
        bool lro_flush = true;

        switch (op) {
        case CPL_ISCSI_HDR:
        case CPL_ISCSI_DATA:
        case CPL_RX_ISCSI_CMP:
        case CPL_RX_ISCSI_DDP:
        case CPL_FW4_ACK:
                lro_flush = false;
                /* fall through */
        case CPL_ABORT_RPL_RSS:
        case CPL_PASS_ESTABLISH:
        case CPL_PEER_CLOSE:
        case CPL_CLOSE_CON_RPL:
        case CPL_ABORT_REQ_RSS:
        case CPL_SET_TCB_RPL:
        case CPL_RX_DATA:
                rpl = gl ? (struct cpl_tx_data *)gl->va :
                           (struct cpl_tx_data *)(rsp + 1);
                tid = GET_TID(rpl);
                csk = lookup_tid(lldi->tids, tid);
                break;
        default:
                break;
        }

        if (csk && csk->lro_skb && lro_flush)
                cxgbit_lro_flush(lro_mgr, csk->lro_skb);

        if (!gl) {
                unsigned int len;

                if (op == CPL_RX_ISCSI_DDP) {
                        if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
                                                napi))
                                return 0;
                }

                len = 64 - sizeof(struct rsp_ctrl) - 8;
                skb = napi_alloc_skb(napi, len);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else {
                if (unlikely(op != *(u8 *)gl->va)) {
                        pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
                                gl->va, be64_to_cpu(*rsp),
                                get_unaligned_be64(gl->va),
                                gl->tot_len);
                        return 0;
                }

                if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
                    (op == CPL_RX_ISCSI_CMP)) {
                        if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
                                                napi))
                                return 0;
                }

#define RX_PULL_LEN 128
                skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = (struct cpl_tx_data *)skb->data;
        op = rpl->ot.opcode;
        cxgbit_skcb_rx_opcode(skb) = op;

        pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
                 cdev, op, rpl->ot.opcode_tid,
                 ntohl(rpl->ot.opcode_tid), skb);

        if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
                cxgbit_cplhandlers[op](cdev, skb);
        } else {
                pr_err("No handler for opcode 0x%x.\n", op);
                __kfree_skb(skb);
        }
        return 0;
nomem:
        pr_err("%s OOM bailing out.\n", __func__);
        return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
        struct dcb_app_type dcb_app;
        struct work_struct work;
};

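/*
 * Wake the rx thread of every connection on this port whose local
 * port number matches and whose DCB priority has changed, so the
 * connection can react to the new priority.
 */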
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
                           u8 dcb_priority, u16 port_num)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        u16 local_port;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                if (csk->port_id != port_id)
                        continue;

                if (csk->com.local_addr.ss_family == AF_INET6) {
                        struct sockaddr_in6 *sock_in6;

                        sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
                        local_port = ntohs(sock_in6->sin6_port);
                } else {
                        struct sockaddr_in *sock_in;

                        sock_in = (struct sockaddr_in *)&csk->com.local_addr;
                        local_port = ntohs(sock_in->sin_port);
                }

                if (local_port != port_num)
                        continue;

                if (csk->dcb_priority == dcb_priority)
                        continue;

                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

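/*
 * Work handler for DCB app events: derive the iSCSI priority from an
 * IEEE or CEE notification, find the device behind the ifindex and
 * propagate the priority to matching connections.
 */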
static void cxgbit_dcb_workfn(struct work_struct *work)
{
        struct cxgbit_dcb_work *dcb_work;
        struct net_device *ndev;
        struct cxgbit_device *cdev = NULL;
        struct dcb_app_type *iscsi_app;
        u8 priority, port_id = 0xff;

        dcb_work = container_of(work, struct cxgbit_dcb_work, work);
        iscsi_app = &dcb_work->dcb_app;

        if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
                if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
                        goto out;

                priority = iscsi_app->app.priority;

        } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
                if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
                        goto out;

                if (!iscsi_app->app.priority)
                        goto out;

                priority = ffs(iscsi_app->app.priority) - 1;
        } else {
                goto out;
        }

        pr_debug("priority for ifid %d is %u\n",
                 iscsi_app->ifindex, priority);

        ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);

        if (!ndev)
                goto out;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_device(ndev, &port_id);

        dev_put(ndev);

        if (!cdev) {
                mutex_unlock(&cdev_list_lock);
                goto out;
        }

        cxgbit_update_dcb_priority(cdev, port_id, priority,
                                   iscsi_app->app.protocol);
        mutex_unlock(&cdev_list_lock);
out:
        kfree(dcb_work);
}

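/*
 * DCB event notifier: copy the app event and defer handling to a work
 * item; GFP_ATOMIC is used since the notifier may run in atomic
 * context.
 */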
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
                       void *data)
{
        struct cxgbit_dcb_work *dcb_work;
        struct dcb_app_type *dcb_app = data;

        dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
        if (!dcb_work)
                return NOTIFY_DONE;

        dcb_work->dcb_app = *dcb_app;
        INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
        schedule_work(&dcb_work->work);
        return NOTIFY_OK;
}
#endif

static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
        return TARGET_PROT_NORMAL;
}

static struct iscsit_transport cxgbit_transport = {
        .name                   = DRV_NAME,
        .transport_type         = ISCSI_CXGBIT,
        .rdma_shutdown          = false,
        .priv_size              = sizeof(struct cxgbit_cmd),
        .owner                  = THIS_MODULE,
        .iscsit_setup_np        = cxgbit_setup_np,
        .iscsit_accept_np       = cxgbit_accept_np,
        .iscsit_free_np         = cxgbit_free_np,
        .iscsit_free_conn       = cxgbit_free_conn,
        .iscsit_get_login_rx    = cxgbit_get_login_rx,
        .iscsit_put_login_tx    = cxgbit_put_login_tx,
        .iscsit_immediate_queue = iscsit_immediate_queue,
        .iscsit_response_queue  = iscsit_response_queue,
        .iscsit_get_dataout     = iscsit_build_r2ts_for_cmd,
        .iscsit_queue_data_in   = iscsit_queue_rsp,
        .iscsit_queue_status    = iscsit_queue_rsp,
        .iscsit_xmit_pdu        = cxgbit_xmit_pdu,
        .iscsit_get_r2t_ttt     = cxgbit_get_r2t_ttt,
        .iscsit_get_rx_pdu      = cxgbit_get_rx_pdu,
        .iscsit_validate_params = cxgbit_validate_params,
        .iscsit_unmap_cmd       = cxgbit_unmap_cmd,
        .iscsit_aborted_task    = iscsit_aborted_task,
        .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
        .name           = DRV_NAME,
        .nrxq           = MAX_ULD_QSETS,
        .ntxq           = MAX_ULD_QSETS,
        .rxq_size       = 1024,
        .lro            = true,
        .add            = cxgbit_uld_add,
        .state_change   = cxgbit_uld_state_change,
        .lro_rx_handler = cxgbit_uld_lro_rx_handler,
        .lro_flush      = cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
        .notifier_call = cxgbit_dcbevent_notify,
};
#endif

static int __init cxgbit_init(void)
{
        cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
        iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
        pr_info("%s dcb enabled.\n", DRV_NAME);
        register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
                     sizeof(union cxgbit_skb_cb));
        return 0;
}

static void __exit cxgbit_exit(void)
{
        struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
        unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        mutex_lock(&cdev_list_lock);
        list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
                list_del(&cdev->list);
                cxgbit_put_cdev(cdev);
        }
        mutex_unlock(&cdev_list_lock);
        iscsit_unregister_transport(&cxgbit_transport);
        cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");