/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:  Karen Xie (kxie@chelsio.com)
 *              Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

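/*
 * libcxgbi.h's log_debug() macro expands to a reference to dbg_level, so
 * the variable is defined before that header is pulled in.
 */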
static unsigned int dbg_level;

#include "../libcxgbi.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

#define DRV_MODULE_NAME         "cxgb4i"
#define DRV_MODULE_DESC         "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION      "0.9.5-ko"
#define DRV_MODULE_RELDATE      "Apr. 2015"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
                "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .lro = false,
        .add = t4_uld_add,
        .rx_handler = t4_uld_rx_handler,
        .state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB4I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_timed_out   = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .dma_boundary   = PAGE_SIZE - 1,
        .this_id        = -1,
        .track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                                CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session        = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn              = cxgbi_bind_conn,
        .unbind_conn    = iscsi_conn_unbind,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
        .notifier_call = cxgb4_dcb_change_notify,
};
#endif

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPL messages related to iSCSI TCP
 * connection open/close/abort and data send/receive.
 */

#define RCV_BUFSIZ_MASK         0x3FFU
#define MAX_IMM_TX_PKT_LEN      256

static int push_tx_frames(struct cxgbi_sock *, int);

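/*
 * "Immediate" data is carried inline in the work request itself rather
 * than being fetched by the hardware through a separate DMA gather list,
 * which avoids the extra DMA read for small sends.
 */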
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
        int len = skb->len;

        if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
                len += sizeof(struct fw_ofld_tx_data_wr);

        if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO)))
                len += sizeof(struct cpl_tx_data_iso);

        return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN);
}

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
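        /*
         * The atid sits in the low 14 bits of the TID field; the RSS queue
         * id packed above it steers the reply CPLs for this open back to
         * our response queue.
         */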
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(csk->mss_idx) |
                L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(csk->rcv_win >> 10);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
                RSS_QUEUE_V(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req *req =
                                (struct cpl_act_open_req *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t));
                opt2 |= RX_FC_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be64(FILTER_TUPLE_V(
                                cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t)));
                req->rsvd = cpu_to_be32(isn);
                opt2 |= T5_ISS_VALID;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else {
                struct cpl_t6_act_open_req *req =
                                (struct cpl_t6_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                            qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be64(FILTER_TUPLE_V(
                                cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t)));
                req->rsvd = cpu_to_be32(isn);

                opt2 |= T5_ISS_VALID;
                opt2 |= RX_FC_DISABLE_F;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);
                req->rsvd2 = cpu_to_be32(0);
                req->opt3 = cpu_to_be32(0);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                          "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                          csk, &req->local_ip, ntohs(req->local_port),
                          &req->peer_ip, ntohs(req->peer_port),
                          csk->atid, csk->rss_qid);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
                       csk->state, csk->flags, csk->atid, csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                               struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(csk->mss_idx) |
                L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(csk->rcv_win >> 10);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
                RSS_QUEUE_V(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req6 *req =
                            (struct cpl_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;

                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                    8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                    8);

                req->opt0 = cpu_to_be64(opt0);

                opt2 |= RX_FC_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t));
        } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req6 *req =
                                (struct cpl_t5_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;
                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                        8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                        8);
                req->opt0 = cpu_to_be64(opt0);

                opt2 |= T5_OPT_2_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));
        } else {
                struct cpl_t6_act_open_req6 *req =
                                (struct cpl_t6_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;
                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                        8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                        8);
                req->opt0 = cpu_to_be64(opt0);

                opt2 |= RX_FC_DISABLE_F;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));

                req->rsvd2 = cpu_to_be32(0);
                req->opt3 = cpu_to_be32(0);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
                CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
                csk->flags, csk->atid,
                &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
                csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u.\n",
                csk, csk->state, csk->flags, csk->tid);
        csk->cpl_close = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = 0;

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
        struct cpl_abort_req *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
                csk, csk->state, csk->flags, csk->tid);
        req = (struct cpl_abort_req *)skb->data;
        req->cmd = CPL_ABORT_NO_RST;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

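/*
 * Send an ABORT_REQ to tear the connection down.  The firmware expects a
 * FLOWC work request as the first WR on an offloaded connection, so emit
 * one first if no data has been sent yet.
 */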
static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                send_tx_flowc_wr(csk);
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }

        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        req->cmd = CPL_ABORT_SEND_RST;
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                      csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
                                       | RX_FORCE_ACK_F);
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
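        /*
         * A flit is 8 bytes.  The first two flits hold the ULPTX header
         * plus the first 32-bit length and 64-bit address; every further
         * pair of entries packs into three flits, and a final odd entry
         * takes two.
         */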
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
        unsigned int flits, cnt;

        if (is_ofld_imm(skb))
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
}

#define FLOWC_WR_NPARAMS_MIN    9
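/*
 * Work out how many 16-byte credits a FLOWC work request will consume.
 * With DCB enabled an extra FW_FLOWC_MNEM_DCBPRIO parameter is carried,
 * hence the additional mnemonic slot.
 */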
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
        int nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the FlowC request.
         * Pass back the nparams and actual FlowC length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;

        return flowclen16;
}

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int nparams, flowclen16, flowclen;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
        flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);
        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (csk->cdev->skb_iso_txhdr)
                flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB);
        else
                flowc->mnemval[8].val = cpu_to_be32(16128);
#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == CPL_L2T_VLAN_NONE) {
                pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
                                    csk->tid);
                flowc->mnemval[9].val = cpu_to_be32(0);
        } else {
                flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
                                        VLAN_PRIO_SHIFT);
        }
#endif

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
                csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
                csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
                csk->advmss);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

        return flowclen16;
}

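/*
 * Fill in the CPL_TX_DATA_ISO header that tells the hardware how to slice
 * a large payload into individual iSCSI PDUs (segmentation offload),
 * inserting header/data digests and padding per the negotiated submode.
 */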
static void
cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl)
{
        struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head;
        u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE);
        u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE);
        u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE);
        u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1;
        u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;

        cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
                                CPL_TX_DATA_ISO_FIRST_V(fslice) |
                                CPL_TX_DATA_ISO_LAST_V(lslice) |
                                CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
                                CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
                                CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
                                CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) |
                                CPL_TX_DATA_ISO_SCSI_V(pdu_type));

        cpl->ahs_len = info->ahs;
        cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4));
        cpl->burst_size = cpu_to_be32(info->burst_size);
        cpl->len = cpu_to_be32(info->len);
        cpl->reserved2_seglen_offset =
             cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset));
        cpl->datasn_offset = cpu_to_be32(info->datasn_offset);
        cpl->buffer_offset = cpu_to_be32(info->buffer_offset);
        cpl->reserved3 = cpu_to_be32(0);
        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
                  "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, "
                  "burst_size %u, iso_len %u\n",
                  info->flags, info->op, info->ahs, info->num_pdu,
                  info->mpdu, info->burst_size << 2, info->len);
}

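/*
 * Prepend the FW_OFLD_TX_DATA_WR (plus, for ISO skbs, the inline
 * CPL_TX_DATA_ISO) that describes to the firmware how to transmit this
 * payload.
 */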
static void
cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
                       int len, u32 credits, int compl)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct fw_ofld_tx_data_wr *req;
        struct cpl_tx_data_iso *cpl;
        u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
        u32 wr_ulp_mode = 0;
        u32 hdr_size = sizeof(*req);
        u32 opcode = FW_OFLD_TX_DATA_WR;
        u32 immlen = 0;
        u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
                                                T6_TX_FORCE_F;

        if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
                hdr_size += sizeof(struct cpl_tx_data_iso);
                opcode = FW_ISCSI_TX_DATA_WR;
                immlen += sizeof(struct cpl_tx_data_iso);
                submode |= 8;
        }

        if (is_ofld_imm(skb))
                immlen += dlen;

        req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size);
        req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
                                         FW_WR_COMPL_V(compl) |
                                         FW_WR_IMMDLEN_V(immlen));
        req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
                                        FW_WR_LEN16_V(credits));
        req->plen = cpu_to_be32(len);
        cpl = (struct cpl_tx_data_iso *)(req + 1);

        if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
                cxgb4i_make_tx_iso_cpl(skb, cpl);

        if (submode)
                wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
                              FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

        req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force |
                                           FW_OFLD_TX_DATA_WR_SHOVE_V(1U));

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
                          1 << CXGBI_DBG_PDU_TX,
                          "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                          csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

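        /*
         * Tx work-request credits are accounted in 16-byte units; each
         * send must fit within the credits this connection has left.
         */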
        while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
                struct cxgbi_iso_info *iso_cpl;
                u32 dlen = skb->len;
                u32 len = skb->len;
                u32 iso_cpl_len = 0;
                u32 flowclen16 = 0;
                u32 credits_needed;
                u32 num_pdu = 1, hdr_len;

                if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
                        iso_cpl_len = sizeof(struct cpl_tx_data_iso);

                if (is_ofld_imm(skb))
                        credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
                else
                        credits_needed =
                                DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) +
                                             iso_cpl_len, 16);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
                        credits_needed +=
                           DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16);

                /*
                 * Assumes the initial credit allocation is large enough to
                 * cover the fw_flowc_wr plus the largest possible first
                 * payload.
                 */
                if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                        flowclen16 = send_tx_flowc_wr(csk);
                        csk->wr_cred -= flowclen16;
                        csk->wr_una_cred += flowclen16;
                        cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
                }

                if (csk->wr_cred < credits_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
                                  csk, skb->len, skb->data_len,
                                  credits_needed, csk->wr_cred);

                        csk->no_tx_credits++;
                        break;
                }

                csk->no_tx_credits = 0;

                __skb_unlink(skb, &csk->write_queue);
                set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
                skb->csum = (__force __wsum)(credits_needed + flowclen16);
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, credits_needed,
                        csk->wr_cred, csk->wr_una_cred);

                if (!req_completion &&
                    ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
                     after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
                        req_completion = 1;

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb);

                        if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
                                iso_cpl = (struct cxgbi_iso_info *)skb->head;
                                num_pdu = iso_cpl->num_pdu;
                                hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
                                len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) +
                                       (hdr_len * (num_pdu - 1));
                        } else {
                                len += cxgbi_ulp_extra_len(ulp_mode);
                        }

                        cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
                                               credits_needed, req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
                           (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
                        struct cpl_close_con_req *req =
                                (struct cpl_close_con_req *)skb->data;

                        req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F);
                }

                total_size += skb->truesize;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                          "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
                          csk, csk->state, csk->flags, csk->tid, skb, len);
                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
        }
        return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb4_free_atid(lldi->tids, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

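/*
 * CPL_ACT_ESTABLISH: the firmware reports that our active open completed.
 * Move the connection from its temporary atid to the permanent hardware
 * tid, sync the sequence state, and kick off any queued transmit.
 */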
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
        unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
                goto rel_skb;
        }

        if (csk->atid != atid) {
                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
                goto rel_skb;
        }

        pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       atid, tid, csk, csk->state, csk->flags, rcv_isn);

        module_put(cdev->owner);

        cxgbi_sock_get(csk);
        csk->tid = tid;
        cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

        free_atid(csk);

        spin_lock_bh(&csk->lock);
        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        /*
         * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
         * pass through opt0.
         */
        if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
        if (TCPOPT_TSTAMP_G(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
                        csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 0);
                cxgbi_conn_tx_open(csk);
        }
        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

static void csk_act_open_retry_timer(struct timer_list *t)
{
        struct sk_buff *skb = NULL;
        struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
                                   struct l2t_entry *);
        int t4 = is_t4(lldi->adapter_type), size, size6;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (t4) {
                size = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
        } else {
                size = sizeof(struct cpl_t5_act_open_req);
                size6 = sizeof(struct cpl_t5_act_open_req6);
        }

        if (csk->csk_family == AF_INET) {
                send_act_open_func = send_act_open_req;
                skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                send_act_open_func = send_act_open_req6;
                skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
        }

        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
                                       cxgbi_sock_act_open_req_arp_failure);
                send_act_open_func(csk, skb, csk->l2t);
        }

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
                status == CPL_ERR_KEEPALV_NEG_ADVICE ||
                status == CPL_ERR_PERSIST_NEG_ADVICE;
}

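/*
 * CPL_ACT_OPEN_RPL: the active open could not be completed.
 * CPL_ERR_CONN_EXIST is retried after a short delay; any other status
 * (other than negative advice) fails the open with a matching errno.
 */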
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
                TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
        unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
                goto rel_skb;
        }

        pr_info_ipaddr("tid %u/%u, status %u.\n"
                       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
                       atid, tid, status, csk, csk->state, csk->flags);

        if (is_neg_adv(status))
                goto rel_skb;

        module_put(cdev->owner);

        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
                cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
                                 csk->csk_family);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                        act_open_rpl_status_to_errno(status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
        __kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                                                int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN:
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ?
                        -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

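/*
 * CPL_ABORT_REQ_RSS: the peer (or the hardware on its behalf) aborted the
 * connection.  Acknowledge with an ABORT_RPL and tear the connection down
 * unless our own abort is still waiting for its reply.
 */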
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        int rst_status = CPL_ABORT_NO_RST;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid, req->status);

        if (is_neg_adv(req->status))
                goto rel_skb;

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                send_tx_flowc_wr(csk);
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }

        cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
        cxgbi_sock_set_state(csk, CTP_ABORTING);

        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk)
                goto rel_skb;

        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
                       (&csk->saddr), (&csk->daddr), csk,
                       csk->state, csk->flags, csk->tid, rpl->status);

        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;

        cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk) {
                pr_err("can't find connection for tid %u.\n", tid);
        } else {
                /* not expecting this, reset the connection. */
                pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
                spin_lock_bh(&csk->lock);
                send_abort_req(csk);
                spin_unlock_bh(&csk->lock);
        }
        __kfree_skb(skb);
}

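/*
 * CPL_ISCSI_HDR delivers the header (and on some chips the payload) of a
 * received PDU.  The first skb of each PDU is remembered in
 * csk->skb_ulp_lhdr; follow-on skbs carrying payload are flagged RX_DATA
 * so the completion handler can reassemble one coherent PDU.
 */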
1244 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
1245 {
1246         struct cxgbi_sock *csk;
1247         struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1248         unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1249         unsigned int tid = GET_TID(cpl);
1250         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1251         struct tid_info *t = lldi->tids;
1252
1253         csk = lookup_tid(t, tid);
1254         if (unlikely(!csk)) {
1255                 pr_err("can't find conn. for tid %u.\n", tid);
1256                 goto rel_skb;
1257         }
1258
1259         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1260                 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1261                 csk, csk->state, csk->flags, csk->tid, skb, skb->len,
1262                 pdu_len_ddp);
1263
1264         spin_lock_bh(&csk->lock);
1265
1266         if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1267                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1268                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1269                         csk, csk->state, csk->flags, csk->tid);
1270                 if (csk->state != CTP_ABORTING)
1271                         goto abort_conn;
1272                 else
1273                         goto discard;
1274         }
1275
1276         cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
1277         cxgbi_skcb_flags(skb) = 0;
1278
1279         skb_reset_transport_header(skb);
1280         __skb_pull(skb, sizeof(*cpl));
1281         __pskb_trim(skb, ntohs(cpl->len));
1282
1283         if (!csk->skb_ulp_lhdr) {
1284                 unsigned char *bhs;
1285                 unsigned int hlen, dlen, plen;
1286
1287                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1288                         "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1289                         csk, csk->state, csk->flags, csk->tid, skb);
1290                 csk->skb_ulp_lhdr = skb;
1291                 cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1292
1293                 if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
1294                     (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
1295                         pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
1296                                 csk->tid, cxgbi_skcb_tcp_seq(skb),
1297                                 csk->rcv_nxt);
1298                         goto abort_conn;
1299                 }
1300
1301                 bhs = skb->data;
1302                 hlen = ntohs(cpl->len);
1303                 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
1304
1305                 plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
1306                 if (is_t4(lldi->adapter_type))
1307                         plen -= 40;
1308
1309                 if ((hlen + dlen) != plen) {
1310                         pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
1311                                 "mismatch %u != %u + %u, seq 0x%x.\n",
1312                                 csk->tid, plen, hlen, dlen,
1313                                 cxgbi_skcb_tcp_seq(skb));
1314                         goto abort_conn;
1315                 }
1316
1317                 cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
1318                 if (dlen)
1319                         cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1320                 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
1321
1322                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1323                         "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1324                         csk, skb, *bhs, hlen, dlen,
1325                         ntohl(*((unsigned int *)(bhs + 16))),
1326                         ntohl(*((unsigned int *)(bhs + 24))));
1327
1328         } else {
1329                 struct sk_buff *lskb = csk->skb_ulp_lhdr;
1330
1331                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1332                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1333                         "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1334                         csk, csk->state, csk->flags, skb, lskb);
1335         }
1336
1337         __skb_queue_tail(&csk->receive_queue, skb);
1338         spin_unlock_bh(&csk->lock);
1339         return;
1340
1341 abort_conn:
1342         send_abort_req(csk);
1343 discard:
1344         spin_unlock_bh(&csk->lock);
1345 rel_skb:
1346         __kfree_skb(skb);
1347 }
1348
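/*
 * Handle an ingress CPL_ISCSI_DATA message carrying PDU payload that was
 * not placed directly (no DDP).  The skb is queued behind the pending
 * header skb, which is flagged SKCBF_RX_DATA; if no header is pending
 * (completion-mode delivery, where the header arrives last via
 * CPL_RX_ISCSI_CMP), this skb stands in as skb_ulp_lhdr.
 */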
1349 static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1350 {
1351         struct cxgbi_sock *csk;
1352         struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1353         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1354         struct tid_info *t = lldi->tids;
1355         struct sk_buff *lskb;
1356         u32 tid = GET_TID(cpl);
1357         u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1358
1359         csk = lookup_tid(t, tid);
1360         if (unlikely(!csk)) {
1361                 pr_err("can't find conn. for tid %u.\n", tid);
1362                 goto rel_skb;
1363         }
1364
1365         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1366                   "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1367                   csk, csk->state, csk->flags, csk->tid, skb,
1368                   skb->len, pdu_len_ddp);
1369
1370         spin_lock_bh(&csk->lock);
1371
1372         if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1373                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1374                           "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1375                           csk, csk->state, csk->flags, csk->tid);
1376
1377                 if (csk->state != CTP_ABORTING)
1378                         goto abort_conn;
1379                 else
1380                         goto discard;
1381         }
1382
1383         cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
1384         cxgbi_skcb_flags(skb) = 0;
1385
1386         skb_reset_transport_header(skb);
1387         __skb_pull(skb, sizeof(*cpl));
1388         __pskb_trim(skb, ntohs(cpl->len));
1389
1390         if (!csk->skb_ulp_lhdr)
1391                 csk->skb_ulp_lhdr = skb;
1392
1393         lskb = csk->skb_ulp_lhdr;
1394         cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1395
1396         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1397                   "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1398                   csk, csk->state, csk->flags, skb, lskb);
1399
1400         __skb_queue_tail(&csk->receive_queue, skb);
1401         spin_unlock_bh(&csk->lock);
1402         return;
1403
1404 abort_conn:
1405         send_abort_req(csk);
1406 discard:
1407         spin_unlock_bh(&csk->lock);
1408 rel_skb:
1409         __kfree_skb(skb);
1410 }
1411
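/*
 * Decode the ddpvld status word from RX_DATA_DDP/RX_ISCSI_CMP into skb
 * control-block flags: header and data digest errors, padding errors, and
 * whether the payload was placed directly into the ULP buffers (DDP'ed),
 * in which case no data skb follows the header.
 */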
1412 static void
1413 cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1414                       struct sk_buff *skb, u32 ddpvld)
1415 {
1416         if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1417                 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1418                         csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1419                 cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
1420         }
1421
1422         if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1423                 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1424                         csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1425                 cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
1426         }
1427
1428         if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1429                 log_debug(1 << CXGBI_DBG_PDU_RX,
1430                           "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1431                           csk, skb, ddpvld);
1432                 cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
1433         }
1434
1435         if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1436             !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1437                 log_debug(1 << CXGBI_DBG_PDU_RX,
1438                           "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1439                           csk, skb, ddpvld);
1440                 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
1441         }
1442 }
1443
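/*
 * Handle CPL_RX_DATA_DDP/CPL_RX_ISCSI_DDP, the status message that closes
 * out the PDU started by CPL_ISCSI_HDR: record the data digest, cross-check
 * the hardware PDU length, fold the ddpvld bits into the header skb's
 * flags, mark it SKCBF_RX_STATUS and call cxgbi_conn_pdu_ready() to have
 * libcxgbi process the completed PDU.  The status skb itself is freed.
 */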
1444 static void do_rx_data_ddp(struct cxgbi_device *cdev,
1445                                   struct sk_buff *skb)
1446 {
1447         struct cxgbi_sock *csk;
1448         struct sk_buff *lskb;
1449         struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
1450         unsigned int tid = GET_TID(rpl);
1451         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1452         struct tid_info *t = lldi->tids;
1453         u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1454
1455         csk = lookup_tid(t, tid);
1456         if (unlikely(!csk)) {
1457                 pr_err("can't find connection for tid %u.\n", tid);
1458                 goto rel_skb;
1459         }
1460
1461         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1462                 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1463                 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
1464
1465         spin_lock_bh(&csk->lock);
1466
1467         if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1468                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1469                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1470                         csk, csk->state, csk->flags, csk->tid);
1471                 if (csk->state != CTP_ABORTING)
1472                         goto abort_conn;
1473                 else
1474                         goto discard;
1475         }
1476
1477         if (!csk->skb_ulp_lhdr) {
1478                 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1479                 goto abort_conn;
1480         }
1481
1482         lskb = csk->skb_ulp_lhdr;
1483         csk->skb_ulp_lhdr = NULL;
1484
1485         cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
1486
1487         if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
1488                 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1489                         csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1490
1491         cxgb4i_process_ddpvld(csk, lskb, ddpvld);
1492
1493         log_debug(1 << CXGBI_DBG_PDU_RX,
1494                 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1495                 csk, lskb, cxgbi_skcb_flags(lskb));
1496
1497         cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
1498         cxgbi_conn_pdu_ready(csk);
1499         spin_unlock_bh(&csk->lock);
1500         goto rel_skb;
1501
1502 abort_conn:
1503         send_abort_req(csk);
1504 discard:
1505         spin_unlock_bh(&csk->lock);
1506 rel_skb:
1507         __kfree_skb(skb);
1508 }
1509
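/*
 * Handle CPL_RX_ISCSI_CMP, the completion-mode message that delivers the
 * PDU header together with the final status, so no separate
 * CPL_RX_DATA_DDP follows.  If payload arrived earlier via CPL_ISCSI_DATA,
 * it is re-queued behind this header skb to keep the header-before-data
 * ordering that cxgbi_conn_pdu_ready() expects.
 */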
1510 static void
1511 do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
1512 {
1513         struct cxgbi_sock *csk;
1514         struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
1515         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1516         struct tid_info *t = lldi->tids;
1517         struct sk_buff *data_skb = NULL;
1518         u32 tid = GET_TID(rpl);
1519         u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1520         u32 seq = be32_to_cpu(rpl->seq);
1521         u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
1522
1523         csk = lookup_tid(t, tid);
1524         if (unlikely(!csk)) {
1525                 pr_err("can't find connection for tid %u.\n", tid);
1526                 goto rel_skb;
1527         }
1528
1529         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1530                   "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
1531                   "pdu_len_ddp %u, status %u.\n",
1532                   csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
1533                   ntohs(rpl->len), pdu_len_ddp, rpl->status);
1534
1535         spin_lock_bh(&csk->lock);
1536
1537         if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1538                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1539                           "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1540                           csk, csk->state, csk->flags, csk->tid);
1541
1542                 if (csk->state != CTP_ABORTING)
1543                         goto abort_conn;
1544                 else
1545                         goto discard;
1546         }
1547
1548         cxgbi_skcb_tcp_seq(skb) = seq;
1549         cxgbi_skcb_flags(skb) = 0;
1550         cxgbi_skcb_rx_pdulen(skb) = 0;
1551
1552         skb_reset_transport_header(skb);
1553         __skb_pull(skb, sizeof(*rpl));
1554         __pskb_trim(skb, be16_to_cpu(rpl->len));
1555
1556         csk->rcv_nxt = seq + pdu_len_ddp;
1557
1558         if (csk->skb_ulp_lhdr) {
1559                 data_skb = skb_peek(&csk->receive_queue);
1560                 if (!data_skb ||
1561                     !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
1562                         pr_err("Error! freelist data not found 0x%p, tid %u\n",
1563                                data_skb, tid);
1564
1565                         goto abort_conn;
1566                 }
1567                 __skb_unlink(data_skb, &csk->receive_queue);
1568
1569                 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
1570
1571                 __skb_queue_tail(&csk->receive_queue, skb);
1572                 __skb_queue_tail(&csk->receive_queue, data_skb);
1573         } else {
1574                 __skb_queue_tail(&csk->receive_queue, skb);
1575         }
1576
1577         csk->skb_ulp_lhdr = NULL;
1578
1579         cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1580         cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
1581         cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
1582         cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
1583
1584         cxgb4i_process_ddpvld(csk, skb, ddpvld);
1585
1586         log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
1587                   csk, skb, cxgbi_skcb_flags(skb));
1588
1589         cxgbi_conn_pdu_ready(csk);
1590         spin_unlock_bh(&csk->lock);
1591
1592         return;
1593
1594 abort_conn:
1595         send_abort_req(csk);
1596 discard:
1597         spin_unlock_bh(&csk->lock);
1598 rel_skb:
1599         __kfree_skb(skb);
1600 }
1601
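/*
 * Handle CPL_FW4_ACK: the firmware returns TX work-request credits and
 * advances snd_una, letting libcxgbi free acked skbs and push more data
 * from the write queue.
 */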
1602 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1603 {
1604         struct cxgbi_sock *csk;
1605         struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1606         unsigned int tid = GET_TID(rpl);
1607         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1608         struct tid_info *t = lldi->tids;
1609
1610         csk = lookup_tid(t, tid);
1611         if (unlikely(!csk)) {
1612                 pr_err("can't find connection for tid %u.\n", tid);
1613         } else {
1614                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1615                         "csk 0x%p,%u,0x%lx,%u.\n",
1616                         csk, csk->state, csk->flags, csk->tid);
1617                 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1618                                         rpl->seq_vld);
1619         }
1620         __kfree_skb(skb);
1621 }
1622
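/*
 * Completion of a CPL_SET_TCB_FIELD issued by ddp_setup_conn_pgidx() or
 * ddp_setup_conn_digest(): record any error in csk->err and wake the
 * waiter blocked on csk->cmpl.
 */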
1623 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1624 {
1625         struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1626         unsigned int tid = GET_TID(rpl);
1627         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1628         struct tid_info *t = lldi->tids;
1629         struct cxgbi_sock *csk;
1630
1631         csk = lookup_tid(t, tid);
1632         if (!csk) {
1633                 pr_err("can't find conn. for tid %u.\n", tid);
1634                 return;
1635         }
1636
1637         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1638                 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1639                 csk, csk->state, csk->flags, csk->tid, rpl->status);
1640
1641         if (rpl->status != CPL_ERR_NONE) {
1642                 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1643                         csk, tid, rpl->status);
1644                 csk->err = -EINVAL;
1645         }
1646
1647         complete(&csk->cmpl);
1648
1649         __kfree_skb(skb);
1650 }
1651
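/*
 * Pre-allocate the close/abort CPL skbs at connection setup so that
 * connection teardown can never fail on memory allocation.
 */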
1652 static int alloc_cpls(struct cxgbi_sock *csk)
1653 {
1654         csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1655                                         0, GFP_KERNEL);
1656         if (!csk->cpl_close)
1657                 return -ENOMEM;
1658
1659         csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1660                                         0, GFP_KERNEL);
1661         if (!csk->cpl_abort_req)
1662                 goto free_cpls;
1663
1664         csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1665                                         0, GFP_KERNEL);
1666         if (!csk->cpl_abort_rpl)
1667                 goto free_cpls;
1668         return 0;
1669
1670 free_cpls:
1671         cxgbi_sock_free_cpl_skbs(csk);
1672         return -ENOMEM;
1673 }
1674
1675 static inline void l2t_put(struct cxgbi_sock *csk)
1676 {
1677         if (csk->l2t) {
1678                 cxgb4_l2t_release(csk->l2t);
1679                 csk->l2t = NULL;
1680                 cxgbi_sock_put(csk);
1681         }
1682 }
1683
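/*
 * Return everything a connection holds against the adapter: pending CPL
 * and write-queue skbs, outstanding WR credits, the L2T entry, any IPv6
 * CLIP entry, and finally the atid or tid itself.
 */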
1684 static void release_offload_resources(struct cxgbi_sock *csk)
1685 {
1686         struct cxgb4_lld_info *lldi;
1687 #if IS_ENABLED(CONFIG_IPV6)
1688         struct net_device *ndev = csk->cdev->ports[csk->port_id];
1689 #endif
1690
1691         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1692                 "csk 0x%p,%u,0x%lx,%u.\n",
1693                 csk, csk->state, csk->flags, csk->tid);
1694
1695         cxgbi_sock_free_cpl_skbs(csk);
1696         cxgbi_sock_purge_write_queue(csk);
1697         if (csk->wr_cred != csk->wr_max_cred) {
1698                 cxgbi_sock_purge_wr_queue(csk);
1699                 cxgbi_sock_reset_wr_list(csk);
1700         }
1701
1702         l2t_put(csk);
1703 #if IS_ENABLED(CONFIG_IPV6)
1704         if (csk->csk_family == AF_INET6)
1705                 cxgb4_clip_release(ndev,
1706                                    (const u32 *)&csk->saddr6.sin6_addr, 1);
1707 #endif
1708
1709         if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1710                 free_atid(csk);
1711         else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1712                 lldi = cxgbi_cdev_priv(csk->cdev);
1713                 cxgb4_remove_tid(lldi->tids, 0, csk->tid,
1714                                  csk->csk_family);
1715                 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1716                 cxgbi_sock_put(csk);
1717         }
1718         csk->dst = NULL;
1719 }
1720
1721 #ifdef CONFIG_CHELSIO_T4_DCB
1722 static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
1723 {
1724         return ndev->dcbnl_ops->getstate(ndev);
1725 }
1726
1727 static int select_priority(int pri_mask)
1728 {
1729         if (!pri_mask)
1730                 return 0;
1731         return (ffs(pri_mask) - 1);
1732 }
1733
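/*
 * Look up the DCB application-table priority assigned to iSCSI (TCP port
 * 3260), trying the IEEE selectors first and falling back to CEE; returns
 * 0 (default priority) when nothing is configured.
 */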
1734 static u8 get_iscsi_dcb_priority(struct net_device *ndev)
1735 {
1736         int rv;
1737         u8 caps;
1738
1739         struct dcb_app iscsi_dcb_app = {
1740                 .protocol = 3260
1741         };
1742
1743         rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
1744         if (rv)
1745                 return 0;
1746
1747         if (caps & DCB_CAP_DCBX_VER_IEEE) {
1748                 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
1749                 rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1750                 if (!rv) {
1751                         iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
1752                         rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1753                 }
1754         } else if (caps & DCB_CAP_DCBX_VER_CEE) {
1755                 iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
1756                 rv = dcb_getapp(ndev, &iscsi_dcb_app);
1757         }
1758
1759         log_debug(1 << CXGBI_DBG_ISCSI,
1760                   "iSCSI priority is set to %u\n", select_priority(rv));
1761         return select_priority(rv);
1762 }
1763 #endif
1764
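/*
 * Start an active open: resolve the neighbour, allocate an atid and an
 * L2T entry (tagged with the iSCSI DCB priority when enabled), pick the
 * TX queue and RSS queue for the egress port, scale the default send and
 * receive windows with the link speed, then send the chip-specific
 * CPL_ACT_OPEN_REQ/CPL_ACT_OPEN_REQ6.  The reply arrives later as
 * act_open_rpl or act_establish.
 */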
1765 static int init_act_open(struct cxgbi_sock *csk)
1766 {
1767         struct cxgbi_device *cdev = csk->cdev;
1768         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1769         struct net_device *ndev = cdev->ports[csk->port_id];
1770         struct sk_buff *skb = NULL;
1771         struct neighbour *n = NULL;
1772         void *daddr;
1773         unsigned int step;
1774         unsigned int rxq_idx;
1775         unsigned int size, size6;
1776         unsigned int linkspeed;
1777         unsigned int rcv_winf, snd_winf;
1778 #ifdef CONFIG_CHELSIO_T4_DCB
1779         u8 priority = 0;
1780 #endif
1781         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1782                 "csk 0x%p,%u,0x%lx,%u.\n",
1783                 csk, csk->state, csk->flags, csk->tid);
1784
1785         if (csk->csk_family == AF_INET)
1786                 daddr = &csk->daddr.sin_addr.s_addr;
1787 #if IS_ENABLED(CONFIG_IPV6)
1788         else if (csk->csk_family == AF_INET6)
1789                 daddr = &csk->daddr6.sin6_addr;
1790 #endif
1791         else {
1792                 pr_err("address family 0x%x not supported\n", csk->csk_family);
1793                 goto rel_resource;
1794         }
1795
1796         n = dst_neigh_lookup(csk->dst, daddr);
1797
1798         if (!n) {
1799                 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1800                 goto rel_resource;
1801         }
1802
1803         if (!(n->nud_state & NUD_VALID))
1804                 neigh_event_send(n, NULL);
1805
1806         csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1807         if (csk->atid < 0) {
1808                 pr_err("%s, NO atid available.\n", ndev->name);
1809                 goto rel_resource_without_clip;
1810         }
1811         cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1812         cxgbi_sock_get(csk);
1813
1814 #ifdef CONFIG_CHELSIO_T4_DCB
1815         if (get_iscsi_dcb_state(ndev))
1816                 priority = get_iscsi_dcb_priority(ndev);
1817
1818         csk->dcb_priority = priority;
1819         csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
1820 #else
1821         csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1822 #endif
1823         if (!csk->l2t) {
1824                 pr_err("%s, cannot alloc l2t.\n", ndev->name);
1825                 goto rel_resource_without_clip;
1826         }
1827         cxgbi_sock_get(csk);
1828
1829 #if IS_ENABLED(CONFIG_IPV6)
1830         if (csk->csk_family == AF_INET6)
1831                 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
1832 #endif
1833
1834         if (is_t4(lldi->adapter_type)) {
1835                 size = sizeof(struct cpl_act_open_req);
1836                 size6 = sizeof(struct cpl_act_open_req6);
1837         } else if (is_t5(lldi->adapter_type)) {
1838                 size = sizeof(struct cpl_t5_act_open_req);
1839                 size6 = sizeof(struct cpl_t5_act_open_req6);
1840         } else {
1841                 size = sizeof(struct cpl_t6_act_open_req);
1842                 size6 = sizeof(struct cpl_t6_act_open_req6);
1843         }
1844
1845         if (csk->csk_family == AF_INET)
1846                 skb = alloc_wr(size, 0, GFP_NOIO);
1847 #if IS_ENABLED(CONFIG_IPV6)
1848         else
1849                 skb = alloc_wr(size6, 0, GFP_NOIO);
1850 #endif
1851
1852         if (!skb)
1853                 goto rel_resource;
1854         skb->sk = (struct sock *)csk;
1855         t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1856
1857         if (!csk->mtu)
1858                 csk->mtu = dst_mtu(csk->dst);
1859         cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1860         csk->tx_chan = cxgb4_port_chan(ndev);
1861         csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1862         step = lldi->ntxq / lldi->nchan;
1863         csk->txq_idx = cxgb4_port_idx(ndev) * step;
1864         step = lldi->nrxq / lldi->nchan;
1865         rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
1866         cdev->rxq_idx_cntr++;
1867         csk->rss_qid = lldi->rxq_ids[rxq_idx];
1868         linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
1869         csk->snd_win = cxgb4i_snd_win;
1870         csk->rcv_win = cxgb4i_rcv_win;
1871         if (cxgb4i_rcv_win <= 0) {
1872                 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
1873                 rcv_winf = linkspeed / SPEED_10000;
1874                 if (rcv_winf)
1875                         csk->rcv_win *= rcv_winf;
1876         }
1877         if (cxgb4i_snd_win <= 0) {
1878                 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
1879                 snd_winf = linkspeed / SPEED_10000;
1880                 if (snd_winf)
1881                         csk->snd_win *= snd_winf;
1882         }
1883         csk->wr_cred = lldi->wr_cred -
1884                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1885         csk->wr_max_cred = csk->wr_cred;
1886         csk->wr_una_cred = 0;
1887         cxgbi_sock_reset_wr_list(csk);
1888         csk->err = 0;
1889
1890         pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1891                        (&csk->saddr), (&csk->daddr), csk, csk->state,
1892                        csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1893                        csk->mtu, csk->mss_idx, csk->smac_idx);
1894
1895         /* must wait for either a act_open_rpl or act_open_establish */
1896         if (!try_module_get(cdev->owner)) {
1897                 pr_err("%s, try_module_get failed.\n", ndev->name);
1898                 goto rel_resource;
1899         }
1900
1901         cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1902         if (csk->csk_family == AF_INET)
1903                 send_act_open_req(csk, skb, csk->l2t);
1904 #if IS_ENABLED(CONFIG_IPV6)
1905         else
1906                 send_act_open_req6(csk, skb, csk->l2t);
1907 #endif
1908         neigh_release(n);
1909
1910         return 0;
1911
1912 rel_resource:
1913 #if IS_ENABLED(CONFIG_IPV6)
1914         if (csk->csk_family == AF_INET6)
1915                 cxgb4_clip_release(ndev,
1916                                    (const u32 *)&csk->saddr6.sin6_addr, 1);
1917 #endif
1918 rel_resource_without_clip:
1919         if (n)
1920                 neigh_release(n);
1921         if (skb)
1922                 __kfree_skb(skb);
1923         return -EINVAL;
1924 }
1925
1926 static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1927         [CPL_ACT_ESTABLISH] = do_act_establish,
1928         [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
1929         [CPL_PEER_CLOSE] = do_peer_close,
1930         [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
1931         [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
1932         [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1933         [CPL_FW4_ACK] = do_fw4_ack,
1934         [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1935         [CPL_ISCSI_DATA] = do_rx_iscsi_data,
1936         [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1937         [CPL_RX_DATA_DDP] = do_rx_data_ddp,
1938         [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1939         [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
1940         [CPL_RX_DATA] = do_rx_data,
1941 };
1942
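/*
 * Hook up the connection-level callbacks libcxgbi uses and create the
 * source-port map of cxgb4i_max_connect ports starting at
 * cxgb4i_sport_base.
 */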
1943 static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1944 {
1945         int rc;
1946
1947         if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1948                 cxgb4i_max_connect = CXGB4I_MAX_CONN;
1949
1950         rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1951                                         cxgb4i_max_connect);
1952         if (rc < 0)
1953                 return rc;
1954
1955         cdev->csk_release_offload_resources = release_offload_resources;
1956         cdev->csk_push_tx_frames = push_tx_frames;
1957         cdev->csk_send_abort_req = send_abort_req;
1958         cdev->csk_send_close_req = send_close_req;
1959         cdev->csk_send_rx_credits = send_rx_credits;
1960         cdev->csk_alloc_cpls = alloc_cpls;
1961         cdev->csk_init_act_open = init_act_open;
1962
1963         pr_info("cdev 0x%p, offload up, added.\n", cdev);
1964         return 0;
1965 }
1966
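/*
 * Build the FW_ULPTX_WR/ULP_TX_MEM_WRITE header for writing dlen bytes of
 * immediate data (the ULP_TX_SC_IMM sub-command that follows) into
 * adapter pagepod memory at pm_addr.  T5 and later use the
 * T5_ULP_MEMIO_IMM encoding; T4 uses ULP_MEMIO_ORDER instead.
 */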
1967 static inline void
1968 ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
1969                    struct ulp_mem_io *req,
1970                    unsigned int wr_len, unsigned int dlen,
1971                    unsigned int pm_addr,
1972                    int tid)
1973 {
1974         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1975         struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
1976
1977         INIT_ULPTX_WR(req, wr_len, 0, tid);
1978         req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
1979                 FW_WR_ATOMIC_V(0));
1980         req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1981                 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
1982                 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
1983         req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1984         req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
1985         req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1986
1987         idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
1988         idata->len = htonl(dlen);
1989 }
1990
1991 static struct sk_buff *
1992 ddp_ppod_init_idata(struct cxgbi_device *cdev,
1993                     struct cxgbi_ppm *ppm,
1994                     unsigned int idx, unsigned int npods,
1995                     unsigned int tid)
1996 {
1997         unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
1998         unsigned int dlen = npods << PPOD_SIZE_SHIFT;
1999         unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
2000                                 sizeof(struct ulptx_idata) + dlen, 16);
2001         struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
2002
2003         if (!skb) {
2004                 pr_err("%s: %s idx %u, npods %u, OOM.\n",
2005                        __func__, ppm->ndev->name, idx, npods);
2006                 return NULL;
2007         }
2008
2009         ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
2010                            pm_addr, tid);
2011
2012         return skb;
2013 }
2014
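/*
 * Write a run of pagepods as immediate data: fill them in from the task's
 * scatterlist, then queue the work request on the connection's write
 * queue so the memory write is ordered with the PDU traffic.
 */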
2015 static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2016                                 struct cxgbi_task_tag_info *ttinfo,
2017                                 unsigned int idx, unsigned int npods,
2018                                 struct scatterlist **sg_pp,
2019                                 unsigned int *sg_off)
2020 {
2021         struct cxgbi_device *cdev = csk->cdev;
2022         struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
2023                                                   csk->tid);
2024         struct ulp_mem_io *req;
2025         struct ulptx_idata *idata;
2026         struct cxgbi_pagepod *ppod;
2027         int i;
2028
2029         if (!skb)
2030                 return -ENOMEM;
2031
2032         req = (struct ulp_mem_io *)skb->head;
2033         idata = (struct ulptx_idata *)(req + 1);
2034         ppod = (struct cxgbi_pagepod *)(idata + 1);
2035
2036         for (i = 0; i < npods; i++, ppod++)
2037                 cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
2038
2039         cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
2040         cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
2041         set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
2042
2043         spin_lock_bh(&csk->lock);
2044         cxgbi_sock_skb_entail(csk, skb);
2045         spin_unlock_bh(&csk->lock);
2046
2047         return 0;
2048 }
2049
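/*
 * Program all pagepods backing a task's DDP mapping, chunked so that each
 * write fits the immediate-data limit (ULPMEM_IDATA_MAX_NPPODS) of a
 * single work request.
 */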
2050 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2051                        struct cxgbi_task_tag_info *ttinfo)
2052 {
2053         unsigned int pidx = ttinfo->idx;
2054         unsigned int npods = ttinfo->npods;
2055         unsigned int i, cnt;
2056         int err = 0;
2057         struct scatterlist *sg = ttinfo->sgl;
2058         unsigned int offset = 0;
2059
2060         ttinfo->cid = csk->port_id;
2061
2062         for (i = 0; i < npods; i += cnt, pidx += cnt) {
2063                 cnt = npods - i;
2064
2065                 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
2066                         cnt = ULPMEM_IDATA_MAX_NPPODS;
2067                 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
2068                                            &sg, &offset);
2069                 if (err < 0)
2070                         break;
2071         }
2072
2073         return err;
2074 }
2075
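/*
 * Set the ULP page-size index in the connection's TCB via
 * CPL_SET_TCB_FIELD and wait for the reply (do_set_tcb_rpl() completes
 * csk->cmpl).  ddp_setup_conn_digest() below uses the same mechanism for
 * the header/data CRC submode bits.
 */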
2076 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2077                                 int pg_idx)
2078 {
2079         struct sk_buff *skb;
2080         struct cpl_set_tcb_field *req;
2081
2082         if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
2083                 return 0;
2084
2085         skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
2086         if (!skb)
2087                 return -ENOMEM;
2088
2089         /*  set up ulp page size */
2090         req = (struct cpl_set_tcb_field *)skb->head;
2091         INIT_TP_WR(req, csk->tid);
2092         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2093         req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2094         req->word_cookie = htons(0);
2095         req->mask = cpu_to_be64(0x3 << 8);
2096         req->val = cpu_to_be64(pg_idx << 8);
2097         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2098
2099         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2100                 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2101
2102         reinit_completion(&csk->cmpl);
2103         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2104         wait_for_completion(&csk->cmpl);
2105
2106         return csk->err;
2107 }
2108
2109 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2110                                  int hcrc, int dcrc)
2111 {
2112         struct sk_buff *skb;
2113         struct cpl_set_tcb_field *req;
2114
2115         if (!hcrc && !dcrc)
2116                 return 0;
2117
2118         skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
2119         if (!skb)
2120                 return -ENOMEM;
2121
2122         csk->hcrc_len = (hcrc ? 4 : 0);
2123         csk->dcrc_len = (dcrc ? 4 : 0);
2124         /*  set up ulp submode */
2125         req = (struct cpl_set_tcb_field *)skb->head;
2126         INIT_TP_WR(req, tid);
2127         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2128         req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2129         req->word_cookie = htons(0);
2130         req->mask = cpu_to_be64(0x3 << 4);
2131         req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
2132                                 (dcrc ? ULP_CRC_DATA : 0)) << 4);
2133         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2134
2135         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2136                 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2137
2138         reinit_completion(&csk->cmpl);
2139         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2140         wait_for_completion(&csk->cmpl);
2141
2142         return csk->err;
2143 }
2144
2145 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2146 {
2147         return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
2148                                        (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
2149 }
2150
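/*
 * Set up direct data placement: require an iSCSI memory region on the
 * adapter, derive the tag format from the adapter's page-size orders and
 * tag mask, create the pagepod manager, and cap the TX/RX PDU payload
 * sizes.
 */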
2151 static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
2152 {
2153         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
2154         struct net_device *ndev = cdev->ports[0];
2155         struct cxgbi_tag_format tformat;
2156         int i, err;
2157
2158         if (!lldi->vr->iscsi.size) {
2159                 pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
2160                 return -EACCES;
2161         }
2162
2163         cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
2164
2165         memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
2166         for (i = 0; i < 4; i++)
2167                 tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
2168                                          & 0xF;
2169         cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
2170
2171         pr_info("iscsi_edram.start 0x%x, iscsi_edram.size 0x%x.\n",
2172                 lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);
2173
2174         err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
2175                                   lldi->vr->iscsi.size, lldi->iscsi_llimit,
2176                                   lldi->vr->iscsi.start, 2,
2177                                   lldi->vr->ppod_edram.start,
2178                                   lldi->vr->ppod_edram.size);
2179
2180         if (err < 0)
2181                 return err;
2182
2183         cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
2184         cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
2185         cdev->csk_ddp_set_map = ddp_set_map;
2186         cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2187                                   lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2188         cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2189                                   lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2190         cdev->cdev2ppm = cdev2ppm;
2191
2192         return 0;
2193 }
2194
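/* A memory-free ("SO") adapter has no external memory enabled. */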
2195 static bool is_memfree(struct adapter *adap)
2196 {
2197         u32 io;
2198
2199         io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
2200         if (is_t5(adap->params.chip)) {
2201                 if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F))
2202                         return false;
2203         } else if (io & EXT_MEM_ENABLE_F) {
2204                 return false;
2205         }
2206
2207         return true;
2208 }
2209
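/*
 * ULD add callback, invoked by cxgb4 once per adapter: register a
 * cxgbi_device, copy the lower-layer driver info, initialize DDP and
 * offload callbacks, size the SCSI host queue depth (quartered on
 * memory-free adapters, which also lose ISO), and create one iSCSI host
 * per port.
 */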
2210 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
2211 {
2212         struct cxgbi_device *cdev;
2213         struct port_info *pi;
2214         struct net_device *ndev;
2215         struct adapter *adap;
2216         struct tid_info *t;
2217         u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH;
2218         u32 max_conn = CXGBI_MAX_CONN;
2219         int i, rc;
2220
2221         cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
2222         if (!cdev) {
2223                 pr_info("t4 device 0x%p, register failed.\n", lldi);
2224                 return NULL;
2225         }
2226         pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
2227                 cdev, lldi->adapter_type, lldi->nports,
2228                 lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
2229                 lldi->nrxq, lldi->wr_cred);
2230         for (i = 0; i < lldi->nrxq; i++)
2231                 log_debug(1 << CXGBI_DBG_DEV,
2232                         "t4 0x%p, rxq id #%d: %u.\n",
2233                         cdev, i, lldi->rxq_ids[i]);
2234
2235         memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
2236         cdev->flags = CXGBI_FLAG_DEV_T4;
2237         cdev->pdev = lldi->pdev;
2238         cdev->ports = lldi->ports;
2239         cdev->nports = lldi->nports;
2240         cdev->mtus = lldi->mtus;
2241         cdev->nmtus = NMTUS;
2242         cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
2243                                  CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
2244         cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
2245         cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
2246         cdev->itp = &cxgb4i_iscsi_transport;
2247         cdev->owner = THIS_MODULE;
2248
2249         cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
2250         pr_info("cdev 0x%p,%s, pfvf %u.\n",
2251                 cdev, lldi->ports[0]->name, cdev->pfvf);
2252
2253         rc = cxgb4i_ddp_init(cdev);
2254         if (rc) {
2255                 pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
2256                 goto err_out;
2257         }
2258
2259         ndev = cdev->ports[0];
2260         adap = netdev2adap(ndev);
2261         if (adap) {
2262                 t = &adap->tids;
2263                 if (t->ntids <= CXGBI_MAX_CONN)
2264                         max_conn = t->ntids;
2265
2266                 if (is_memfree(adap)) {
2267                         cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
2268                         max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2;
2269
2270                         pr_info("%s: 0x%p, ntids %u, SO adapter.\n",
2271                                 ndev->name, cdev, t->ntids);
2272                 }
2273         } else {
2274                 pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
2275         }
2276
2277         /* ISO is enabled in T5/T6 firmware version >= 1.13.43.0 */
2278         if (!is_t4(lldi->adapter_type) &&
2279             (lldi->fw_vers >= 0x10d2b00) &&
2280             !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
2281                 cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);
2282
2283         rc = cxgb4i_ofld_init(cdev);
2284         if (rc) {
2285                 pr_info("t4 0x%p ofld init failed.\n", cdev);
2286                 goto err_out;
2287         }
2288
2289         cxgb4i_host_template.can_queue = max_cmds;
2290         rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
2291                             &cxgb4i_host_template, cxgb4i_stt);
2292         if (rc)
2293                 goto err_out;
2294
2295         for (i = 0; i < cdev->nports; i++) {
2296                 pi = netdev_priv(lldi->ports[i]);
2297                 cdev->hbas[i]->port_id = pi->port_id;
2298         }
2299         return cdev;
2300
2301 err_out:
2302         cxgbi_device_unregister(cdev);
2303         return ERR_PTR(-ENOMEM);
2304 }
2305
2306 #define RX_PULL_LEN     128
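/*
 * ULD RX callback: reassemble the CPL message into an skb (small direct
 * responses are copied, packet-gather lists are pulled up) and dispatch
 * it by opcode through cxgb4i_cplhandlers[].
 */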
2307 static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
2308                                 const struct pkt_gl *pgl)
2309 {
2310         const struct cpl_act_establish *rpl;
2311         struct sk_buff *skb;
2312         unsigned int opc;
2313         struct cxgbi_device *cdev = handle;
2314
2315         if (pgl == NULL) {
2316                 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
2317
2318                 skb = alloc_wr(len, 0, GFP_ATOMIC);
2319                 if (!skb)
2320                         goto nomem;
2321                 skb_copy_to_linear_data(skb, &rsp[1], len);
2322         } else {
2323                 if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
2324                         pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
2325                                 pgl->va, be64_to_cpu(*rsp),
2326                                 be64_to_cpu(*(u64 *)pgl->va),
2327                                 pgl->tot_len);
2328                         return 0;
2329                 }
2330                 skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
2331                 if (unlikely(!skb))
2332                         goto nomem;
2333         }
2334
2335         rpl = (struct cpl_act_establish *)skb->data;
2336         opc = rpl->ot.opcode;
2337         log_debug(1 << CXGBI_DBG_TOE,
2338                 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
2339                  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
2340         if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
2341                 pr_err("No handler for opcode 0x%x.\n", opc);
2342                 __kfree_skb(skb);
2343         } else
2344                 cxgb4i_cplhandlers[opc](cdev, skb);
2345
2346         return 0;
2347 nomem:
2348         log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
2349         return 1;
2350 }
2351
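/* ULD state callback: unregister the device when the adapter detaches. */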
2352 static int t4_uld_state_change(void *handle, enum cxgb4_state state)
2353 {
2354         struct cxgbi_device *cdev = handle;
2355
2356         switch (state) {
2357         case CXGB4_STATE_UP:
2358                 pr_info("cdev 0x%p, UP.\n", cdev);
2359                 break;
2360         case CXGB4_STATE_START_RECOVERY:
2361                 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
2362                 /* close all connections */
2363                 break;
2364         case CXGB4_STATE_DOWN:
2365                 pr_info("cdev 0x%p, DOWN.\n", cdev);
2366                 break;
2367         case CXGB4_STATE_DETACH:
2368                 pr_info("cdev 0x%p, DETACH.\n", cdev);
2369                 cxgbi_device_unregister(cdev);
2370                 break;
2371         default:
2372                 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
2373                 break;
2374         }
2375         return 0;
2376 }
2377
2378 #ifdef CONFIG_CHELSIO_T4_DCB
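/*
 * DCB event notifier: when a new priority is pushed for the iSCSI
 * application (TCP port 3260), fail every offloaded connection whose
 * priority no longer matches so it reconnects with the new one.
 */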
2379 static int
2380 cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
2381                         void *data)
2382 {
2383         int i, port = 0xFF;
2384         struct net_device *ndev;
2385         struct cxgbi_device *cdev = NULL;
2386         struct dcb_app_type *iscsi_app = data;
2387         struct cxgbi_ports_map *pmap;
2388         u8 priority;
2389
2390         if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
2391                 if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
2392                     (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
2393                         return NOTIFY_DONE;
2394
2395                 priority = iscsi_app->app.priority;
2396         } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
2397                 if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
2398                         return NOTIFY_DONE;
2399
2400                 if (!iscsi_app->app.priority)
2401                         return NOTIFY_DONE;
2402
2403                 priority = ffs(iscsi_app->app.priority) - 1;
2404         } else {
2405                 return NOTIFY_DONE;
2406         }
2407
2408         if (iscsi_app->app.protocol != 3260)
2409                 return NOTIFY_DONE;
2410
2411         log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
2412                   iscsi_app->ifindex, priority);
2413
2414         ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
2415         if (!ndev)
2416                 return NOTIFY_DONE;
2417
2418         cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
2419
2420         dev_put(ndev);
2421         if (!cdev)
2422                 return NOTIFY_DONE;
2423
2424         pmap = &cdev->pmap;
2425
2426         for (i = 0; i < pmap->used; i++) {
2427                 if (pmap->port_csk[i]) {
2428                         struct cxgbi_sock *csk = pmap->port_csk[i];
2429
2430                         if (csk->dcb_priority != priority) {
2431                                 iscsi_conn_failure(csk->user_data,
2432                                                    ISCSI_ERR_CONN_FAILED);
2433                                 pr_info("Restarting iSCSI connection %p with "
2434                                         "priority %u->%u.\n", csk,
2435                                         csk->dcb_priority, priority);
2436                         }
2437                 }
2438         }
2439         return NOTIFY_OK;
2440 }

/* Notifier block handed to register_dcbevent_notifier() below. */
static struct notifier_block cxgb4_dcb_change = {
	.notifier_call = cxgb4_dcb_change_notify,
};
2441 #endif
2442
2443 static int __init cxgb4i_init_module(void)
2444 {
2445         int rc;
2446
2447         printk(KERN_INFO "%s", version);
2448
2449         rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
2450         if (rc < 0)
2451                 return rc;
2452         cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
2453
2454 #ifdef CONFIG_CHELSIO_T4_DCB
2455         pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
2456         register_dcbevent_notifier(&cxgb4_dcb_change);
2457 #endif
2458         return 0;
2459 }
2460
2461 static void __exit cxgb4i_exit_module(void)
2462 {
2463 #ifdef CONFIG_CHELSIO_T4_DCB
2464         unregister_dcbevent_notifier(&cxgb4_dcb_change);
2465 #endif
2466         cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
2467         cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
2468         cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
2469 }
2470
2471 module_init(cxgb4i_init_module);
2472 module_exit(cxgb4i_exit_module);