/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
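/*
 * Note: the tag lookup above masks with (num_itnims - 1), which equals
 * (_tag % num_itnims) only when num_itnims is a power of two; the lookup
 * appears to rely on that sizing assumption.
 */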

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));      \
        }                                                               \
} while (0)

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);      \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
        BFA_TSKIM_SM_UTAG       = 10,   /*  TM completion unknown tag  */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}


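/*
 * Attach-time initialization: save driver/firmware configuration in the
 * FCP(im) module and attach the ITNIM, TSKIM and IOIM sub-modules.
 */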
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to tskim_free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

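/*
 * Turn on IO profiling: clear per-itnim stats and install the profile
 * callbacks invoked at IO start and completion.
 */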
bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats for all itnims before profiling starts */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

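/*
 * Waiting for request queue space to send the itnim create request.
 */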
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

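/*
 * Waiting for request queue space to send the itnim delete request.
 */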
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

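/*
 * IOC h/w failure.
 */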
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

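/*
 * Itnim delete is pending, waiting for request queue space to send the
 * firmware delete request.
 */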
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move the IO from the active queue to a cleanup queue so
                 * that a later TM will not pick it up.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

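/*
 * Carve per-itnim structures out of the module KVA block and initialize
 * each itnim in the uninit state.
 */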
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

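/*
 * Send an itnim create request to firmware. Returns BFA_FALSE and queues
 * a request-queue wait element if no message slot is available.
 */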
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

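/*
 * Send an itnim delete request to firmware. Returns BFA_FALSE and queues
 * a request-queue wait element if no message slot is available.
 */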
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

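/*
 * Fold the stats of an itnim being deleted into the module-wide
 * deleted-itnim counters.
 */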
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

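/*
 * Illustrative lifecycle sketch (an assumption about the caller, not
 * lifted from the FCS code): a caller is expected to drive an itnim
 * through the API below roughly as
 *
 *      itnim = bfa_itnim_create(bfa, rport, ditn);     - SM_CREATE
 *      bfa_itnim_online(itnim, seq_rec);               - SM_ONLINE
 *      ... IO traffic ...
 *      bfa_itnim_offline(itnim);                       - SM_OFFLINE
 *      bfa_itnim_delete(itnim);                        - SM_DELETE
 */
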
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

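/*
 * Latency samples are accumulated in jiffies; the clock_res_mul/
 * clock_res_div values reported below let consumers convert them
 * (jiffies * 1000 / HZ = milliseconds).
 */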
#define bfa_io_lat_clock_res_div        HZ
#define bfa_io_lat_clock_res_mul        1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                        struct bfa_itnim_ioprofile_s *ioprofile)
{
        struct bfa_fcpim_s *fcpim;

        if (!itnim)
                return BFA_STATUS_NO_FCPIM_NEXUS;

        fcpim = BFA_FCPIM(itnim->bfa);

        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;

        itnim->ioprofile.index = BFA_IOBUCKET_MAX;
        /* unsigned 32-bit time_t overflow here in y2106 */
        itnim->ioprofile.io_profile_start_time =
                                bfa_io_profile_start_time(itnim->bfa);
        itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
        itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
        *ioprofile = itnim->ioprofile;

        return BFA_STATUS_OK;
}

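/*
 * Clear accumulated IO stats and reset the latency profile minimums.
 */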
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;

        if (!itnim)
                return;

        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 *  BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_abort, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1602                               ioim);
1603                 break;
1604
1605         case BFA_IOIM_SM_HWFAIL:
1606                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1607                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1608                 bfa_ioim_move_to_comp_q(ioim);
1609                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1610                               ioim);
1611                 break;
1612
1613         default:
1614                 bfa_sm_fault(ioim->bfa, event);
1615         }
1616 }
1617
1618 /*
1619  * IO is active.
1620  */
1621 static void
1622 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1623 {
1624         switch (event) {
1625         case BFA_IOIM_SM_COMP_GOOD:
1626                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1627                 bfa_ioim_move_to_comp_q(ioim);
1628                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1629                               __bfa_cb_ioim_good_comp, ioim);
1630                 break;
1631
1632         case BFA_IOIM_SM_COMP:
1633                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1634                 bfa_ioim_move_to_comp_q(ioim);
1635                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1636                               ioim);
1637                 break;
1638
1639         case BFA_IOIM_SM_DONE:
1640                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1641                 bfa_ioim_move_to_comp_q(ioim);
1642                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1643                               ioim);
1644                 break;
1645
1646         case BFA_IOIM_SM_ABORT:
1647                 ioim->iosp->abort_explicit = BFA_TRUE;
1648                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1649
1650                 if (bfa_ioim_send_abort(ioim))
1651                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1652                 else {
1653                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1654                         bfa_stats(ioim->itnim, qwait);
1655                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1656                                           &ioim->iosp->reqq_wait);
1657                 }
1658                 break;
1659
1660         case BFA_IOIM_SM_CLEANUP:
1661                 ioim->iosp->abort_explicit = BFA_FALSE;
1662                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1663
1664                 if (bfa_ioim_send_abort(ioim))
1665                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1666                 else {
1667                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1668                         bfa_stats(ioim->itnim, qwait);
1669                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1670                                           &ioim->iosp->reqq_wait);
1671                 }
1672                 break;
1673
1674         case BFA_IOIM_SM_HWFAIL:
1675                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1676                 bfa_ioim_move_to_comp_q(ioim);
1677                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1678                               ioim);
1679                 break;
1680
1681         case BFA_IOIM_SM_SQRETRY:
1682                 if (bfa_ioim_maxretry_reached(ioim)) {
1683                         /* max retry reached, free IO */
1684                         bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1685                         bfa_ioim_move_to_comp_q(ioim);
1686                         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1687                                         __bfa_cb_ioim_failed, ioim);
1688                         break;
1689                 }
1690                 /* waiting for the IO tag resource to be freed */
1691                 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1692                 break;
1693
1694         default:
1695                 bfa_sm_fault(ioim->bfa, event);
1696         }
1697 }
1698
1699 /*
1700  * IO is retried with new tag.
1701  */
1702 static void
1703 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1704 {
1705         switch (event) {
1706         case BFA_IOIM_SM_FREE:
1707                 /* ABTS and RRQ are done. Now retry the IO with a new tag */
1708                 bfa_ioim_update_iotag(ioim);
1709                 if (!bfa_ioim_send_ioreq(ioim)) {
1710                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1711                         break;
1712                 }
1713                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1714         break;
1715
1716         case BFA_IOIM_SM_CLEANUP:
1717                 ioim->iosp->abort_explicit = BFA_FALSE;
1718                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1719
1720                 if (bfa_ioim_send_abort(ioim))
1721                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1722                 else {
1723                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1724                         bfa_stats(ioim->itnim, qwait);
1725                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1726                                           &ioim->iosp->reqq_wait);
1727                 }
1728         break;
1729
1730         case BFA_IOIM_SM_HWFAIL:
1731                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1732                 bfa_ioim_move_to_comp_q(ioim);
1733                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1734                          __bfa_cb_ioim_failed, ioim);
1735                 break;
1736
1737         case BFA_IOIM_SM_ABORT:
1738                 /* In this state the IO abort is already done.
1739                  * Waiting for the IO tag resource to be freed.
1740                  */
1741                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1742                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1743                               ioim);
1744                 break;
1745
1746         default:
1747                 bfa_sm_fault(ioim->bfa, event);
1748         }
1749 }
1750
1751 /*
1752  * IO is being aborted, waiting for completion from firmware.
1753  */
1754 static void
1755 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1756 {
1757         bfa_trc(ioim->bfa, ioim->iotag);
1758         bfa_trc(ioim->bfa, event);
1759
1760         switch (event) {
1761         case BFA_IOIM_SM_COMP_GOOD:
1762         case BFA_IOIM_SM_COMP:
1763         case BFA_IOIM_SM_DONE:
1764         case BFA_IOIM_SM_FREE:
1765                 break;
1766
1767         case BFA_IOIM_SM_ABORT_DONE:
1768                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1769                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1770                               ioim);
1771                 break;
1772
1773         case BFA_IOIM_SM_ABORT_COMP:
1774                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1775                 bfa_ioim_move_to_comp_q(ioim);
1776                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1777                               ioim);
1778                 break;
1779
1780         case BFA_IOIM_SM_COMP_UTAG:
1781                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1782                 bfa_ioim_move_to_comp_q(ioim);
1783                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1784                               ioim);
1785                 break;
1786
1787         case BFA_IOIM_SM_CLEANUP:
1788                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1789                 ioim->iosp->abort_explicit = BFA_FALSE;
1790
1791                 if (bfa_ioim_send_abort(ioim))
1792                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1793                 else {
1794                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1795                         bfa_stats(ioim->itnim, qwait);
1796                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1797                                           &ioim->iosp->reqq_wait);
1798                 }
1799                 break;
1800
1801         case BFA_IOIM_SM_HWFAIL:
1802                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1803                 bfa_ioim_move_to_comp_q(ioim);
1804                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1805                               ioim);
1806                 break;
1807
1808         default:
1809                 bfa_sm_fault(ioim->bfa, event);
1810         }
1811 }
1812
1813 /*
1814  * IO is being cleaned up (implicit abort), waiting for completion from
1815  * firmware.
1816  */
1817 static void
1818 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1819 {
1820         bfa_trc(ioim->bfa, ioim->iotag);
1821         bfa_trc(ioim->bfa, event);
1822
1823         switch (event) {
1824         case BFA_IOIM_SM_COMP_GOOD:
1825         case BFA_IOIM_SM_COMP:
1826         case BFA_IOIM_SM_DONE:
1827         case BFA_IOIM_SM_FREE:
1828                 break;
1829
1830         case BFA_IOIM_SM_ABORT:
1831                 /*
1832                  * IO is already being aborted implicitly
1833                  */
1834                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1835                 break;
1836
1837         case BFA_IOIM_SM_ABORT_DONE:
1838                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1839                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1840                 bfa_ioim_notify_cleanup(ioim);
1841                 break;
1842
1843         case BFA_IOIM_SM_ABORT_COMP:
1844                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1845                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1846                 bfa_ioim_notify_cleanup(ioim);
1847                 break;
1848
1849         case BFA_IOIM_SM_COMP_UTAG:
1850                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1851                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1852                 bfa_ioim_notify_cleanup(ioim);
1853                 break;
1854
1855         case BFA_IOIM_SM_HWFAIL:
1856                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1857                 bfa_ioim_move_to_comp_q(ioim);
1858                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1859                               ioim);
1860                 break;
1861
1862         case BFA_IOIM_SM_CLEANUP:
1863                 /*
1864                  * IO can be in cleanup state already due to TM command.
1865                  * 2nd cleanup request comes from ITN offline event.
1866                  */
1867                 break;
1868
1869         default:
1870                 bfa_sm_fault(ioim->bfa, event);
1871         }
1872 }
1873
1874 /*
1875  * IO is waiting for room in request CQ
1876  */
1877 static void
1878 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1879 {
1880         bfa_trc(ioim->bfa, ioim->iotag);
1881         bfa_trc(ioim->bfa, event);
1882
1883         switch (event) {
1884         case BFA_IOIM_SM_QRESUME:
1885                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1886                 bfa_ioim_send_ioreq(ioim);
1887                 break;
1888
1889         case BFA_IOIM_SM_ABORT:
1890                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1891                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1892                 bfa_ioim_move_to_comp_q(ioim);
1893                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1894                               ioim);
1895                 break;
1896
1897         case BFA_IOIM_SM_CLEANUP:
1898                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1899                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1900                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1901                               ioim);
1902                 bfa_ioim_notify_cleanup(ioim);
1903                 break;
1904
1905         case BFA_IOIM_SM_HWFAIL:
1906                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1907                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1908                 bfa_ioim_move_to_comp_q(ioim);
1909                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1910                               ioim);
1911                 break;
1912
1913         default:
1914                 bfa_sm_fault(ioim->bfa, event);
1915         }
1916 }
1917
1918 /*
1919  * Active IO is being aborted, waiting for room in request CQ.
1920  */
1921 static void
1922 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1923 {
1924         bfa_trc(ioim->bfa, ioim->iotag);
1925         bfa_trc(ioim->bfa, event);
1926
1927         switch (event) {
1928         case BFA_IOIM_SM_QRESUME:
1929                 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1930                 bfa_ioim_send_abort(ioim);
1931                 break;
1932
1933         case BFA_IOIM_SM_CLEANUP:
1934                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1935                 ioim->iosp->abort_explicit = BFA_FALSE;
1936                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1937                 break;
1938
1939         case BFA_IOIM_SM_COMP_GOOD:
1940         case BFA_IOIM_SM_COMP:
1941                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1942                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1943                 bfa_ioim_move_to_comp_q(ioim);
1944                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1945                               ioim);
1946                 break;
1947
1948         case BFA_IOIM_SM_DONE:
1949                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1950                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1951                 bfa_ioim_move_to_comp_q(ioim);
1952                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1953                               ioim);
1954                 break;
1955
1956         case BFA_IOIM_SM_HWFAIL:
1957                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1958                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1959                 bfa_ioim_move_to_comp_q(ioim);
1960                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1961                               ioim);
1962                 break;
1963
1964         default:
1965                 bfa_sm_fault(ioim->bfa, event);
1966         }
1967 }
1968
1969 /*
1970  * Active IO is being cleaned up, waiting for room in request CQ.
1971  */
1972 static void
1973 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1974 {
1975         bfa_trc(ioim->bfa, ioim->iotag);
1976         bfa_trc(ioim->bfa, event);
1977
1978         switch (event) {
1979         case BFA_IOIM_SM_QRESUME:
1980                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1981                 bfa_ioim_send_abort(ioim);
1982                 break;
1983
1984         case BFA_IOIM_SM_ABORT:
1985                 /*
1986                  * IO is already being cleaned up implicitly
1987                  */
1988                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1989                 break;
1990
1991         case BFA_IOIM_SM_COMP_GOOD:
1992         case BFA_IOIM_SM_COMP:
1993                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1994                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1995                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1996                 bfa_ioim_notify_cleanup(ioim);
1997                 break;
1998
1999         case BFA_IOIM_SM_DONE:
2000                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
2001                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2002                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2003                 bfa_ioim_notify_cleanup(ioim);
2004                 break;
2005
2006         case BFA_IOIM_SM_HWFAIL:
2007                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2008                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2009                 bfa_ioim_move_to_comp_q(ioim);
2010                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2011                               ioim);
2012                 break;
2013
2014         default:
2015                 bfa_sm_fault(ioim->bfa, event);
2016         }
2017 }
2018
2019 /*
2020  * IO bfa callback is pending.
2021  */
2022 static void
2023 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2024 {
2025         switch (event) {
2026         case BFA_IOIM_SM_HCB:
2027                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2028                 bfa_ioim_free(ioim);
2029                 break;
2030
2031         case BFA_IOIM_SM_CLEANUP:
2032                 bfa_ioim_notify_cleanup(ioim);
2033                 break;
2034
2035         case BFA_IOIM_SM_HWFAIL:
2036                 break;
2037
2038         default:
2039                 bfa_sm_fault(ioim->bfa, event);
2040         }
2041 }
2042
2043 /*
2044  * IO bfa callback is pending. IO resource cannot be freed.
2045  */
2046 static void
2047 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2048 {
2049         bfa_trc(ioim->bfa, ioim->iotag);
2050         bfa_trc(ioim->bfa, event);
2051
2052         switch (event) {
2053         case BFA_IOIM_SM_HCB:
2054                 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2055                 list_del(&ioim->qe);
2056                 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2057                 break;
2058
2059         case BFA_IOIM_SM_FREE:
2060                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2061                 break;
2062
2063         case BFA_IOIM_SM_CLEANUP:
2064                 bfa_ioim_notify_cleanup(ioim);
2065                 break;
2066
2067         case BFA_IOIM_SM_HWFAIL:
2068                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2069                 break;
2070
2071         default:
2072                 bfa_sm_fault(ioim->bfa, event);
2073         }
2074 }
2075
2076 /*
2077  * IO is completed; waiting for the firmware to free the IO resource.
2078  */
2079 static void
2080 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2081 {
2082         bfa_trc(ioim->bfa, ioim->iotag);
2083         bfa_trc(ioim->bfa, event);
2084
2085         switch (event) {
2086         case BFA_IOIM_SM_FREE:
2087                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2088                 bfa_ioim_free(ioim);
2089                 break;
2090
2091         case BFA_IOIM_SM_CLEANUP:
2092                 bfa_ioim_notify_cleanup(ioim);
2093                 break;
2094
2095         case BFA_IOIM_SM_HWFAIL:
2096                 break;
2097
2098         default:
2099                 bfa_sm_fault(ioim->bfa, event);
2100         }
2101 }
2102
2103 /*
2104  * This is called from bfa_fcpim_start after the driver has completed
2105  * bfa_init() and the flash read. Invalidate the stale contents of the
2106  * LUN mask, such as the unit attention, rp tag and lp tag.
2107  */
2108 void
2109 bfa_ioim_lm_init(struct bfa_s *bfa)
2110 {
2111         struct bfa_lun_mask_s *lunm_list;
2112         int     i;
2113
2114         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2115                 return;
2116
2117         lunm_list = bfa_get_lun_mask_list(bfa);
2118         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2119                 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2120                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2121                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2122         }
2123 }
2124
2125 static void
2126 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2127 {
2128         struct bfa_ioim_s *ioim = cbarg;
2129
2130         if (!complete) {
2131                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2132                 return;
2133         }
2134
2135         bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2136 }
2137
2138 static void
2139 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2140 {
2141         struct bfa_ioim_s       *ioim = cbarg;
2142         struct bfi_ioim_rsp_s *m;
2143         u8      *snsinfo = NULL;
2144         u8      sns_len = 0;
2145         s32     residue = 0;
2146
2147         if (!complete) {
2148                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2149                 return;
2150         }
2151
2152         m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2153         if (m->io_status == BFI_IOIM_STS_OK) {
2154                 /*
2155                  * setup sense information, if present
2156                  */
2157                 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2158                                         m->sns_len) {
2159                         sns_len = m->sns_len;
2160                         snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2161                                                 ioim->iotag);
2162                 }
2163
2164                 /*
2165                  * setup residue value correctly for normal completions
2166                  */
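                /*
                 * Sign convention: an underrun (e.g. a 4096-byte read that
                 * transferred only 3072 bytes) yields a positive residue of
                 * 1024, while an overrun is reported as a negative residue.
                 */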
2167                 if (m->resid_flags == FCP_RESID_UNDER) {
2168                         residue = be32_to_cpu(m->residue);
2169                         bfa_stats(ioim->itnim, iocomp_underrun);
2170                 }
2171                 if (m->resid_flags == FCP_RESID_OVER) {
2172                         residue = be32_to_cpu(m->residue);
2173                         residue = -residue;
2174                         bfa_stats(ioim->itnim, iocomp_overrun);
2175                 }
2176         }
2177
2178         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2179                           m->scsi_status, sns_len, snsinfo, residue);
2180 }
2181
2182 void
2183 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2184                         u16 rp_tag, u8 lp_tag)
2185 {
2186         struct bfa_lun_mask_s *lun_list;
2187         u8      i;
2188
2189         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2190                 return;
2191
2192         lun_list = bfa_get_lun_mask_list(bfa);
2193         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2194                 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2195                         if ((lun_list[i].lp_wwn == lp_wwn) &&
2196                             (lun_list[i].rp_wwn == rp_wwn)) {
2197                                 lun_list[i].rp_tag = rp_tag;
2198                                 lun_list[i].lp_tag = lp_tag;
2199                         }
2200                 }
2201         }
2202 }
2203
2204 /*
2205  * Set unit attention (UA) for all active LUNs in the LUN mask DB
2206  */
2207 static void
2208 bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2209 {
2210         struct bfa_lun_mask_s   *lunm_list;
2211         int     i;
2212
2213         lunm_list = bfa_get_lun_mask_list(bfa);
2214         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2215                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2216                         continue;
2217                 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2218         }
2219 }
2220
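/*
 * Enable or disable LUN masking. Not permitted in min-cfg mode; requesting
 * the current state is reported as no change. Enabling raises a unit
 * attention on all active entries, and the new state is persisted through
 * bfa_dconf_update().
 */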
2221 bfa_status_t
2222 bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2223 {
2224         struct bfa_lunmask_cfg_s        *lun_mask;
2225
2226         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2227         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2228                 return BFA_STATUS_FAILED;
2229
2230         if (bfa_get_lun_mask_status(bfa) == update)
2231                 return BFA_STATUS_NO_CHANGE;
2232
2233         lun_mask = bfa_get_lun_mask(bfa);
2234         lun_mask->status = update;
2235
2236         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2237                 bfa_ioim_lm_set_ua(bfa);
2238
2239         return  bfa_dconf_update(bfa);
2240 }
2241
2242 bfa_status_t
2243 bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2244 {
2245         int i;
2246         struct bfa_lun_mask_s   *lunm_list;
2247
2248         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2249         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2250                 return BFA_STATUS_FAILED;
2251
2252         lunm_list = bfa_get_lun_mask_list(bfa);
2253         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2254                 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2255                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2256                                 bfa_rport_unset_lunmask(bfa,
2257                                   BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2258                 }
2259         }
2260
2261         memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2262         return bfa_dconf_update(bfa);
2263 }
2264
2265 bfa_status_t
2266 bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2267 {
2268         struct bfa_lunmask_cfg_s *lun_mask;
2269
2270         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2271         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2272                 return BFA_STATUS_FAILED;
2273
2274         lun_mask = bfa_get_lun_mask(bfa);
2275         memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2276         return BFA_STATUS_OK;
2277 }
2278
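/*
 * Add a LUN mask entry for the (pwwn, rpwwn, lun) triple: reject duplicates,
 * claim a free slot, resolve the rport/lport tags if the remote port is
 * currently known, and raise a unit attention on every entry for this remote
 * port. The change is persisted through bfa_dconf_update().
 */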
2279 bfa_status_t
2280 bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2281                       wwn_t rpwwn, struct scsi_lun lun)
2282 {
2283         struct bfa_lun_mask_s *lunm_list;
2284         struct bfa_rport_s *rp = NULL;
2285         int i, free_index = MAX_LUN_MASK_CFG + 1;
2286         struct bfa_fcs_lport_s *port = NULL;
2287         struct bfa_fcs_rport_s *rp_fcs;
2288
2289         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2290         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2291                 return BFA_STATUS_FAILED;
2292
2293         port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2294                                    vf_id, *pwwn);
2295         if (port) {
2296                 *pwwn = port->port_cfg.pwwn;
2297                 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2298                 if (rp_fcs)
2299                         rp = rp_fcs->bfa_rport;
2300         }
2301
2302         lunm_list = bfa_get_lun_mask_list(bfa);
2303         /* scan for an existing entry while remembering a free slot */
2304         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2305                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2306                         free_index = i;
2307                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2308                     (lunm_list[i].rp_wwn == rpwwn) &&
2309                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2310                      scsilun_to_int((struct scsi_lun *)&lun)))
2311                         return  BFA_STATUS_ENTRY_EXISTS;
2312         }
2313
2314         if (free_index > MAX_LUN_MASK_CFG)
2315                 return BFA_STATUS_MAX_ENTRY_REACHED;
2316
2317         if (rp) {
2318                 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2319                                                    rp->rport_info.local_pid);
2320                 lunm_list[free_index].rp_tag = rp->rport_tag;
2321         } else {
2322                 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2323                 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2324         }
2325
2326         lunm_list[free_index].lp_wwn = *pwwn;
2327         lunm_list[free_index].rp_wwn = rpwwn;
2328         lunm_list[free_index].lun = lun;
2329         lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2330
2331         /* set for all luns in this rp */
2332         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2333                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2334                     (lunm_list[i].rp_wwn == rpwwn))
2335                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2336         }
2337
2338         return bfa_dconf_update(bfa);
2339 }
2340
2341 bfa_status_t
2342 bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2343                          wwn_t rpwwn, struct scsi_lun lun)
2344 {
2345         struct bfa_lun_mask_s   *lunm_list;
2346         struct bfa_rport_s      *rp = NULL;
2347         struct bfa_fcs_lport_s *port = NULL;
2348         struct bfa_fcs_rport_s *rp_fcs;
2349         int     i;
2350
2351         /* In min cfg, lunm_list could be NULL but no commands should run. */
2352         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2353                 return BFA_STATUS_FAILED;
2354
2355         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2356         bfa_trc(bfa, *pwwn);
2357         bfa_trc(bfa, rpwwn);
2358         bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2359
2360         if (*pwwn == 0) {
2361                 port = bfa_fcs_lookup_port(
2362                                 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2363                                 vf_id, *pwwn);
2364                 if (port) {
2365                         *pwwn = port->port_cfg.pwwn;
2366                         rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2367                         if (rp_fcs)
2368                                 rp = rp_fcs->bfa_rport;
2369                 }
2370         }
2371
2372         lunm_list = bfa_get_lun_mask_list(bfa);
2373         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2374                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2375                     (lunm_list[i].rp_wwn == rpwwn) &&
2376                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2377                      scsilun_to_int((struct scsi_lun *)&lun))) {
2378                         lunm_list[i].lp_wwn = 0;
2379                         lunm_list[i].rp_wwn = 0;
2380                         int_to_scsilun(0, &lunm_list[i].lun);
2381                         lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2382                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2383                                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2384                                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2385                         }
2386                         return bfa_dconf_update(bfa);
2387                 }
2388         }
2389
2390         /* set for all luns in this rp */
2391         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2392                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2393                     (lunm_list[i].rp_wwn == rpwwn))
2394                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2395         }
2396
2397         return BFA_STATUS_ENTRY_NOT_EXISTS;
2398 }
2399
2400 static void
2401 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2402 {
2403         struct bfa_ioim_s *ioim = cbarg;
2404
2405         if (!complete) {
2406                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2407                 return;
2408         }
2409
2410         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2411                           0, 0, NULL, 0);
2412 }
2413
2414 static void
2415 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2416 {
2417         struct bfa_ioim_s *ioim = cbarg;
2418
2419         bfa_stats(ioim->itnim, path_tov_expired);
2420         if (!complete) {
2421                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2422                 return;
2423         }
2424
2425         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2426                           0, 0, NULL, 0);
2427 }
2428
2429 static void
2430 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2431 {
2432         struct bfa_ioim_s *ioim = cbarg;
2433
2434         if (!complete) {
2435                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2436                 return;
2437         }
2438
2439         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2440 }
2441
2442 static void
2443 bfa_ioim_sgpg_alloced(void *cbarg)
2444 {
2445         struct bfa_ioim_s *ioim = cbarg;
2446
2447         ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2448         list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2449         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2450         bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2451 }
2452
2453 /*
2454  * Send I/O request to firmware.
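 *
 * Scatter-gather layout, as built below: the first SG element is carried
 * inline in the request; any remaining elements are placed in SG pages of
 * BFI_SGPG_DATA_SGES entries each, chained with a BFI_SGE_LINK element and
 * closed with a BFI_SGE_PGDLEN element carrying the page's cumulative
 * length.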
2455  */
2456 static  bfa_boolean_t
2457 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2458 {
2459         struct bfa_itnim_s *itnim = ioim->itnim;
2460         struct bfi_ioim_req_s *m;
2461         static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2462         struct bfi_sge_s *sge, *sgpge;
2463         u32     pgdlen = 0;
2464         u32     fcp_dl;
2465         u64 addr;
2466         struct scatterlist *sg;
2467         struct bfa_sgpg_s *sgpg;
2468         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2469         u32 i, sge_id, pgcumsz;
2470         enum dma_data_direction dmadir;
2471
2472         /*
2473          * check for room in queue to send request now
2474          */
2475         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2476         if (!m) {
2477                 bfa_stats(ioim->itnim, qwait);
2478                 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2479                                   &ioim->iosp->reqq_wait);
2480                 return BFA_FALSE;
2481         }
2482
2483         /*
2484          * build i/o request message next
2485          */
2486         m->io_tag = cpu_to_be16(ioim->iotag);
2487         m->rport_hdl = ioim->itnim->rport->fw_handle;
2488         m->io_timeout = 0;
2489
2490         sge = &m->sges[0];
2491         sgpg = ioim->sgpg;
2492         sge_id = 0;
2493         sgpge = NULL;
2494         pgcumsz = 0;
2495         scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2496                 if (i == 0) {
2497                         /* build inline IO SG element */
2498                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2499                         sge->sga = *(union bfi_addr_u *) &addr;
2500                         pgdlen = sg_dma_len(sg);
2501                         sge->sg_len = pgdlen;
2502                         sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2503                                         BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2504                         bfa_sge_to_be(sge);
2505                         sge++;
2506                 } else {
2507                         if (sge_id == 0)
2508                                 sgpge = sgpg->sgpg->sges;
2509
2510                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2511                         sgpge->sga = *(union bfi_addr_u *) &addr;
2512                         sgpge->sg_len = sg_dma_len(sg);
2513                         pgcumsz += sgpge->sg_len;
2514
2515                         /* set flags */
2516                         if (i < (ioim->nsges - 1) &&
2517                                         sge_id < (BFI_SGPG_DATA_SGES - 1))
2518                                 sgpge->flags = BFI_SGE_DATA;
2519                         else if (i < (ioim->nsges - 1))
2520                                 sgpge->flags = BFI_SGE_DATA_CPL;
2521                         else
2522                                 sgpge->flags = BFI_SGE_DATA_LAST;
2523
2524                         bfa_sge_to_le(sgpge);
2525
2526                         sgpge++;
2527                         if (i == (ioim->nsges - 1)) {
2528                                 sgpge->flags = BFI_SGE_PGDLEN;
2529                                 sgpge->sga.a32.addr_lo = 0;
2530                                 sgpge->sga.a32.addr_hi = 0;
2531                                 sgpge->sg_len = pgcumsz;
2532                                 bfa_sge_to_le(sgpge);
2533                         } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2534                                 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2535                                 sgpge->flags = BFI_SGE_LINK;
2536                                 sgpge->sga = sgpg->sgpg_pa;
2537                                 sgpge->sg_len = pgcumsz;
2538                                 bfa_sge_to_le(sgpge);
2539                                 sge_id = 0;
2540                                 pgcumsz = 0;
2541                         }
2542                 }
2543         }
2544
2545         if (ioim->nsges > BFI_SGE_INLINE) {
2546                 sge->sga = ioim->sgpg->sgpg_pa;
2547         } else {
2548                 sge->sga.a32.addr_lo = 0;
2549                 sge->sga.a32.addr_hi = 0;
2550         }
2551         sge->sg_len = pgdlen;
2552         sge->flags = BFI_SGE_PGDLEN;
2553         bfa_sge_to_be(sge);
2554
2555         /*
2556          * set up I/O command parameters
2557          */
2558         m->cmnd = cmnd_z0;
2559         int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2560         dmadir = cmnd->sc_data_direction;
2561         if (dmadir == DMA_TO_DEVICE)
2562                 m->cmnd.iodir = FCP_IODIR_WRITE;
2563         else if (dmadir == DMA_FROM_DEVICE)
2564                 m->cmnd.iodir = FCP_IODIR_READ;
2565         else
2566                 m->cmnd.iodir = FCP_IODIR_NONE;
2567
2568         m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2569         fcp_dl = scsi_bufflen(cmnd);
2570         m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2571
2572         /*
2573          * set up I/O message header
2574          */
2575         switch (m->cmnd.iodir) {
2576         case FCP_IODIR_READ:
2577                 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2578                 bfa_stats(itnim, input_reqs);
2579                 ioim->itnim->stats.rd_throughput += fcp_dl;
2580                 break;
2581         case FCP_IODIR_WRITE:
2582                 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2583                 bfa_stats(itnim, output_reqs);
2584                 ioim->itnim->stats.wr_throughput += fcp_dl;
2585                 break;
2586         case FCP_IODIR_RW:
2587                 bfa_stats(itnim, input_reqs);
2588                 bfa_stats(itnim, output_reqs);
2589                 /* fall through */
2590         default:
2591                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2592         }
2593         if (itnim->seq_rec ||
2594             (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2595                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2596
2597         /*
2598          * queue I/O message to firmware
2599          */
2600         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2601         return BFA_TRUE;
2602 }
2603
2604 /*
2605  * Set up any additional SG pages needed. The inline SG element is set up
2606  * at queuing time.
2607  */
2608 static bfa_boolean_t
2609 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2610 {
2611         u16     nsgpgs;
2612
2613         WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2614
2615         /*
2616          * allocate SG pages needed
2617          */
2618         nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2619         if (!nsgpgs)
2620                 return BFA_TRUE;
2621
2622         if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2623             != BFA_STATUS_OK) {
2624                 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2625                 return BFA_FALSE;
2626         }
2627
2628         ioim->nsgpgs = nsgpgs;
2629         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2630
2631         return BFA_TRUE;
2632 }
2633
2634 /*
2635  * Send I/O abort request to firmware.
2636  */
2637 static  bfa_boolean_t
2638 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2639 {
2640         struct bfi_ioim_abort_req_s *m;
2641         enum bfi_ioim_h2i       msgop;
2642
2643         /*
2644          * check for room in queue to send request now
2645          */
2646         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2647         if (!m)
2648                 return BFA_FALSE;
2649
2650         /*
2651          * build i/o request message next
2652          */
2653         if (ioim->iosp->abort_explicit)
2654                 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2655         else
2656                 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2657
2658         bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2659         m->io_tag    = cpu_to_be16(ioim->iotag);
2660         m->abort_tag = ++ioim->abort_tag;
2661
2662         /*
2663          * queue I/O message to firmware
2664          */
2665         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2666         return BFA_TRUE;
2667 }
2668
2669 /*
2670  * Call to resume any I/O requests waiting for room in request queue.
2671  */
2672 static void
2673 bfa_ioim_qresume(void *cbarg)
2674 {
2675         struct bfa_ioim_s *ioim = cbarg;
2676
2677         bfa_stats(ioim->itnim, qresumes);
2678         bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2679 }
2680
2681
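/*
 * Notify completion of a cleaned-up IO: if a TM command drove the cleanup,
 * drop the TM's wait counter; otherwise tell the itnim this IO is done. The
 * IO is moved to the fcpim global completion queue (or the itnim
 * delayed-completion queue while the IO TOV timer is active) since the itnim
 * may be freed.
 */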
2682 static void
2683 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2684 {
2685         /*
2686          * Move IO from itnim queue to fcpim global queue since itnim will be
2687          * freed.
2688          */
2689         list_del(&ioim->qe);
2690         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2691
2692         if (!ioim->iosp->tskim) {
2693                 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2694                         bfa_cb_dequeue(&ioim->hcb_qe);
2695                         list_del(&ioim->qe);
2696                         list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2697                 }
2698                 bfa_itnim_iodone(ioim->itnim);
2699         } else
2700                 bfa_wc_down(&ioim->iosp->tskim->wc);
2701 }
2702
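/*
 * An IO can be aborted only while it is genuinely in flight: IOs that never
 * started (unless parked on the itnim pending queue) and IOs already in an
 * abort or completion-callback state are not abortable.
 */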
2703 static bfa_boolean_t
2704 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2705 {
2706         if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2707             (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2708             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2709             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2710             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2711             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2712             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2713                 return BFA_FALSE;
2714
2715         return BFA_TRUE;
2716 }
2717
2718 void
2719 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2720 {
2721         /*
2722          * If the path TOV timer expired, fail the IO back with PATHTOV
2723          * status - these IO requests are not normally retried by the IO stack.
2724          *
2725          * Otherwise the device came back online; fail the IO with normal
2726          * failed status so that the IO stack retries it.
2727          */
2728         if (iotov)
2729                 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2730         else {
2731                 ioim->io_cbfn = __bfa_cb_ioim_failed;
2732                 bfa_stats(ioim->itnim, iocom_nexus_abort);
2733         }
2734         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2735
2736         /*
2737          * Move IO to fcpim global queue since itnim will be
2738          * freed.
2739          */
2740         list_del(&ioim->qe);
2741         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2742 }
2743
2744
2745 /*
2746  * Memory allocation and initialization.
2747  */
2748 void
2749 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2750 {
2751         struct bfa_ioim_s               *ioim;
2752         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2753         struct bfa_ioim_sp_s    *iosp;
2754         u16             i;
2755
2756         /*
2757          * claim memory first
2758          */
2759         ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2760         fcpim->ioim_arr = ioim;
2761         bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2762
2763         iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2764         fcpim->ioim_sp_arr = iosp;
2765         bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2766
2767         /*
2768          * Initialize ioim free queues
2769          */
2770         INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2771         INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2772
2773         for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2774              i++, ioim++, iosp++) {
2775                 /*
2776                  * initialize IOIM
2777                  */
2778                 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2779                 ioim->iotag   = i;
2780                 ioim->bfa     = fcpim->bfa;
2781                 ioim->fcpim   = fcpim;
2782                 ioim->iosp    = iosp;
2783                 INIT_LIST_HEAD(&ioim->sgpg_q);
2784                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2785                                    bfa_ioim_qresume, ioim);
2786                 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2787                                    bfa_ioim_sgpg_alloced, ioim);
2788                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2789         }
2790 }
2791
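/*
 * Handle an IO response from firmware: translate the completion status into
 * a state machine event. rsp->reuse_io_tag distinguishes tags that may be
 * reused immediately (SM_COMP / SM_ABORT_COMP) from those that must wait for
 * a later BFI_IOIM_STS_RES_FREE before the tag is recycled (SM_DONE /
 * SM_ABORT_DONE).
 */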
2792 void
2793 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2794 {
2795         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2796         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2797         struct bfa_ioim_s *ioim;
2798         u16     iotag;
2799         enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2800
2801         iotag = be16_to_cpu(rsp->io_tag);
2802
2803         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2804         WARN_ON(ioim->iotag != iotag);
2805
2806         bfa_trc(ioim->bfa, ioim->iotag);
2807         bfa_trc(ioim->bfa, rsp->io_status);
2808         bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2809
2810         if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2811                 ioim->iosp->comp_rspmsg = *m;
2812
2813         switch (rsp->io_status) {
2814         case BFI_IOIM_STS_OK:
2815                 bfa_stats(ioim->itnim, iocomp_ok);
2816                 if (rsp->reuse_io_tag == 0)
2817                         evt = BFA_IOIM_SM_DONE;
2818                 else
2819                         evt = BFA_IOIM_SM_COMP;
2820                 break;
2821
2822         case BFI_IOIM_STS_TIMEDOUT:
2823                 bfa_stats(ioim->itnim, iocomp_timedout);
2824                 /* fall through */
2825         case BFI_IOIM_STS_ABORTED:
2826                 rsp->io_status = BFI_IOIM_STS_ABORTED;
2827                 bfa_stats(ioim->itnim, iocomp_aborted);
2828                 if (rsp->reuse_io_tag == 0)
2829                         evt = BFA_IOIM_SM_DONE;
2830                 else
2831                         evt = BFA_IOIM_SM_COMP;
2832                 break;
2833
2834         case BFI_IOIM_STS_PROTO_ERR:
2835                 bfa_stats(ioim->itnim, iocom_proto_err);
2836                 WARN_ON(!rsp->reuse_io_tag);
2837                 evt = BFA_IOIM_SM_COMP;
2838                 break;
2839
2840         case BFI_IOIM_STS_SQER_NEEDED:
2841                 bfa_stats(ioim->itnim, iocom_sqer_needed);
2842                 WARN_ON(rsp->reuse_io_tag != 0);
2843                 evt = BFA_IOIM_SM_SQRETRY;
2844                 break;
2845
2846         case BFI_IOIM_STS_RES_FREE:
2847                 bfa_stats(ioim->itnim, iocom_res_free);
2848                 evt = BFA_IOIM_SM_FREE;
2849                 break;
2850
2851         case BFI_IOIM_STS_HOST_ABORTED:
2852                 bfa_stats(ioim->itnim, iocom_hostabrts);
2853                 if (rsp->abort_tag != ioim->abort_tag) {
2854                         bfa_trc(ioim->bfa, rsp->abort_tag);
2855                         bfa_trc(ioim->bfa, ioim->abort_tag);
2856                         return;
2857                 }
2858
2859                 if (rsp->reuse_io_tag)
2860                         evt = BFA_IOIM_SM_ABORT_COMP;
2861                 else
2862                         evt = BFA_IOIM_SM_ABORT_DONE;
2863                 break;
2864
2865         case BFI_IOIM_STS_UTAG:
2866                 bfa_stats(ioim->itnim, iocom_utags);
2867                 evt = BFA_IOIM_SM_COMP_UTAG;
2868                 break;
2869
2870         default:
2871                 WARN_ON(1);
2872         }
2873
2874         bfa_sm_send_event(ioim, evt);
2875 }
2876
2877 void
2878 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2879 {
2880         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2881         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2882         struct bfa_ioim_s *ioim;
2883         u16     iotag;
2884
2885         iotag = be16_to_cpu(rsp->io_tag);
2886
2887         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2888         WARN_ON(ioim->iotag != iotag);
2889
2890         bfa_ioim_cb_profile_comp(fcpim, ioim);
2891
2892         bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2893 }
2894
2895 /*
2896  * Called by itnim to clean up IO while going offline.
2897  */
2898 void
2899 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2900 {
2901         bfa_trc(ioim->bfa, ioim->iotag);
2902         bfa_stats(ioim->itnim, io_cleanups);
2903
2904         ioim->iosp->tskim = NULL;
2905         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2906 }
2907
2908 void
2909 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2910 {
2911         bfa_trc(ioim->bfa, ioim->iotag);
2912         bfa_stats(ioim->itnim, io_tmaborts);
2913
2914         ioim->iosp->tskim = tskim;
2915         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2916 }
2917
2918 /*
2919  * IOC failure handling.
2920  */
2921 void
2922 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2923 {
2924         bfa_trc(ioim->bfa, ioim->iotag);
2925         bfa_stats(ioim->itnim, io_iocdowns);
2926         bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2927 }
2928
2929 /*
2930  * IO offline TOV popped. Fail the pending IO.
2931  */
2932 void
2933 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2934 {
2935         bfa_trc(ioim->bfa, ioim->iotag);
2936         bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2937 }
2938
2939
2940 /*
2941  * Allocate IOIM resource for initiator mode I/O request.
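 *
 * A minimal sketch of the expected call sequence from the driver glue
 * (the bfad-side names here are illustrative):
 *
 *	ioim = bfa_ioim_alloc(bfa, dio, itnim, sg_count);
 *	if (!ioim)
 *		return busy;			(out of IO tags, retry later)
 *	bfa_ioim_start(ioim);
 *
 * Completion arrives through bfa_ioim_isr()/bfa_ioim_good_comp_isr(), and
 * the state machine eventually calls bfa_ioim_free().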
2942  */
2943 struct bfa_ioim_s *
2944 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2945                 struct bfa_itnim_s *itnim, u16 nsges)
2946 {
2947         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2948         struct bfa_ioim_s *ioim;
2949         struct bfa_iotag_s *iotag = NULL;
2950
2951         /*
2952          * allocate IOIM resource
2953          */
2954         bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2955         if (!iotag) {
2956                 bfa_stats(itnim, no_iotags);
2957                 return NULL;
2958         }
2959
2960         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2961
2962         ioim->dio = dio;
2963         ioim->itnim = itnim;
2964         ioim->nsges = nsges;
2965         ioim->nsgpgs = 0;
2966
2967         bfa_stats(itnim, total_ios);
2968         fcpim->ios_active++;
2969
2970         list_add_tail(&ioim->qe, &itnim->io_q);
2971
2972         return ioim;
2973 }
2974
2975 void
2976 bfa_ioim_free(struct bfa_ioim_s *ioim)
2977 {
2978         struct bfa_fcpim_s *fcpim = ioim->fcpim;
2979         struct bfa_iotag_s *iotag;
2980
2981         if (ioim->nsgpgs > 0)
2982                 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2983
2984         bfa_stats(ioim->itnim, io_comps);
2985         fcpim->ios_active--;
2986
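        /*
         * Strip the retry sequence carried in the upper tag bits (see
         * bfa_ioim_update_iotag()) so the base tag indexes the iotag array
         * below.
         */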
2987         ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2988
2989         WARN_ON(!(ioim->iotag <
2990                 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2991         iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2992
2993         if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2994                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2995         else
2996                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2997
2998         list_del(&ioim->qe);
2999 }
3000
3001 void
3002 bfa_ioim_start(struct bfa_ioim_s *ioim)
3003 {
3004         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
3005
3006         /*
3007          * Obtain the queue over which this request has to be issued
3008          */
3009         ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
3010                         BFA_FALSE : bfa_itnim_get_reqq(ioim);
3011
3012         bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3013 }
3014
3015 /*
3016  * Driver I/O abort request.
3017  */
3018 bfa_status_t
3019 bfa_ioim_abort(struct bfa_ioim_s *ioim)
3020 {
3021
3022         bfa_trc(ioim->bfa, ioim->iotag);
3023
3024         if (!bfa_ioim_is_abortable(ioim))
3025                 return BFA_STATUS_FAILED;
3026
3027         bfa_stats(ioim->itnim, io_aborts);
3028         bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3029
3030         return BFA_STATUS_OK;
3031 }
3032
3033 /*
3034  *  BFA TSKIM state machine functions
3035  */
3036
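/*
 * A rough map of the TSKIM states referenced below: uninit --START--> active;
 * a completed or offline TM moves to iocleanup, where the IO requests in TM
 * scope are cleaned up; cleanup handles an active TM whose ITN went offline;
 * the qfull variants park the TM while waiting for request queue space; hcb
 * waits for the driver callback.
 */
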
3037 /*
3038  * Task management command beginning state.
3039  */
3040 static void
3041 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3042 {
3043         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3044
3045         switch (event) {
3046         case BFA_TSKIM_SM_START:
3047                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3048                 bfa_tskim_gather_ios(tskim);
3049
3050                 /*
3051                  * If the device is offline, do not send the TM on the wire;
3052                  * just clean up any pending IO requests and complete the TM request.
3053                  */
3054                 if (!bfa_itnim_is_online(tskim->itnim)) {
3055                         bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3056                         tskim->tsk_status = BFI_TSKIM_STS_OK;
3057                         bfa_tskim_cleanup_ios(tskim);
3058                         return;
3059                 }
3060
3061                 if (!bfa_tskim_send(tskim)) {
3062                         bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3063                         bfa_stats(tskim->itnim, tm_qwait);
3064                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3065                                           &tskim->reqq_wait);
3066                 }
3067                 break;
3068
3069         default:
3070                 bfa_sm_fault(tskim->bfa, event);
3071         }
3072 }
3073
3074 /*
3075  * TM command is active, awaiting completion from firmware to
3076  * cleanup IO requests in TM scope.
3077  */
3078 static void
3079 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3080 {
3081         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3082
3083         switch (event) {
3084         case BFA_TSKIM_SM_DONE:
3085                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3086                 bfa_tskim_cleanup_ios(tskim);
3087                 break;
3088
3089         case BFA_TSKIM_SM_CLEANUP:
3090                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3091                 if (!bfa_tskim_send_abort(tskim)) {
3092                         bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3093                         bfa_stats(tskim->itnim, tm_qwait);
3094                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3095                                 &tskim->reqq_wait);
3096                 }
3097                 break;
3098
3099         case BFA_TSKIM_SM_HWFAIL:
3100                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3101                 bfa_tskim_iocdisable_ios(tskim);
3102                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3103                 break;
3104
3105         default:
3106                 bfa_sm_fault(tskim->bfa, event);
3107         }
3108 }
3109
3110 /*
3111  * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3112  * completion event from firmware.
3113  */
3114 static void
3115 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3116 {
3117         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3118
3119         switch (event) {
3120         case BFA_TSKIM_SM_DONE:
3121                 /*
3122                  * Ignore and wait for ABORT completion from firmware.
3123                  */
3124                 break;
3125
3126         case BFA_TSKIM_SM_UTAG:
3127         case BFA_TSKIM_SM_CLEANUP_DONE:
3128                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3129                 bfa_tskim_cleanup_ios(tskim);
3130                 break;
3131
3132         case BFA_TSKIM_SM_HWFAIL:
3133                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3134                 bfa_tskim_iocdisable_ios(tskim);
3135                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3136                 break;
3137
3138         default:
3139                 bfa_sm_fault(tskim->bfa, event);
3140         }
3141 }
3142
3143 static void
3144 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3145 {
3146         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3147
3148         switch (event) {
3149         case BFA_TSKIM_SM_IOS_DONE:
3150                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3151                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3152                 break;
3153
3154         case BFA_TSKIM_SM_CLEANUP:
3155                 /*
3156                  * Ignore, TM command completed on wire.
3157                  * Notify TM completion on IO cleanup completion.
3158                  */
3159                 break;
3160
3161         case BFA_TSKIM_SM_HWFAIL:
3162                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3163                 bfa_tskim_iocdisable_ios(tskim);
3164                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3165                 break;
3166
3167         default:
3168                 bfa_sm_fault(tskim->bfa, event);
3169         }
3170 }
3171
3172 /*
3173  * Task management command is waiting for room in the request CQ
3174  */
3175 static void
3176 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3177 {
3178         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3179
3180         switch (event) {
3181         case BFA_TSKIM_SM_QRESUME:
3182                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3183                 bfa_tskim_send(tskim);
3184                 break;
3185
3186         case BFA_TSKIM_SM_CLEANUP:
3187                 /*
3188                  * No need to send TM on wire since ITN is offline.
3189                  */
3190                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3191                 bfa_reqq_wcancel(&tskim->reqq_wait);
3192                 bfa_tskim_cleanup_ios(tskim);
3193                 break;
3194
3195         case BFA_TSKIM_SM_HWFAIL:
3196                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3197                 bfa_reqq_wcancel(&tskim->reqq_wait);
3198                 bfa_tskim_iocdisable_ios(tskim);
3199                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3200                 break;
3201
3202         default:
3203                 bfa_sm_fault(tskim->bfa, event);
3204         }
3205 }
3206
3207 /*
3208  * Task management command is active, awaiting room in the request CQ
3209  * to send the cleanup request.
3210  */
3211 static void
3212 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3213                 enum bfa_tskim_event event)
3214 {
3215         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3216
3217         switch (event) {
3218         case BFA_TSKIM_SM_DONE:
3219                 bfa_reqq_wcancel(&tskim->reqq_wait);
3220                 /* fall through */
3221         case BFA_TSKIM_SM_QRESUME:
3222                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3223                 bfa_tskim_send_abort(tskim);
3224                 break;
3225
3226         case BFA_TSKIM_SM_HWFAIL:
3227                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3228                 bfa_reqq_wcancel(&tskim->reqq_wait);
3229                 bfa_tskim_iocdisable_ios(tskim);
3230                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3231                 break;
3232
3233         default:
3234                 bfa_sm_fault(tskim->bfa, event);
3235         }
3236 }
3237
3238 /*
3239  * BFA callback is pending
3240  */
3241 static void
3242 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3243 {
3244         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3245
3246         switch (event) {
3247         case BFA_TSKIM_SM_HCB:
3248                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3249                 bfa_tskim_free(tskim);
3250                 break;
3251
3252         case BFA_TSKIM_SM_CLEANUP:
3253                 bfa_tskim_notify_comp(tskim);
3254                 break;
3255
3256         case BFA_TSKIM_SM_HWFAIL:
3257                 break;
3258
3259         default:
3260                 bfa_sm_fault(tskim->bfa, event);
3261         }
3262 }
3263
3264 static void
3265 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3266 {
3267         struct bfa_tskim_s *tskim = cbarg;
3268
3269         if (!complete) {
3270                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3271                 return;
3272         }
3273
3274         bfa_stats(tskim->itnim, tm_success);
3275         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3276 }
3277
3278 static void
3279 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3280 {
3281         struct bfa_tskim_s *tskim = cbarg;
3282
3283         if (!complete) {
3284                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3285                 return;
3286         }
3287
3288         bfa_stats(tskim->itnim, tm_failures);
3289         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3290                                 BFI_TSKIM_STS_FAILED);
3291 }
3292
3293 static bfa_boolean_t
3294 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3295 {
3296         switch (tskim->tm_cmnd) {
3297         case FCP_TM_TARGET_RESET:
3298                 return BFA_TRUE;
3299
3300         case FCP_TM_ABORT_TASK_SET:
3301         case FCP_TM_CLEAR_TASK_SET:
3302         case FCP_TM_LUN_RESET:
3303         case FCP_TM_CLEAR_ACA:
3304                 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3305
3306         default:
3307                 WARN_ON(1);
3308         }
3309
3310         return BFA_FALSE;
3311 }
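
/*
 * Example of the scope rule above (illustrative): a target reset is in
 * scope for every IO on the ITN, while the LUN-scoped commands only
 * match IOs whose LUN agrees byte-for-byte with tskim->lun:
 *
 *	struct scsi_lun lun0, lun1;
 *
 *	int_to_scsilun(0, &lun0);
 *	int_to_scsilun(1, &lun1);
 *	// An FCP_TM_LUN_RESET scoped to lun0 matches lun0 IOs only;
 *	// FCP_TM_TARGET_RESET would match IOs on both LUNs.
 */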
3312
3313 /*
3314  * Gather the IO requests affected by the task management command.
3315  */
3316 static void
3317 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3318 {
3319         struct bfa_itnim_s *itnim = tskim->itnim;
3320         struct bfa_ioim_s *ioim;
3321         struct list_head *qe, *qen;
3322         struct scsi_cmnd *cmnd;
3323         struct scsi_lun scsilun;
3324
3325         INIT_LIST_HEAD(&tskim->io_q);
3326
3327         /*
3328          * Gather any active IO requests first.
3329          */
3330         list_for_each_safe(qe, qen, &itnim->io_q) {
3331                 ioim = (struct bfa_ioim_s *) qe;
3332                 cmnd = (struct scsi_cmnd *) ioim->dio;
3333                 int_to_scsilun(cmnd->device->lun, &scsilun);
3334                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3335                         list_del(&ioim->qe);
3336                         list_add_tail(&ioim->qe, &tskim->io_q);
3337                 }
3338         }
3339
3340         /*
3341          * Fail back any pending IO requests immediately.
3342          */
3343         list_for_each_safe(qe, qen, &itnim->pending_q) {
3344                 ioim = (struct bfa_ioim_s *) qe;
3345                 cmnd = (struct scsi_cmnd *) ioim->dio;
3346                 int_to_scsilun(cmnd->device->lun, &scsilun);
3347                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3348                         list_del(&ioim->qe);
3349                         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3350                         bfa_ioim_tov(ioim);
3351                 }
3352         }
3353 }
3354
3355 /*
3356  * IO cleanup completion
3357  */
3358 static void
3359 bfa_tskim_cleanup_comp(void *tskim_cbarg)
3360 {
3361         struct bfa_tskim_s *tskim = tskim_cbarg;
3362
3363         bfa_stats(tskim->itnim, tm_io_comps);
3364         bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3365 }
3366
3367 /*
3368  * Clean up the IO requests gathered within the task management scope.
3369  */
3370 static void
3371 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3372 {
3373         struct bfa_ioim_s *ioim;
3374         struct list_head        *qe, *qen;
3375
3376         bfa_wc_init(&tskim->wc, bfa_tskim_cleanup_comp, tskim);
3377
3378         list_for_each_safe(qe, qen, &tskim->io_q) {
3379                 ioim = (struct bfa_ioim_s *) qe;
3380                 bfa_wc_up(&tskim->wc);
3381                 bfa_ioim_cleanup_tm(ioim, tskim);
3382         }
3383
3384         bfa_wc_wait(&tskim->wc);
3385 }
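
/*
 * The cleanup above relies on the BFA wait-counter pattern:
 * bfa_wc_init() primes the counter with one reference, bfa_wc_up()
 * adds one per IO handed to bfa_ioim_cleanup_tm(), each completing IO
 * calls bfa_tskim_iodone() -> bfa_wc_down(), and bfa_wc_wait() drops
 * the initial reference. bfa_tskim_cleanup_comp() therefore fires
 * exactly once, after the last in-scope IO finishes (or immediately,
 * if io_q was empty).
 */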
3386
3387 /*
3388  * Send task management request to firmware.
3389  */
3390 static bfa_boolean_t
3391 bfa_tskim_send(struct bfa_tskim_s *tskim)
3392 {
3393         struct bfa_itnim_s *itnim = tskim->itnim;
3394         struct bfi_tskim_req_s *m;
3395
3396         /*
3397          * check for room in queue to send request now
3398          */
3399         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3400         if (!m)
3401                 return BFA_FALSE;
3402
3403         /*
3404          * build i/o request message next
3405          */
3406         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3407                         bfa_fn_lpu(tskim->bfa));
3408
3409         m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3410         m->itn_fhdl = tskim->itnim->rport->fw_handle;
3411         m->t_secs = tskim->tsecs;
3412         m->lun = tskim->lun;
3413         m->tm_flags = tskim->tm_cmnd;
3414
3415         /*
3416          * queue I/O message to firmware
3417          */
3418         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3419         return BFA_TRUE;
3420 }
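
/*
 * bfa_reqq_next()/bfa_reqq_produce() is the request-CQ handshake used
 * throughout this file: a NULL from bfa_reqq_next() means the CQ is
 * full, so callers park on the queue and retry later, as the state
 * machine does above:
 *
 *	if (!bfa_tskim_send(tskim)) {
 *		bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
 *			      &tskim->reqq_wait);
 *		// bfa_tskim_qresume() re-sends on BFA_TSKIM_SM_QRESUME
 *	}
 */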
3421
3422 /*
3423  * Send abort request to cleanup an active TM to firmware.
3424  */
3425 static bfa_boolean_t
3426 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3427 {
3428         struct bfa_itnim_s      *itnim = tskim->itnim;
3429         struct bfi_tskim_abortreq_s     *m;
3430
3431         /*
3432          * check for room in queue to send request now
3433          */
3434         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3435         if (!m)
3436                 return BFA_FALSE;
3437
3438         /*
3439          * build i/o request message next
3440          */
3441         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3442                         bfa_fn_lpu(tskim->bfa));
3443
3444         m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3445
3446         /*
3447          * queue I/O message to firmware
3448          */
3449         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3450         return BFA_TRUE;
3451 }
3452
3453 /*
3454  * Call to resume task management cmnd waiting for room in request queue.
3455  */
3456 static void
3457 bfa_tskim_qresume(void *cbarg)
3458 {
3459         struct bfa_tskim_s *tskim = cbarg;
3460
3461         bfa_stats(tskim->itnim, tm_qresumes);
3462         bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3463 }
3464
3465 /*
3466  * Clean up IOs associated with a task management command on IOC failures.
3467  */
3468 static void
3469 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3470 {
3471         struct bfa_ioim_s *ioim;
3472         struct list_head        *qe, *qen;
3473
3474         list_for_each_safe(qe, qen, &tskim->io_q) {
3475                 ioim = (struct bfa_ioim_s *) qe;
3476                 bfa_ioim_iocdisable(ioim);
3477         }
3478 }
3479
3480 /*
3481  * Notification of completions from related IOIMs.
3482  */
3483 void
3484 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3485 {
3486         bfa_wc_down(&tskim->wc);
3487 }
3488
3489 /*
3490  * Handle IOC h/w failure notification from itnim.
3491  */
3492 void
3493 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3494 {
3495         tskim->notify = BFA_FALSE;
3496         bfa_stats(tskim->itnim, tm_iocdowns);
3497         bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3498 }
3499
3500 /*
3501  * Clean up the TM command and associated IOs as part of ITNIM offline.
3502  */
3503 void
3504 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3505 {
3506         tskim->notify = BFA_TRUE;
3507         bfa_stats(tskim->itnim, tm_cleanups);
3508         bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3509 }
3510
3511 /*
3512  * Memory allocation and initialization.
3513  */
3514 void
3515 bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3516 {
3517         struct bfa_tskim_s *tskim;
3518         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
3519         u16     i;
3520
3521         INIT_LIST_HEAD(&fcpim->tskim_free_q);
3522         INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3523
3524         tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3525         fcpim->tskim_arr = tskim;
3526
3527         for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3528                 /*
3529                  * initialize TSKIM
3530                  */
3531                 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3532                 tskim->tsk_tag = i;
3533                 tskim->bfa      = fcpim->bfa;
3534                 tskim->fcpim    = fcpim;
3535                 tskim->notify  = BFA_FALSE;
3536                 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3537                                         tskim);
3538                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3539
3540                 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3541         }
3542
3543         bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3544 }
3545
3546 void
3547 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3548 {
3549         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3550         struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3551         struct bfa_tskim_s *tskim;
3552         u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
3553
3554         tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3555         WARN_ON(tskim->tsk_tag != tsk_tag);
3556
3557         tskim->tsk_status = rsp->tsk_status;
3558
3559         /*
3560          * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3561          * requests. All other statuses are for normal completions.
3562          */
3563         if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3564                 bfa_stats(tskim->itnim, tm_cleanup_comps);
3565                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3566         } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3567                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3568         } else {
3569                 bfa_stats(tskim->itnim, tm_fw_rsps);
3570                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3571         }
3572 }
3573
3574
3575 struct bfa_tskim_s *
3576 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3577 {
3578         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3579         struct bfa_tskim_s *tskim;
3580
3581         bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3582
3583         if (tskim)
3584                 tskim->dtsk = dtsk;
3585
3586         return tskim;
3587 }
3588
3589 void
3590 bfa_tskim_free(struct bfa_tskim_s *tskim)
3591 {
3592         WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3593         list_del(&tskim->qe);
3594         list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3595 }
3596
3597 /*
3598  * Start a task management command.
3599  *
3600  * @param[in]   tskim   BFA task management command instance
3601  * @param[in]   itnim   i-t nexus for the task management command
3602  * @param[in]   lun     lun, if applicable
3603  * @param[in]   tm_cmnd Task management command code.
3604  * @param[in]   t_secs  Timeout in seconds
3605  *
3606  * @return None.
3607  */
3608 void
3609 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3610                         struct scsi_lun lun,
3611                         enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3612 {
3613         tskim->itnim    = itnim;
3614         tskim->lun      = lun;
3615         tskim->tm_cmnd = tm_cmnd;
3616         tskim->tsecs    = tsecs;
3617         tskim->notify  = BFA_FALSE;
3618         bfa_stats(itnim, tm_cmnds);
3619
3620         list_add_tail(&tskim->qe, &itnim->tsk_q);
3621         bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3622 }
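
/*
 * Illustrative only: issuing a LUN reset from the bfad layer could look
 * roughly like this, assuming hypothetical locals bfa, itnim and dtsk,
 * and a 60 second task management timeout:
 *
 *	struct bfa_tskim_s *tskim;
 *	struct scsi_lun lun;
 *
 *	tskim = bfa_tskim_alloc(bfa, dtsk);
 *	if (!tskim)
 *		return BFA_STATUS_FAILED;	// tskim pool exhausted
 *	int_to_scsilun(0, &lun);
 *	bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 60);
 */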
3623
3624 void
3625 bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3626 {
3627         struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
3628         struct list_head        *qe;
3629         int     i;
3630
3631         for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3632                 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3633                 list_add_tail(qe, &fcpim->tskim_unused_q);
3634         }
3635 }
3636
3637 void
3638 bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3639                 struct bfa_s *bfa)
3640 {
3641         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3642         struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3643         struct bfa_mem_dma_s *seg_ptr;
3644         u16     nsegs, idx, per_seg_ios, num_io_req;
3645         u32     km_len = 0;
3646
3647         /*
3648          * Zero is an allowed config value for num_ioim_reqs and
3649          * num_fwtio_reqs, so only adjust the values that are non-zero.
3650          */
3651         if (cfg->fwcfg.num_ioim_reqs &&
3652             cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3653                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3654         else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3655                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3656
3657         if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3658                 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3659
3660         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3661         if (num_io_req > BFA_IO_MAX) {
3662                 if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3663                         cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3664                         cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3665                 } else if (cfg->fwcfg.num_fwtio_reqs)
3666                         cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3667                 else
3668                         cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3669         }
3670
3671         bfa_fcpim_meminfo(cfg, &km_len);
3672
3673         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3674         km_len += num_io_req * sizeof(struct bfa_iotag_s);
3675         km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3676
3677         /* dma memory */
3678         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3679         per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3680
3681         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3682                 if (num_io_req >= per_seg_ios) {
3683                         num_io_req -= per_seg_ios;
3684                         bfa_mem_dma_setup(minfo, seg_ptr,
3685                                 per_seg_ios * BFI_IOIM_SNSLEN);
3686                 } else
3687                         bfa_mem_dma_setup(minfo, seg_ptr,
3688                                 num_io_req * BFI_IOIM_SNSLEN);
3689         }
3690
3691         /* kva memory */
3692         bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3693 }
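
/*
 * Worked example of the clamping above, using an illustrative
 * BFA_IO_MAX of 2000: if both num_ioim_reqs and num_fwtio_reqs are
 * non-zero and together exceed 2000, each is cut to BFA_IO_MAX/2 =
 * 1000; if only one of them is non-zero, that one is clamped to its
 * own per-type maximum (BFA_IOIM_MAX or BFA_FWTIO_MAX) instead.
 */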
3694
3695 void
3696 bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3697                 struct bfa_pcidev_s *pcidev)
3698 {
3699         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3700         struct bfa_mem_dma_s *seg_ptr;
3701         u16     idx, nsegs, num_io_req;
3702
3703         fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3704         fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3705         fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
3706         fcp->num_itns   = cfg->fwcfg.num_rports;
3707         fcp->bfa = bfa;
3708
3709         /*
3710          * Set up the pool of snsbase addresses that is passed to the
3711          * firmware as part of bfi_iocfc_cfg_s.
3712          */
3713         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3714         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3715
3716         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3717
3718                 if (!bfa_mem_dma_virt(seg_ptr))
3719                         break;
3720
3721                 fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3722                 fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3723                 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3724         }
3725
3726         fcp->throttle_update_required = 1;
3727         bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3728
3729         bfa_iotag_attach(fcp);
3730
3731         fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3732         bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3733                         (fcp->num_itns * sizeof(struct bfa_itn_s));
3734         memset(fcp->itn_arr, 0,
3735                         (fcp->num_itns * sizeof(struct bfa_itn_s)));
3736 }
3737
3738 void
3739 bfa_fcp_iocdisable(struct bfa_s *bfa)
3740 {
3741         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3742
3743         bfa_fcpim_iocdisable(fcp);
3744 }
3745
3746 void
3747 bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3748 {
3749         struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
3750         struct list_head        *qe;
3751         int     i;
3752
3753         /* Update the IO throttle value only once, at driver load time */
3754         if (!mod->throttle_update_required)
3755                 return;
3756
3757         for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3758                 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3759                 list_add_tail(qe, &mod->iotag_unused_q);
3760         }
3761
3762         if (mod->num_ioim_reqs != num_ioim_fw) {
3763                 bfa_trc(bfa, mod->num_ioim_reqs);
3764                 bfa_trc(bfa, num_ioim_fw);
3765         }
3766
3767         mod->max_ioim_reqs = max_ioim_fw;
3768         mod->num_ioim_reqs = num_ioim_fw;
3769         mod->throttle_update_required = 0;
3770 }
3771
3772 void
3773 bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3774                 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3775 {
3776         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3777         struct bfa_itn_s *itn;
3778
3779         itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3780         itn->isr = isr;
3781 }
3782
3783 /*
3784  * Itn interrupt processing.
3785  */
3786 void
3787 bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3788 {
3789         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3790         union bfi_itn_i2h_msg_u msg;
3791         struct bfa_itn_s *itn;
3792
3793         msg.msg = m;
3794         itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3795
3796         if (itn->isr)
3797                 itn->isr(bfa, m);
3798         else
3799                 WARN_ON(1);
3800 }
3801
3802 void
3803 bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3804 {
3805         struct bfa_iotag_s *iotag;
3806         u16     num_io_req, i;
3807
3808         iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3809         fcp->iotag_arr = iotag;
3810
3811         INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3812         INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3813         INIT_LIST_HEAD(&fcp->iotag_unused_q);
3814
3815         num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3816         for (i = 0; i < num_io_req; i++, iotag++) {
3817                 memset(iotag, 0, sizeof(struct bfa_iotag_s));
3818                 iotag->tag = i;
3819                 if (i < fcp->num_ioim_reqs)
3820                         list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3821                 else
3822                         list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3823         }
3824
3825         bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3826 }
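
/*
 * The tag space is split here: tags [0, num_ioim_reqs) are seeded onto
 * iotag_ioim_free_q for initiator IOs, and the remaining tags up to
 * num_io_req onto iotag_tio_free_q for target IOs. bfa_ioim_free()
 * depends on exactly this split when returning a tag to the correct
 * free list.
 */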
3827
3828
3829 /**
3830  * To send the config request, first try to use the throttle value from
3831  * flash; if it is 0, fall back to the driver parameter. min(flash_val,
3832  * drv_val) must be used because memory allocation was done based on the
3833  * configured value.
3834  */
3835 u16
3836 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3837 {
3838         u16 tmp;
3839         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3840
3841         /*
3842          * If the throttle value from flash is already in effect after the
3843          * driver is loaded, then until the next load always return the
3844          * current value instead of the actual flash value.
3845          */
3846         if (!fcp->throttle_update_required)
3847                 return (u16)fcp->num_ioim_reqs;
3848
3849         tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3850         if (!tmp || (tmp > drv_cfg_param))
3851                 tmp = drv_cfg_param;
3852
3853         return tmp;
3854 }
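
/*
 * Worked example with illustrative numbers: flash throttle 448 with a
 * driver parameter of 256 yields min(448, 256) = 256, since memory was
 * provisioned for the driver value; flash throttle 0 (unset) yields
 * the driver parameter outright. Once a value is in effect, the
 * current num_ioim_reqs is returned until the next driver load.
 */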
3855
3856 bfa_status_t
3857 bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3858 {
3859         if (!bfa_dconf_get_min_cfg(bfa)) {
3860                 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3861                 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3862                 return BFA_STATUS_OK;
3863         }
3864
3865         return BFA_STATUS_FAILED;
3866 }
3867
3868 u16
3869 bfa_fcpim_read_throttle(struct bfa_s *bfa)
3870 {
3871         struct bfa_throttle_cfg_s *throttle_cfg =
3872                         &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3873
3874         return ((!bfa_dconf_get_min_cfg(bfa)) ?
3875                ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
3876 }
3877
3878 bfa_status_t
3879 bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3880 {
3881         /* In min cfg, no commands should run. */
3882         if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3883             (!bfa_dconf_read_data_valid(bfa)))
3884                 return BFA_STATUS_FAILED;
3885
3886         bfa_fcpim_write_throttle(bfa, value);
3887
3888         return bfa_dconf_update(bfa);
3889 }
3890
3891 bfa_status_t
3892 bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3893 {
3894         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3895         struct bfa_defs_fcpim_throttle_s throttle;
3896
3897         if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3898             (!bfa_dconf_read_data_valid(bfa)))
3899                 return BFA_STATUS_FAILED;
3900
3901         memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3902
3903         throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3904         throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3905         if (!throttle.cfg_value)
3906                 throttle.cfg_value = throttle.cur_value;
3907         throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3908         memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3909
3910         return BFA_STATUS_OK;
3911 }
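
/*
 * Illustrative only: a management-path caller would read the throttle
 * settings like this, assuming a hypothetical local bfa:
 *
 *	struct bfa_defs_fcpim_throttle_s t;
 *
 *	if (bfa_fcpim_throttle_get(bfa, &t) == BFA_STATUS_OK)
 *		...	// t.cur_value, t.cfg_value, t.max_value are valid
 */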