1 // SPDX-License-Identifier: GPL-2.0-only
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
16 /* IOC local definitions */
18 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
20 #define bfa_ioc_firmware_lock(__ioc) \
21 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
22 #define bfa_ioc_firmware_unlock(__ioc) \
23 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
24 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
25 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
26 #define bfa_ioc_notify_fail(__ioc) \
27 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
28 #define bfa_ioc_sync_start(__ioc) \
29 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
30 #define bfa_ioc_sync_join(__ioc) \
31 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
32 #define bfa_ioc_sync_leave(__ioc) \
33 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
34 #define bfa_ioc_sync_ack(__ioc) \
35 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
36 #define bfa_ioc_sync_complete(__ioc) \
37 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
38 #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
39 ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
40 #define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
41 ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
42 #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
43 ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
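/* Enable automatic recovery of a failed IOC; can be overridden via bfa_nw_ioc_auto_recover(). */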
45 static bool bfa_nw_auto_recover = true;
48 * forward declarations
50 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
51 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
52 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
53 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
54 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
55 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
56 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
57 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
58 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
59 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
60 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
61 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
62 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
63 static void bfa_ioc_recover(struct bfa_ioc *ioc);
64 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
65 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
66 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
67 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
68 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
69 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
70 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
71 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
72 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
73 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
74 static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
75 enum bfi_fwboot_type boot_type, u32 boot_param);
76 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
77 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
		char *serial_num);
79 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
		char *fw_ver);
81 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
		char *chip_rev);
83 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
		char *optrom_ver);
85 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
		char *manufacturer);
87 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
88 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
90 /* IOC state machine definitions/declarations */
92 IOC_E_RESET = 1, /*!< IOC reset request */
93 IOC_E_ENABLE = 2, /*!< IOC enable request */
94 IOC_E_DISABLE = 3, /*!< IOC disable request */
95 IOC_E_DETACH = 4, /*!< driver detach cleanup */
96 IOC_E_ENABLED = 5, /*!< f/w enabled */
97 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
98 IOC_E_DISABLED = 7, /*!< f/w disabled */
99 IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
100 IOC_E_HBFAIL = 9, /*!< heartbeat failure */
101 IOC_E_HWERROR = 10, /*!< hardware error interrupt */
102 IOC_E_TIMEOUT = 11, /*!< timeout */
103 IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
106 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
107 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
108 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
109 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
110 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
111 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
112 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
113 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
114 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
117 static struct bfa_sm_table ioc_sm_table[] = {
118 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
119 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
120 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
121 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
122 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
123 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
124 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
125 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
126 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
127 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
131 * Forward declarations for the iocpf state machine
133 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
134 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
135 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
136 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
137 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
138 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
140 /* IOCPF state machine events */
142 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
143 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
144 IOCPF_E_STOP = 3, /*!< stop on driver detach */
145 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
146 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
147 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
148 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
149 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
150 IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
151 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
152 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
153 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
157 enum bfa_iocpf_state {
158 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
159 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
160 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
161 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
162 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
163 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
164 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
165 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
166 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
169 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
170 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
171 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
172 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
173 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
174 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
175 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
176 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
178 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
179 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
180 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
181 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
182 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
184 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
186 static struct bfa_sm_table iocpf_sm_table[] = {
187 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
188 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
189 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
190 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
191 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
192 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
193 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
194 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
195 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
196 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
197 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
198 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
199 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
200 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
203 /* IOC State Machine */
205 /* Beginning state. IOC uninit state. */
207 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
211 /* IOC is in uninit state. */
213 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
217 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
225 /* Reset entry actions -- initialize state machine */
227 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
229 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
232 /* IOC is in reset state. */
234 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
238 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
242 bfa_ioc_disable_comp(ioc);
246 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
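/* Enabling state entry: forward the enable request to the IOCPF state machine. */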
255 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
257 bfa_iocpf_enable(ioc);
260 /* Host IOC function is being enabled, awaiting response from firmware.
261 * Semaphore is acquired.
264 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
268 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
272 /* !!! fall through !!! */
274 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
275 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
276 if (event != IOC_E_PFFAILED)
277 bfa_iocpf_initfail(ioc);
281 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
282 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
286 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
290 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
302 /* IOC is enabled: fetch IOC attributes from firmware; a timer guards the response. */
304 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
306 mod_timer(&ioc->ioc_timer, jiffies +
307 msecs_to_jiffies(BFA_IOC_TOV));
308 bfa_ioc_send_getattr(ioc);
311 /* IOC configuration in progress. Timer is active. */
313 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
316 case IOC_E_FWRSP_GETATTR:
317 del_timer(&ioc->ioc_timer);
318 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
323 del_timer(&ioc->ioc_timer);
326 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
327 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
328 if (event != IOC_E_PFFAILED)
329 bfa_iocpf_getattrfail(ioc);
333 del_timer(&ioc->ioc_timer);
334 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
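/* IOC is operational: report success to the driver, notify registered modules and start heartbeat monitoring. */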
346 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
348 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
349 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
350 bfa_ioc_hb_monitor(ioc);
354 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
361 bfa_ioc_hb_stop(ioc);
362 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
367 bfa_ioc_hb_stop(ioc);
368 /* !!! fall through !!! */
370 if (ioc->iocpf.auto_recover)
371 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
373 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
375 bfa_ioc_fail_notify(ioc);
377 if (event != IOC_E_PFFAILED)
387 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
389 bfa_iocpf_disable(ioc);
392 /* IOC is being disabled */
394 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
398 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
403 * No state change. The IOC will move to the disabled state
404 * after the iocpf state machine completes its failure
405 * processing and itself reaches the disabled state.
411 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
412 bfa_ioc_disable_comp(ioc);
420 /* IOC disable completion entry. */
422 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
424 bfa_ioc_disable_comp(ioc);
428 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
432 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
436 ioc->cbfn->disable_cbfn(ioc->bfa);
440 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
450 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
454 /* Hardware initialization retry. */
456 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
460 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
466 * Initialization retry failed.
468 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
469 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
470 if (event != IOC_E_PFFAILED)
471 bfa_iocpf_initfail(ioc);
475 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
476 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
483 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
487 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
497 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
503 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
507 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
511 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
515 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
520 /* HB failure notification, ignore. */
529 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
535 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
540 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
544 ioc->cbfn->disable_cbfn(ioc->bfa);
548 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
556 /* IOCPF State Machine */
558 /* Reset entry actions -- initialize state machine */
560 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
562 iocpf->fw_mismatch_notified = false;
563 iocpf->auto_recover = bfa_nw_auto_recover;
566 /* Beginning state. IOC is in reset state. */
568 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
572 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
583 /* Semaphore should be acquired for version check. */
585 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
587 bfa_ioc_hw_sem_init(iocpf->ioc);
588 bfa_ioc_hw_sem_get(iocpf->ioc);
591 /* Awaiting h/w semaphore to continue with version check. */
593 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
595 struct bfa_ioc *ioc = iocpf->ioc;
598 case IOCPF_E_SEMLOCKED:
599 if (bfa_ioc_firmware_lock(ioc)) {
600 if (bfa_ioc_sync_start(ioc)) {
601 bfa_ioc_sync_join(ioc);
602 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
604 bfa_ioc_firmware_unlock(ioc);
605 bfa_nw_ioc_hw_sem_release(ioc);
606 mod_timer(&ioc->sem_timer, jiffies +
607 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
610 bfa_nw_ioc_hw_sem_release(ioc);
611 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
615 case IOCPF_E_SEM_ERROR:
616 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
617 bfa_ioc_pf_hwfailed(ioc);
620 case IOCPF_E_DISABLE:
621 bfa_ioc_hw_sem_get_cancel(ioc);
622 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
623 bfa_ioc_pf_disabled(ioc);
627 bfa_ioc_hw_sem_get_cancel(ioc);
628 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
636 /* Firmware mismatch: fail the enable request via the enable completion callback. */
638 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
640 /* Call only the first time sm enters fwmismatch state. */
641 if (!iocpf->fw_mismatch_notified)
642 bfa_ioc_pf_fwmismatch(iocpf->ioc);
644 iocpf->fw_mismatch_notified = true;
645 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
646 msecs_to_jiffies(BFA_IOC_TOV));
649 /* Awaiting firmware version match. */
651 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
653 struct bfa_ioc *ioc = iocpf->ioc;
656 case IOCPF_E_TIMEOUT:
657 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
660 case IOCPF_E_DISABLE:
661 del_timer(&ioc->iocpf_timer);
662 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
663 bfa_ioc_pf_disabled(ioc);
667 del_timer(&ioc->iocpf_timer);
668 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
676 /* Request for semaphore. */
678 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
680 bfa_ioc_hw_sem_get(iocpf->ioc);
683 /* Awaiting semaphore for h/w initialization. */
685 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
687 struct bfa_ioc *ioc = iocpf->ioc;
690 case IOCPF_E_SEMLOCKED:
691 if (bfa_ioc_sync_complete(ioc)) {
692 bfa_ioc_sync_join(ioc);
693 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
695 bfa_nw_ioc_hw_sem_release(ioc);
696 mod_timer(&ioc->sem_timer, jiffies +
697 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
701 case IOCPF_E_SEM_ERROR:
702 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
703 bfa_ioc_pf_hwfailed(ioc);
706 case IOCPF_E_DISABLE:
707 bfa_ioc_hw_sem_get_cancel(ioc);
708 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
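/* Hardware init entry: the h/w semaphore is held; reset and reinitialize the IOC. */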
717 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
719 iocpf->poll_time = 0;
720 bfa_ioc_reset(iocpf->ioc, false);
723 /* Hardware is being initialized. Interrupts are enabled.
724 * Holding hardware semaphore lock.
727 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
729 struct bfa_ioc *ioc = iocpf->ioc;
732 case IOCPF_E_FWREADY:
733 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
736 case IOCPF_E_TIMEOUT:
737 bfa_nw_ioc_hw_sem_release(ioc);
738 bfa_ioc_pf_failed(ioc);
739 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
742 case IOCPF_E_DISABLE:
743 del_timer(&ioc->iocpf_timer);
744 bfa_ioc_sync_leave(ioc);
745 bfa_nw_ioc_hw_sem_release(ioc);
746 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
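/* Enabling entry: arm the IOCPF timer and send the IOC ENABLE request to firmware. */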
755 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
757 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
758 msecs_to_jiffies(BFA_IOC_TOV));
760 * Enable Interrupts before sending fw IOC ENABLE cmd.
762 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
763 bfa_ioc_send_enable(iocpf->ioc);
766 /* Host IOC function is being enabled, awaiting response from firmware.
767 * Semaphore is acquired.
770 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
772 struct bfa_ioc *ioc = iocpf->ioc;
775 case IOCPF_E_FWRSP_ENABLE:
776 del_timer(&ioc->iocpf_timer);
777 bfa_nw_ioc_hw_sem_release(ioc);
778 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
781 case IOCPF_E_INITFAIL:
782 del_timer(&ioc->iocpf_timer);
785 case IOCPF_E_TIMEOUT:
786 bfa_nw_ioc_hw_sem_release(ioc);
787 if (event == IOCPF_E_TIMEOUT)
788 bfa_ioc_pf_failed(ioc);
789 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
792 case IOCPF_E_DISABLE:
793 del_timer(&ioc->iocpf_timer);
794 bfa_nw_ioc_hw_sem_release(ioc);
795 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
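/* Firmware acknowledged the enable: report IOCPF readiness to the IOC state machine. */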
804 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
806 bfa_ioc_pf_enabled(iocpf->ioc);
810 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
813 case IOCPF_E_DISABLE:
814 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
817 case IOCPF_E_GETATTRFAIL:
818 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
822 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
831 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
833 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
834 msecs_to_jiffies(BFA_IOC_TOV));
835 bfa_ioc_send_disable(iocpf->ioc);
838 /* IOC is being disabled */
840 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
842 struct bfa_ioc *ioc = iocpf->ioc;
845 case IOCPF_E_FWRSP_DISABLE:
846 del_timer(&ioc->iocpf_timer);
847 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
851 del_timer(&ioc->iocpf_timer);
854 case IOCPF_E_TIMEOUT:
855 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
856 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
859 case IOCPF_E_FWRSP_ENABLE:
868 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
870 bfa_ioc_hw_sem_get(iocpf->ioc);
873 /* Disabling sync: wait for the h/w semaphore so this function can leave the sync group. */
875 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
877 struct bfa_ioc *ioc = iocpf->ioc;
880 case IOCPF_E_SEMLOCKED:
881 bfa_ioc_sync_leave(ioc);
882 bfa_nw_ioc_hw_sem_release(ioc);
883 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
886 case IOCPF_E_SEM_ERROR:
887 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
888 bfa_ioc_pf_hwfailed(ioc);
899 /* IOC disable completion entry. */
901 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
903 bfa_ioc_mbox_flush(iocpf->ioc);
904 bfa_ioc_pf_disabled(iocpf->ioc);
908 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
910 struct bfa_ioc *ioc = iocpf->ioc;
914 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
918 bfa_ioc_firmware_unlock(ioc);
919 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
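/* Init-failure sync entry: save the firmware trace and acquire the h/w semaphore. */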
928 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
930 bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
931 bfa_ioc_hw_sem_get(iocpf->ioc);
934 /* Hardware initialization failed. */
936 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
938 struct bfa_ioc *ioc = iocpf->ioc;
941 case IOCPF_E_SEMLOCKED:
942 bfa_ioc_notify_fail(ioc);
943 bfa_ioc_sync_leave(ioc);
944 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
945 bfa_nw_ioc_hw_sem_release(ioc);
946 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
949 case IOCPF_E_SEM_ERROR:
950 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
951 bfa_ioc_pf_hwfailed(ioc);
954 case IOCPF_E_DISABLE:
955 bfa_ioc_hw_sem_get_cancel(ioc);
956 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
960 bfa_ioc_hw_sem_get_cancel(ioc);
961 bfa_ioc_firmware_unlock(ioc);
962 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
974 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
978 /* Hardware initialization failed. */
980 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
982 struct bfa_ioc *ioc = iocpf->ioc;
985 case IOCPF_E_DISABLE:
986 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
990 bfa_ioc_firmware_unlock(ioc);
991 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1000 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1003 * Mark IOC as failed in hardware and stop firmware.
1005 bfa_ioc_lpu_stop(iocpf->ioc);
1008 * Flush any queued up mailbox requests.
1010 bfa_ioc_mbox_flush(iocpf->ioc);
1011 bfa_ioc_hw_sem_get(iocpf->ioc);
1014 /* IOC is in failed state. */
1016 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1018 struct bfa_ioc *ioc = iocpf->ioc;
1021 case IOCPF_E_SEMLOCKED:
1022 bfa_ioc_sync_ack(ioc);
1023 bfa_ioc_notify_fail(ioc);
1024 if (!iocpf->auto_recover) {
1025 bfa_ioc_sync_leave(ioc);
1026 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1027 bfa_nw_ioc_hw_sem_release(ioc);
1028 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1030 if (bfa_ioc_sync_complete(ioc))
1031 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1033 bfa_nw_ioc_hw_sem_release(ioc);
1034 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1039 case IOCPF_E_SEM_ERROR:
1040 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1041 bfa_ioc_pf_hwfailed(ioc);
1044 case IOCPF_E_DISABLE:
1045 bfa_ioc_hw_sem_get_cancel(ioc);
1046 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1053 bfa_sm_fault(event);
1058 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1062 /* IOC is in failed state. */
1064 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1067 case IOCPF_E_DISABLE:
1068 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1072 bfa_sm_fault(event);
1076 /* BFA IOC private functions */
1078 /* Notify common modules registered for notification. */
1080 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1082 struct bfa_ioc_notify *notify;
1084 list_for_each_entry(notify, &ioc->notify_q, qe)
1085 notify->cbfn(notify->cbarg, event);
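/* Complete an IOC disable: invoke the driver callback and notify registered modules. */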
1089 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1091 ioc->cbfn->disable_cbfn(ioc->bfa);
1092 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
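/* Acquire a h/w semaphore register, spinning up to BFA_SEM_SPINCNT attempts. */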
1096 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1100 #define BFA_SEM_SPINCNT 3000
1102 r32 = readl(sem_reg);
1104 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1107 r32 = readl(sem_reg);
1117 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1123 /* Clear fwver hdr */
1125 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1127 u32 pgnum, loff = 0;
1130 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1131 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1133 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1134 writel(0, ioc->ioc_regs.smem_page_start + loff);
1135 loff += sizeof(u32);
1141 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1143 struct bfi_ioc_image_hdr fwhdr;
1146 /* Spin on init semaphore to serialize. */
1147 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1150 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1153 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1154 if (fwstate == BFI_IOC_UNINIT) {
1155 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1159 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1161 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1162 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1166 bfa_ioc_fwver_clear(ioc);
1167 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
1168 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
1171 * Try to lock and then unlock the semaphore.
1173 readl(ioc->ioc_regs.ioc_sem_reg);
1174 writel(1, ioc->ioc_regs.ioc_sem_reg);
1176 /* Unlock init semaphore */
1177 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1181 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1186 * First read to the semaphore register will return 0, subsequent reads
1187 * will return 1. Semaphore is released by writing 1 to the register
1189 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1191 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1195 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1199 mod_timer(&ioc->sem_timer, jiffies +
1200 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1204 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1206 writel(1, ioc->ioc_regs.ioc_sem_reg);
1210 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1212 del_timer(&ioc->sem_timer);
1215 /* Initialize LPU local memory (aka secondary memory / SRAM) */
1217 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1221 #define PSS_LMEM_INIT_TIME 10000
1223 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1224 pss_ctl &= ~__PSS_LMEM_RESET;
1225 pss_ctl |= __PSS_LMEM_INIT_EN;
1228 * i2c workaround: 12.5 KHz clock
1230 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1231 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1234 * wait for memory initialization to be complete
1238 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1240 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1243 * If memory initialization is not successful, the IOC timeout will catch such failures.
1246 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1248 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1249 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1253 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1258 * Take processor out of reset.
1260 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1261 pss_ctl &= ~__PSS_LPU0_RESET;
1263 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1267 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1272 * Put processors in reset.
1274 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1275 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1277 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1280 /* Read the image header (and hence version) of the currently loaded firmware from SMEM. */
1282 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1287 u32 *fwsig = (u32 *) fwhdr;
1289 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1290 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1292 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1295 swab32(readl(loff + ioc->ioc_regs.smem_page_start));
1296 loff += sizeof(u32);
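/* Compare the MD5 checksums of two firmware image headers. */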
1301 bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
1302 struct bfi_ioc_image_hdr *fwhdr_2)
1306 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1307 if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
1314 /* Returns TRUE if the signature, major, minor and maintenance versions match.
1315 * If the patch, phase and build numbers also match, the MD5 checksums must match as well.
1318 bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
1319 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
1321 if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
1323 if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
1325 if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
1327 if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
1329 if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
1330 drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
1331 drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
1332 return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
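/* A flash firmware header is valid only if its major version is neither 0 nor 0xFF. */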
1338 bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
1340 if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
1347 fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
1349 if (fwhdr->fwver.phase == 0 &&
1350 fwhdr->fwver.build == 0)
1356 /* Compares two compatible headers; reports whether fwhdr_to_cmp is better than, older than, or the same as base_fwhdr. */
1357 static enum bfi_ioc_img_ver_cmp
1358 bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
1359 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
1361 if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
1362 return BFI_IOC_IMG_VER_INCOMP;
1364 if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
1365 return BFI_IOC_IMG_VER_BETTER;
1366 else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
1367 return BFI_IOC_IMG_VER_OLD;
1369 /* GA takes priority over internal builds of the same patch stream.
1370 * At this point the major, minor, maint and patch numbers are the same.
1372 if (fwhdr_is_ga(base_fwhdr))
1373 if (fwhdr_is_ga(fwhdr_to_cmp))
1374 return BFI_IOC_IMG_VER_SAME;
1376 return BFI_IOC_IMG_VER_OLD;
1378 if (fwhdr_is_ga(fwhdr_to_cmp))
1379 return BFI_IOC_IMG_VER_BETTER;
1381 if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
1382 return BFI_IOC_IMG_VER_BETTER;
1383 else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
1384 return BFI_IOC_IMG_VER_OLD;
1386 if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
1387 return BFI_IOC_IMG_VER_BETTER;
1388 else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
1389 return BFI_IOC_IMG_VER_OLD;
1391 /* All version numbers are equal.
1392 * The MD5 check is done as part of the compatibility check.
1394 return BFI_IOC_IMG_VER_SAME;
1397 /* register definitions */
1398 #define FLI_CMD_REG 0x0001d000
1399 #define FLI_WRDATA_REG 0x0001d00c
1400 #define FLI_RDDATA_REG 0x0001d010
1401 #define FLI_ADDR_REG 0x0001d004
1402 #define FLI_DEV_STATUS_REG 0x0001d014
1404 #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
1405 #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
1406 #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
1407 #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
1409 #define NFC_STATE_RUNNING 0x20000001
1410 #define NFC_STATE_PAUSED 0x00004560
1411 #define NFC_VER_VALID 0x147
1413 enum bfa_flash_cmd {
1414 BFA_FLASH_FAST_READ = 0x0b, /* fast read */
1415 BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
1416 BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
1417 BFA_FLASH_WRITE = 0x02, /* write */
1418 BFA_FLASH_READ_STATUS = 0x05, /* read status */
1421 /* hardware error definition */
1422 enum bfa_flash_err {
1423 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
1424 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
1425 BFA_FLASH_BAD = -3, /*!< flash bad */
1426 BFA_FLASH_BUSY = -4, /*!< flash busy */
1427 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
1428 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
1429 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
1430 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
1431 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
1434 /* flash command register data structure */
1435 union bfa_flash_cmd_reg {
1456 /* flash device status register data structure */
1457 union bfa_flash_dev_status_reg {
1480 /* flash address register data structure */
1481 union bfa_flash_addr_reg {
1494 /* Flash raw private functions */
1496 bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
1497 u8 rd_cnt, u8 ad_cnt, u8 op)
1499 union bfa_flash_cmd_reg cmd;
1503 cmd.r.write_cnt = wr_cnt;
1504 cmd.r.read_cnt = rd_cnt;
1505 cmd.r.addr_cnt = ad_cnt;
1507 writel(cmd.i, (pci_bar + FLI_CMD_REG));
1511 bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
1513 union bfa_flash_addr_reg addr;
1515 addr.r.addr = address & 0x00ffffff;
1517 writel(addr.i, (pci_bar + FLI_ADDR_REG));
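/* Check whether the previously issued flash command is still active. */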
1521 bfa_flash_cmd_act_check(void __iomem *pci_bar)
1523 union bfa_flash_cmd_reg cmd;
1525 cmd.i = readl(pci_bar + FLI_CMD_REG);
1528 return BFA_FLASH_ERR_CMD_ACT;
1533 /* Flush FLI data fifo. */
1535 bfa_flash_fifo_flush(void __iomem *pci_bar)
1538 union bfa_flash_dev_status_reg dev_status;
1540 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
1542 if (!dev_status.r.fifo_cnt)
1545 /* fifo counter in terms of words */
1546 for (i = 0; i < dev_status.r.fifo_cnt; i++)
1547 readl(pci_bar + FLI_RDDATA_REG);
1549 /* Check the device status. It may take some time. */
1550 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
1551 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
1552 if (!dev_status.r.fifo_cnt)
1556 if (dev_status.r.fifo_cnt)
1557 return BFA_FLASH_ERR_FIFO_CNT;
1562 /* Read flash status. */
1564 bfa_flash_status_read(void __iomem *pci_bar)
1566 union bfa_flash_dev_status_reg dev_status;
1571 status = bfa_flash_fifo_flush(pci_bar);
1575 bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
1577 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
1578 status = bfa_flash_cmd_act_check(pci_bar);
1586 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
1587 if (!dev_status.r.fifo_cnt)
1588 return BFA_FLASH_BUSY;
1590 ret_status = readl(pci_bar + FLI_RDDATA_REG);
1593 status = bfa_flash_fifo_flush(pci_bar);
1600 /* Start flash read operation. */
1602 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
1607 /* len must be a multiple of 4 and must not exceed the fifo size */
1608 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
1609 return BFA_FLASH_ERR_LEN;
1612 status = bfa_flash_status_read(pci_bar);
1613 if (status == BFA_FLASH_BUSY)
1614 status = bfa_flash_status_read(pci_bar);
1619 /* check if write-in-progress bit is cleared */
1620 if (status & BFA_FLASH_WIP_MASK)
1621 return BFA_FLASH_ERR_WIP;
1623 bfa_flash_set_addr(pci_bar, offset);
1625 bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
1630 /* Check flash read operation. */
1632 bfa_flash_read_check(void __iomem *pci_bar)
1634 if (bfa_flash_cmd_act_check(pci_bar))
1640 /* End flash read operation. */
1642 bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
1646 /* read data fifo up to 32 words */
1647 for (i = 0; i < len; i += 4) {
1648 u32 w = readl(pci_bar + FLI_RDDATA_REG);
1649 *((u32 *)(buf + i)) = swab32(w);
1652 bfa_flash_fifo_flush(pci_bar);
1655 /* Perform flash raw read. */
1657 #define FLASH_BLOCKING_OP_MAX 500
1658 #define FLASH_SEM_LOCK_REG 0x18820
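/* Single, non-blocking attempt to grab the raw flash semaphore. */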
1661 bfa_raw_sem_get(void __iomem *bar)
1665 locked = readl(bar + FLASH_SEM_LOCK_REG);
1670 static enum bfa_status
1671 bfa_flash_sem_get(void __iomem *bar)
1673 u32 n = FLASH_BLOCKING_OP_MAX;
1675 while (!bfa_raw_sem_get(bar)) {
1677 return BFA_STATUS_BADFLASH;
1680 return BFA_STATUS_OK;
1684 bfa_flash_sem_put(void __iomem *bar)
1686 writel(0, (bar + FLASH_SEM_LOCK_REG));
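/* Read from flash in fifo-sized chunks while holding the flash semaphore. */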
1689 static enum bfa_status
1690 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
1695 u32 off, l, s, residue, fifo_sz;
1699 fifo_sz = BFA_FLASH_FIFO_SIZE;
1700 status = bfa_flash_sem_get(pci_bar);
1701 if (status != BFA_STATUS_OK)
1707 l = (n + 1) * fifo_sz - s;
1711 status = bfa_flash_read_start(pci_bar, offset + off, l,
1714 bfa_flash_sem_put(pci_bar);
1715 return BFA_STATUS_FAILED;
1718 n = BFA_FLASH_BLOCKING_OP_MAX;
1719 while (bfa_flash_read_check(pci_bar)) {
1721 bfa_flash_sem_put(pci_bar);
1722 return BFA_STATUS_FAILED;
1726 bfa_flash_read_end(pci_bar, l, &buf[off]);
1731 bfa_flash_sem_put(pci_bar);
1733 return BFA_STATUS_OK;
1736 #define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
1738 static enum bfa_status
1739 bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
1742 return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
1743 BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
1744 (char *)fwimg, BFI_FLASH_CHUNK_SZ);
1747 static enum bfi_ioc_img_ver_cmp
1748 bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
1749 struct bfi_ioc_image_hdr *base_fwhdr)
1751 struct bfi_ioc_image_hdr *flash_fwhdr;
1752 enum bfa_status status;
1753 u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1755 status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1756 if (status != BFA_STATUS_OK)
1757 return BFI_IOC_IMG_VER_INCOMP;
1759 flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
1760 if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
1761 return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1763 return BFI_IOC_IMG_VER_INCOMP;
1767 * Returns TRUE if driver is willing to work with current smem f/w version.
1770 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1772 struct bfi_ioc_image_hdr *drv_fwhdr;
1773 enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
1775 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1776 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1778 /* If smem is incompatible or old, driver should not work with it. */
1779 drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
1780 if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
1781 drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
1785 /* If flash has better f/w than smem, do not work with smem.
1786 * If smem f/w == flash f/w, work with it (smem f/w is neither old nor incompatible here).
1787 * If flash is old or incompatible, work with smem if and only if smem f/w == drv f/w.
1789 smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
1791 if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
1793 else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
1796 return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
1800 /* Return true if current running version is valid. Firmware signature and
1801 * execution context (driver/bios) must match.
1804 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1806 struct bfi_ioc_image_hdr fwhdr;
1808 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1809 if (swab32(fwhdr.bootenv) != boot_env)
1812 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1815 /* Conditionally flush any pending message from firmware at start. */
1817 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1821 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1823 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
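/* Initialize IOC hardware: boot firmware if needed, or ride along with an initialization already in progress. */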
1827 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1829 enum bfi_ioc_state ioc_fwstate;
1833 ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1836 ioc_fwstate = BFI_IOC_UNINIT;
1838 boot_env = BFI_FWBOOT_ENV_OS;
1841 * check if firmware is valid
1843 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1844 false : bfa_ioc_fwver_valid(ioc, boot_env);
1847 if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
1849 bfa_ioc_poll_fwinit(ioc);
1855 * If hardware initialization is in progress (initialized by other IOC),
1856 * just wait for an initialization completion interrupt.
1858 if (ioc_fwstate == BFI_IOC_INITING) {
1859 bfa_ioc_poll_fwinit(ioc);
1864 * If IOC function is disabled and firmware version is same,
1865 * just re-enable IOC.
1867 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1869 * When using MSI-X any pending firmware ready event should
1870 * be flushed. Otherwise MSI-X interrupts are not delivered.
1872 bfa_ioc_msgflush(ioc);
1873 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1878 * Initialize the h/w for any other states.
1880 if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
1882 bfa_ioc_poll_fwinit(ioc);
1886 bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
1888 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
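/* Copy a message into the host-to-LPU mailbox registers and ring the doorbell. */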
1892 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1894 u32 *msgp = (u32 *) ioc_msg;
1897 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1900 * first write msg to mailbox registers
1902 for (i = 0; i < len / sizeof(u32); i++)
1903 writel(cpu_to_le32(msgp[i]),
1904 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1906 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1907 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1910 * write 1 to mailbox CMD to trigger LPU event
1912 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1913 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1917 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1919 struct bfi_ioc_ctrl_req enable_req;
1921 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1922 bfa_ioc_portid(ioc));
1923 enable_req.clscode = htons(ioc->clscode);
1924 enable_req.rsvd = htons(0);
1925 /* overflow in 2106 */
1926 enable_req.tv_sec = ntohl(ktime_get_real_seconds());
1927 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1931 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1933 struct bfi_ioc_ctrl_req disable_req;
1935 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1936 bfa_ioc_portid(ioc));
1937 disable_req.clscode = htons(ioc->clscode);
1938 disable_req.rsvd = htons(0);
1939 /* overflow in 2106 */
1940 disable_req.tv_sec = ntohl(ktime_get_real_seconds());
1941 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1945 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1947 struct bfi_ioc_getattr_req attr_req;
1949 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1950 bfa_ioc_portid(ioc));
1951 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1952 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
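/* Heartbeat timer callback: a stalled heartbeat count triggers IOC recovery. */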
1956 bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
1960 hb_count = readl(ioc->ioc_regs.heartbeat);
1961 if (ioc->hb_count == hb_count) {
1962 bfa_ioc_recover(ioc);
1965 ioc->hb_count = hb_count;
1968 bfa_ioc_mbox_poll(ioc);
1969 mod_timer(&ioc->hb_timer, jiffies +
1970 msecs_to_jiffies(BFA_IOC_HB_TOV));
1974 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1976 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1977 mod_timer(&ioc->hb_timer, jiffies +
1978 msecs_to_jiffies(BFA_IOC_HB_TOV));
1982 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1984 del_timer(&ioc->hb_timer);
1987 /* Initiate a full firmware download. */
1988 static enum bfa_status
1989 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1999 u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
2000 enum bfa_status status;
2002 if (boot_env == BFI_FWBOOT_ENV_OS &&
2003 boot_type == BFI_FWBOOT_TYPE_FLASH) {
2004 fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
2006 status = bfa_nw_ioc_flash_img_get_chnk(ioc,
2007 BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
2008 if (status != BFA_STATUS_OK)
2013 fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
2014 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
2015 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
2018 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2020 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2022 for (i = 0; i < fwimg_size; i++) {
2023 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
2024 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
2025 if (boot_env == BFI_FWBOOT_ENV_OS &&
2026 boot_type == BFI_FWBOOT_TYPE_FLASH) {
2027 status = bfa_nw_ioc_flash_img_get_chnk(ioc,
2028 BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
2030 if (status != BFA_STATUS_OK)
2035 fwimg = bfa_cb_image_get_chunk(
2036 bfa_ioc_asic_gen(ioc),
2037 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
2044 writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
2045 ioc->ioc_regs.smem_page_start + loff);
2047 loff += sizeof(u32);
2050 * handle page offset wrap around
2052 loff = PSS_SMEM_PGOFF(loff);
2056 ioc->ioc_regs.host_page_num_fn);
2060 writel(bfa_ioc_smem_pgnum(ioc, 0),
2061 ioc->ioc_regs.host_page_num_fn);
2064 * Set boot type, env and device mode at the end.
2066 if (boot_env == BFI_FWBOOT_ENV_OS &&
2067 boot_type == BFI_FWBOOT_TYPE_FLASH) {
2068 boot_type = BFI_FWBOOT_TYPE_NORMAL;
2070 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
2071 ioc->port0_mode, ioc->port1_mode);
2072 writel(asicmode, ((ioc->ioc_regs.smem_page_start)
2073 + BFI_FWBOOT_DEVMODE_OFF));
2074 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
2075 + (BFI_FWBOOT_TYPE_OFF)));
2076 writel(boot_env, ((ioc->ioc_regs.smem_page_start)
2077 + (BFI_FWBOOT_ENV_OFF)));
2078 return BFA_STATUS_OK;
2082 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
2084 bfa_ioc_hwinit(ioc, force);
2087 /* BFA ioc enable reply by firmware */
2089 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
2092 struct bfa_iocpf *iocpf = &ioc->iocpf;
2094 ioc->port_mode = ioc->port_mode_cfg = port_mode;
2095 ioc->ad_cap_bm = cap_bm;
2096 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2099 /* Update BFA configuration from firmware configuration. */
2101 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
2103 struct bfi_ioc_attr *attr = ioc->attr;
2105 attr->adapter_prop = ntohl(attr->adapter_prop);
2106 attr->card_type = ntohl(attr->card_type);
2107 attr->maxfrsize = ntohs(attr->maxfrsize);
2109 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
2112 /* Attach time initialization of mbox logic. */
2114 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
2116 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2119 INIT_LIST_HEAD(&mod->cmd_q);
2120 for (mc = 0; mc < BFI_MC_MAX; mc++) {
2121 mod->mbhdlr[mc].cbfn = NULL;
2122 mod->mbhdlr[mc].cbarg = ioc->bfa;
2126 /* Mbox poll timer -- restarts any pending mailbox requests. */
2128 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
2130 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2131 struct bfa_mbox_cmd *cmd;
2132 bfa_mbox_cmd_cbfn_t cbfn;
2137 * If no command pending, do nothing
2139 if (list_empty(&mod->cmd_q))
2143 * If previous command is not yet fetched by firmware, do nothing
2145 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2150 * Enqueue command to firmware.
2152 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
2154 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2157 * Give a callback to the client, indicating that the command is sent
2167 /* Cleanup any pending requests. */
2169 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
2171 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2172 struct bfa_mbox_cmd *cmd;
2174 while (!list_empty(&mod->cmd_q)) {
2175 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
2181 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
2183 * @ioc: pointer to the IOC structure
2184 * @tbuf: host buffer that receives the data read from smem
2185 * @soff: byte offset within smem to start reading from
2186 * @sz: number of bytes to read
2189 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
2191 u32 pgnum, loff, r32;
2195 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2196 loff = PSS_SMEM_PGOFF(soff);
2199 * Hold semaphore to serialize pll init and fwtrc.
2201 if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
2204 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2206 len = sz/sizeof(u32);
2207 for (i = 0; i < len; i++) {
2208 r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
2209 buf[i] = be32_to_cpu(r32);
2210 loff += sizeof(u32);
2213 * handle page offset wrap around
2215 loff = PSS_SMEM_PGOFF(loff);
2218 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2222 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2223 ioc->ioc_regs.host_page_num_fn);
2228 readl(ioc->ioc_regs.ioc_init_sem_reg);
2229 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2233 /* Retrieve saved firmware trace from a prior IOC failure. */
2235 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
2237 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
2238 int tlen, status = 0;
2241 if (tlen > BNA_DBG_FWTRC_LEN)
2242 tlen = BNA_DBG_FWTRC_LEN;
2244 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
2249 /* Save firmware trace if configured. */
2251 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
2255 if (ioc->dbg_fwsave_once) {
2256 ioc->dbg_fwsave_once = false;
2257 if (ioc->dbg_fwsave_len) {
2258 tlen = ioc->dbg_fwsave_len;
2259 bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2264 /* Retrieve saved firmware trace from a prior IOC failure. */
2266 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
2270 if (ioc->dbg_fwsave_len == 0)
2271 return BFA_STATUS_ENOFSAVE;
2274 if (tlen > ioc->dbg_fwsave_len)
2275 tlen = ioc->dbg_fwsave_len;
2277 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2279 return BFA_STATUS_OK;
2283 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
2286 * Notify driver and common modules registered for notification.
2288 ioc->cbfn->hbfail_cbfn(ioc->bfa);
2289 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2290 bfa_nw_ioc_debug_save_ftrc(ioc);
2293 /* IOCPF to IOC interface */
2295 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
2297 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
2301 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
2303 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
2307 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
2309 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
2313 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
2315 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
2319 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
2322 * Provide enable completion callback and AEN notification.
2324 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2328 static enum bfa_status
2329 bfa_ioc_pll_init(struct bfa_ioc *ioc)
2332 * Hold semaphore so that nobody can access the chip during init.
2334 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2336 bfa_ioc_pll_init_asic(ioc);
2338 ioc->pllinit = true;
2340 /* Initialize LMEM */
2341 bfa_ioc_lmem_init(ioc);
2344 * release semaphore.
2346 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
2348 return BFA_STATUS_OK;
2351 /* Interface used by diag module to do firmware boot with memory test
2352 * as the entry vector.
2354 static enum bfa_status
2355 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
2358 struct bfi_ioc_image_hdr *drv_fwhdr;
2359 enum bfa_status status;
2360 bfa_ioc_stats(ioc, ioc_boots);
2362 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2363 return BFA_STATUS_FAILED;
2364 if (boot_env == BFI_FWBOOT_ENV_OS &&
2365 boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2366 drv_fwhdr = (struct bfi_ioc_image_hdr *)
2367 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2368 /* Work with flash if and only if the flash f/w is better than the driver f/w.
2369 * Otherwise push the driver's firmware.
2371 if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2372 BFI_IOC_IMG_VER_BETTER)
2373 boot_type = BFI_FWBOOT_TYPE_FLASH;
2377 * Initialize IOC state of all functions on a chip reset.
2379 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2380 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2381 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2383 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2384 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2387 bfa_ioc_msgflush(ioc);
2388 status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2389 if (status == BFA_STATUS_OK)
2390 bfa_ioc_lpu_start(ioc);
2392 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2397 /* Enable/disable IOC failure auto recovery. */
2399 bfa_nw_ioc_auto_recover(bool auto_recover)
2401 bfa_nw_auto_recover = auto_recover;
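/* Fetch one inbound mailbox message from the LPU, if available, and clear the mailbox interrupt. */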
2405 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
2411 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2418 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2420 r32 = readl(ioc->ioc_regs.lpu_mbox +
2422 msgp[i] = htonl(r32);
2426 * turn off mailbox interrupt by clearing mailbox status
2428 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2429 readl(ioc->ioc_regs.lpu_mbox_cmd);
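/* Handle IOC-class mailbox messages: heartbeat and enable/disable/getattr replies. */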
2435 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2437 union bfi_ioc_i2h_msg_u *msg;
2438 struct bfa_iocpf *iocpf = &ioc->iocpf;
2440 msg = (union bfi_ioc_i2h_msg_u *) m;
2442 bfa_ioc_stats(ioc, ioc_isrs);
2444 switch (msg->mh.msg_id) {
2445 case BFI_IOC_I2H_HBEAT:
2448 case BFI_IOC_I2H_ENABLE_REPLY:
2449 bfa_ioc_enable_reply(ioc,
2450 (enum bfa_mode)msg->fw_event.port_mode,
2451 msg->fw_event.cap_bm);
2454 case BFI_IOC_I2H_DISABLE_REPLY:
2455 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2458 case BFI_IOC_I2H_GETATTR_REPLY:
2459 bfa_ioc_getattr_reply(ioc);
2468 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
2470 * @ioc: pointer to the IOC structure
2471 * @bfa: driver instance structure
* @cbfn: IOC callback functions supplied by the driver
2474 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2478 ioc->fcmode = false;
2479 ioc->pllinit = false;
2480 ioc->dbg_fwsave_once = true;
2481 ioc->iocpf.ioc = ioc;
2483 bfa_ioc_mbox_attach(ioc);
2484 INIT_LIST_HEAD(&ioc->notify_q);
2486 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2487 bfa_fsm_send_event(ioc, IOC_E_RESET);
2490 /* Driver detach time IOC cleanup. */
2492 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2494 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2496 /* Done with detach, empty the notify_q. */
2497 INIT_LIST_HEAD(&ioc->notify_q);
2501 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
* @ioc: pointer to the IOC structure
2503 * @pcidev: PCI device information for this IOC
* @clscode: PCI function class code
2506 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2507 enum bfi_pcifn_class clscode)
2509 ioc->clscode = clscode;
2510 ioc->pcidev = *pcidev;
2513 * Initialize IOC and device personality
2515 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2516 ioc->asic_mode = BFI_ASIC_MODE_FC;
2518 switch (pcidev->device_id) {
2519 case PCI_DEVICE_ID_BROCADE_CT:
2520 ioc->asic_gen = BFI_ASIC_GEN_CT;
2521 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2522 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2523 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2524 ioc->ad_cap_bm = BFA_CM_CNA;
2527 case BFA_PCI_DEVICE_ID_CT2:
2528 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2529 if (clscode == BFI_PCIFN_CLASS_FC &&
2530 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2531 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2533 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2534 ioc->ad_cap_bm = BFA_CM_HBA;
2536 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2537 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2538 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2540 ioc->port_mode_cfg = BFA_MODE_CNA;
2541 ioc->ad_cap_bm = BFA_CM_CNA;
2544 ioc->port_mode_cfg = BFA_MODE_NIC;
2545 ioc->ad_cap_bm = BFA_CM_NIC;
2555 * Set asic specific interfaces.
2557 if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2558 bfa_nw_ioc_set_ct_hwif(ioc);
2560 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2561 bfa_nw_ioc_set_ct2_hwif(ioc);
2562 bfa_nw_ioc_ct2_poweron(ioc);
2565 bfa_ioc_map_port(ioc);
2566 bfa_ioc_reg_init(ioc);
2570 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
* @ioc: pointer to the IOC structure
2572 * @dm_kva: kernel virtual address of IOC dma memory
2573 * @dm_pa: physical address of IOC dma memory
2576 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2579 * dma memory for firmware attribute
2581 ioc->attr_dma.kva = dm_kva;
2582 ioc->attr_dma.pa = dm_pa;
2583 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2586 /* Return size of dma memory required. */
2588 bfa_nw_ioc_meminfo(void)
2590 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2594 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2596 bfa_ioc_stats(ioc, ioc_enables);
2597 ioc->dbg_fwsave_once = true;
2599 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2603 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2605 bfa_ioc_stats(ioc, ioc_disables);
2606 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2609 /* Initialize memory for saving firmware trace. */
2611 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2613 ioc->dbg_fwsave = dbg_fwsave;
2614 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2618 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2620 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2623 /* Register mailbox message handler function, to be called by common modules */
2625 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2626 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2628 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2630 mod->mbhdlr[mc].cbfn = cbfn;
2631 mod->mbhdlr[mc].cbarg = cbarg;
2635 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2637 * @ioc: IOC instance
2638 * @cmd: Mailbox command
* @cbfn: callback invoked once the queued command is sent to firmware
* @cbarg: argument passed to @cbfn
2640 * Waits if the mailbox is busy. It is the caller's responsibility to serialize requests.
2643 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2644 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2646 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2653 * If a previous command is pending, queue new command
2655 if (!list_empty(&mod->cmd_q)) {
2656 list_add_tail(&cmd->qe, &mod->cmd_q);
2661 * If mailbox is busy, queue command for poll timer
2663 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2665 list_add_tail(&cmd->qe, &mod->cmd_q);
2670 * mailbox is free -- queue command to firmware
2672 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2677 /* Handle mailbox interrupts */
2679 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2681 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2685 if (bfa_ioc_msgget(ioc, &m)) {
2687 * Treat IOC message class as special.
2689 mc = m.mh.msg_class;
2690 if (mc == BFI_MC_IOC) {
2691 bfa_ioc_isr(ioc, &m);
2695 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2698 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2701 bfa_ioc_lpu_read_stat(ioc);
2704 * Try to send pending mailbox commands
2706 bfa_ioc_mbox_poll(ioc);
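/* Hardware error interrupt: account the failure and send IOC_E_HWERROR to the state machine. */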
2710 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2712 bfa_ioc_stats(ioc, ioc_hbfails);
2713 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2714 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2717 /* return true if IOC is disabled */
2719 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2721 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2722 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2725 /* return true if IOC is operational */
2727 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2729 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
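/*
 * Usage sketch (illustrative only): request paths are expected to check that
 * the IOC is operational before queueing mailbox commands, as the flash APIs
 * below do:
 *
 *	if (!bfa_nw_ioc_is_operational(ioc))
 *		return BFA_STATUS_IOC_NON_OP;
 */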
2732 /* Add to IOC heartbeat failure notification queue. To be used by common
2733 * modules such as cee, port, diag.
2736 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2737 struct bfa_ioc_notify *notify)
2739 list_add_tail(&notify->qe, &ioc->notify_q);
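/*
 * Usage sketch (illustrative only): a module embeds a struct bfa_ioc_notify,
 * initializes it with bfa_ioc_notify_init() and registers it to be told about
 * IOC disable/failure events; the flash attach code below does the same thing
 * by adding itself to ioc->notify_q directly. The "mymod" names are
 * hypothetical, and mymod_notify follows the (void *cbarg,
 * enum bfa_ioc_event event) signature used by bfa_flash_notify().
 *
 *	bfa_ioc_notify_init(&mymod->ioc_notify, mymod_notify, mymod);
 *	bfa_nw_ioc_notify_register(ioc, &mymod->ioc_notify);
 */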
2742 #define BFA_MFG_NAME "QLogic"
2744 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2745 struct bfa_adapter_attr *ad_attr)
2747 struct bfi_ioc_attr *ioc_attr;
2749 ioc_attr = ioc->attr;
2751 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2752 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2753 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2754 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2755 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2756 sizeof(struct bfa_mfg_vpd));
2758 ad_attr->nports = bfa_ioc_get_nports(ioc);
2759 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2761 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2762 /* For now, model descr uses same model string */
2763 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2765 ad_attr->card_type = ioc_attr->card_type;
2766 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2768 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2769 ad_attr->prototype = 1;
2771 ad_attr->prototype = 0;
2773 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2774 bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
2776 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2777 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2778 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2779 ad_attr->asic_rev = ioc_attr->asic_rev;
2781 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2784 static enum bfa_ioc_type
2785 bfa_ioc_get_type(struct bfa_ioc *ioc)
2787 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2788 return BFA_IOC_TYPE_LL;
2790 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2792 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2793 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2797 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2800 (void *)ioc->attr->brcd_serialnum,
2801 BFA_ADAPTER_SERIAL_NUM_LEN);
2805 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2807 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2811 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2813 BUG_ON(!(chip_rev));
2815 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2821 chip_rev[4] = ioc->attr->asic_rev;
2826 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2828 memcpy(optrom_ver, ioc->attr->optrom_version,
2833 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2835 strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2839 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2841 struct bfi_ioc_attr *ioc_attr;
2844 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2846 ioc_attr = ioc->attr;
2848 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2849 BFA_MFG_NAME, ioc_attr->card_type);
2852 static enum bfa_ioc_state
2853 bfa_ioc_get_state(struct bfa_ioc *ioc)
2855 enum bfa_iocpf_state iocpf_st;
2856 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2858 if (ioc_st == BFA_IOC_ENABLING ||
2859 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2861 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2864 case BFA_IOCPF_SEMWAIT:
2865 ioc_st = BFA_IOC_SEMWAIT;
2868 case BFA_IOCPF_HWINIT:
2869 ioc_st = BFA_IOC_HWINIT;
2872 case BFA_IOCPF_FWMISMATCH:
2873 ioc_st = BFA_IOC_FWMISMATCH;
2876 case BFA_IOCPF_FAIL:
2877 ioc_st = BFA_IOC_FAIL;
2880 case BFA_IOCPF_INITFAIL:
2881 ioc_st = BFA_IOC_INITFAIL;
2892 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2894 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2896 ioc_attr->state = bfa_ioc_get_state(ioc);
2897 ioc_attr->port_id = bfa_ioc_portid(ioc);
2898 ioc_attr->port_mode = ioc->port_mode;
2900 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2901 ioc_attr->cap_bm = ioc->ad_cap_bm;
2903 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2905 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2907 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2908 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2909 ioc_attr->def_fn = bfa_ioc_is_default(ioc);
2910 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2915 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2917 return ioc->attr->pwwn;
2921 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
2923 ether_addr_copy(mac, ioc->attr->mac);
2926 /* Firmware failure detected. Start recovery actions. */
2928 bfa_ioc_recover(struct bfa_ioc *ioc)
2930 pr_crit("Heart Beat of IOC has failed\n");
2931 bfa_ioc_stats(ioc, ioc_hbfails);
2932 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2933 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2936 /* BFA IOC PF private functions */
2939 bfa_iocpf_enable(struct bfa_ioc *ioc)
2941 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2945 bfa_iocpf_disable(struct bfa_ioc *ioc)
2947 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2951 bfa_iocpf_fail(struct bfa_ioc *ioc)
2953 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2957 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2959 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2963 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2965 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2969 bfa_iocpf_stop(struct bfa_ioc *ioc)
2971 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2975 bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
2977 enum bfa_iocpf_state iocpf_st;
2979 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2981 if (iocpf_st == BFA_IOCPF_HWINIT)
2982 bfa_ioc_poll_fwinit(ioc);
2984 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2988 bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
2990 bfa_ioc_hw_sem_get(ioc);
2994 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
2996 u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
2998 if (fwstate == BFI_IOC_DISABLED) {
2999 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3003 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
3004 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3006 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3007 mod_timer(&ioc->iocpf_timer, jiffies +
3008 msecs_to_jiffies(BFA_IOC_POLL_TOV));
3013 * Flash module specific
3017 * FLASH DMA buffer should be big enough to hold both the MFG block and
3018 * the ASIC block (64 KB) at the same time, and should be 2 KB aligned to
3019 * avoid a write segment crossing a sector boundary.
3021 #define BFA_FLASH_SEG_SZ 2048
3022 #define BFA_FLASH_DMA_BUF_SZ \
3023 roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
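/*
 * Worked example (illustrative, assuming a hypothetical
 * sizeof(struct bfa_mfg_block) of 256 bytes): the buffer size would be
 * roundup(0x10000 + 256, 2048) = roundup(65792, 2048) = 67584 bytes (0x10800),
 * i.e. 33 full 2 KB segments.
 */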
3026 bfa_flash_cb(struct bfa_flash *flash)
3030 flash->cbfn(flash->cbarg, flash->status);
3034 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
3036 struct bfa_flash *flash = cbarg;
3039 case BFA_IOC_E_DISABLED:
3040 case BFA_IOC_E_FAILED:
3041 if (flash->op_busy) {
3042 flash->status = BFA_STATUS_IOC_FAILURE;
3043 flash->cbfn(flash->cbarg, flash->status);
3053 * Send flash write request.
3056 bfa_flash_write_send(struct bfa_flash *flash)
3058 struct bfi_flash_write_req *msg =
3059 (struct bfi_flash_write_req *) flash->mb.msg;
3062 msg->type = be32_to_cpu(flash->type);
3063 msg->instance = flash->instance;
3064 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3065 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3066 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3067 msg->length = be32_to_cpu(len);
3069 /* indicate if it's the last msg of the whole write operation */
3070 msg->last = (len == flash->residue) ? 1 : 0;
3072 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3073 bfa_ioc_portid(flash->ioc));
3074 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3075 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3076 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3078 flash->residue -= len;
3079 flash->offset += len;
3083 * bfa_flash_read_send - Send flash read request.
3085 * @cbarg: callback argument
3088 bfa_flash_read_send(void *cbarg)
3090 struct bfa_flash *flash = cbarg;
3091 struct bfi_flash_read_req *msg =
3092 (struct bfi_flash_read_req *) flash->mb.msg;
3095 msg->type = be32_to_cpu(flash->type);
3096 msg->instance = flash->instance;
3097 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3098 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3099 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3100 msg->length = be32_to_cpu(len);
3101 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3102 bfa_ioc_portid(flash->ioc));
3103 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3104 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3108 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
3110 * @flasharg: flash structure
3111 * @msg: message structure
3114 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
3116 struct bfa_flash *flash = flasharg;
3120 struct bfi_flash_query_rsp *query;
3121 struct bfi_flash_write_rsp *write;
3122 struct bfi_flash_read_rsp *read;
3123 struct bfi_mbmsg *msg;
3128 /* receiving response after ioc failure */
3129 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
3132 switch (msg->mh.msg_id) {
3133 case BFI_FLASH_I2H_QUERY_RSP:
3134 status = be32_to_cpu(m.query->status);
3135 if (status == BFA_STATUS_OK) {
3137 struct bfa_flash_attr *attr, *f;
3139 attr = (struct bfa_flash_attr *) flash->ubuf;
3140 f = (struct bfa_flash_attr *) flash->dbuf_kva;
3141 attr->status = be32_to_cpu(f->status);
3142 attr->npart = be32_to_cpu(f->npart);
3143 for (i = 0; i < attr->npart; i++) {
3144 attr->part[i].part_type =
3145 be32_to_cpu(f->part[i].part_type);
3146 attr->part[i].part_instance =
3147 be32_to_cpu(f->part[i].part_instance);
3148 attr->part[i].part_off =
3149 be32_to_cpu(f->part[i].part_off);
3150 attr->part[i].part_size =
3151 be32_to_cpu(f->part[i].part_size);
3152 attr->part[i].part_len =
3153 be32_to_cpu(f->part[i].part_len);
3154 attr->part[i].part_status =
3155 be32_to_cpu(f->part[i].part_status);
3158 flash->status = status;
3159 bfa_flash_cb(flash);
3161 case BFI_FLASH_I2H_WRITE_RSP:
3162 status = be32_to_cpu(m.write->status);
3163 if (status != BFA_STATUS_OK || flash->residue == 0) {
3164 flash->status = status;
3165 bfa_flash_cb(flash);
3167 bfa_flash_write_send(flash);
3169 case BFI_FLASH_I2H_READ_RSP:
3170 status = be32_to_cpu(m.read->status);
3171 if (status != BFA_STATUS_OK) {
3172 flash->status = status;
3173 bfa_flash_cb(flash);
3175 u32 len = be32_to_cpu(m.read->length);
3176 memcpy(flash->ubuf + flash->offset,
3177 flash->dbuf_kva, len);
3178 flash->residue -= len;
3179 flash->offset += len;
3180 if (flash->residue == 0) {
3181 flash->status = status;
3182 bfa_flash_cb(flash);
3184 bfa_flash_read_send(flash);
3187 case BFI_FLASH_I2H_BOOT_VER_RSP:
3188 case BFI_FLASH_I2H_EVENT:
3196 * Flash memory info API.
3199 bfa_nw_flash_meminfo(void)
3201 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3205 * bfa_nw_flash_attach - Flash attach API.
3207 * @flash: flash structure
3208 * @ioc: ioc structure
3209 * @dev: device structure
3212 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
3216 flash->cbarg = NULL;
3219 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
3220 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
3221 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
3225 * bfa_nw_flash_memclaim - Claim memory for flash
3227 * @flash: flash structure
3228 * @dm_kva: pointer to virtual memory address
3229 * @dm_pa: physical memory address
3232 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
3234 flash->dbuf_kva = dm_kva;
3235 flash->dbuf_pa = dm_pa;
3236 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
3237 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3238 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
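/*
 * Illustrative setup sketch (not part of the driver): attach the flash module,
 * then claim bfa_nw_flash_meminfo() bytes of DMA-coherent memory for it. The
 * "pdev" name is a hypothetical pci_dev pointer.
 *
 *	dma_addr_t pa;
 *	u32 len = bfa_nw_flash_meminfo();
 *	void *kva = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 *
 *	bfa_nw_flash_attach(flash, ioc, dev);
 *	if (kva)
 *		bfa_nw_flash_memclaim(flash, kva, pa);
 */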
3242 * bfa_nw_flash_get_attr - Get flash attribute.
3244 * @flash: flash structure
3245 * @attr: flash attribute structure
3246 * @cbfn: callback function
3247 * @cbarg: callback argument
3252 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
3253 bfa_cb_flash cbfn, void *cbarg)
3255 struct bfi_flash_query_req *msg =
3256 (struct bfi_flash_query_req *) flash->mb.msg;
3258 if (!bfa_nw_ioc_is_operational(flash->ioc))
3259 return BFA_STATUS_IOC_NON_OP;
3262 return BFA_STATUS_DEVBUSY;
3266 flash->cbarg = cbarg;
3267 flash->ubuf = (u8 *) attr;
3269 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3270 bfa_ioc_portid(flash->ioc));
3271 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
3272 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3274 return BFA_STATUS_OK;
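/*
 * Usage sketch (illustrative only): query the flash attributes asynchronously;
 * completion is reported through the bfa_cb_flash callback (assumed here to
 * take a void *cbarg and an enum bfa_status). The "mymod" names are
 * hypothetical.
 *
 *	static void mymod_flash_attr_done(void *cbarg, enum bfa_status status)
 *	{
 *		struct bfa_flash_attr *attr = cbarg;
 *
 *		if (status == BFA_STATUS_OK)
 *			pr_info("flash has %u partitions\n", attr->npart);
 *	}
 *
 *	bfa_nw_flash_get_attr(flash, attr, mymod_flash_attr_done, attr);
 */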
3278 * bfa_nw_flash_update_part - Update flash partition.
3280 * @flash: flash structure
3281 * @type: flash partition type
3282 * @instance: flash partition instance
3283 * @buf: update data buffer
3284 * @len: data buffer length
3285 * @offset: offset relative to the partition starting address
3286 * @cbfn: callback function
3287 * @cbarg: callback argument
3292 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
3293 void *buf, u32 len, u32 offset,
3294 bfa_cb_flash cbfn, void *cbarg)
3296 if (!bfa_nw_ioc_is_operational(flash->ioc))
3297 return BFA_STATUS_IOC_NON_OP;
3300 'len' must be a multiple of 4 bytes (i.e. on a word boundary)
3302 if (!len || (len & 0x03))
3303 return BFA_STATUS_FLASH_BAD_LEN;
3305 if (type == BFA_FLASH_PART_MFG)
3306 return BFA_STATUS_EINVAL;
3309 return BFA_STATUS_DEVBUSY;
3313 flash->cbarg = cbarg;
3315 flash->instance = instance;
3316 flash->residue = len;
3318 flash->addr_off = offset;
3321 bfa_flash_write_send(flash);
3323 return BFA_STATUS_OK;
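/*
 * Usage sketch (illustrative only): write an update image into a flash
 * partition; 'len' must be a multiple of 4 and the MFG partition is rejected,
 * per the checks above. BFA_FLASH_PART_FWIMG is used as an example partition
 * type, and "img", "img_len", "mymod_update_done" and "mymod" are
 * hypothetical.
 *
 *	enum bfa_status rc;
 *
 *	rc = bfa_nw_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				      img, img_len, 0,
 *				      mymod_update_done, mymod);
 *	if (rc != BFA_STATUS_OK)
 *		pr_err("flash update could not be started: %d\n", rc);
 */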
3327 * bfa_nw_flash_read_part - Read flash partition.
3329 * @flash: flash structure
3330 * @type: flash partition type
3331 * @instance: flash partition instance
3332 * @buf: read data buffer
3333 * @len: data buffer length
3334 * @offset: offset relative to the partition starting address
3335 * @cbfn: callback function
3336 * @cbarg: callback argument
3341 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
3342 void *buf, u32 len, u32 offset,
3343 bfa_cb_flash cbfn, void *cbarg)
3345 if (!bfa_nw_ioc_is_operational(flash->ioc))
3346 return BFA_STATUS_IOC_NON_OP;
3349 'len' must be a multiple of 4 bytes (i.e. on a word boundary)
3351 if (!len || (len & 0x03))
3352 return BFA_STATUS_FLASH_BAD_LEN;
3355 return BFA_STATUS_DEVBUSY;
3359 flash->cbarg = cbarg;
3361 flash->instance = instance;
3362 flash->residue = len;
3364 flash->addr_off = offset;
3367 bfa_flash_read_send(flash);
3369 return BFA_STATUS_OK;
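/*
 * Usage sketch (illustrative only): read 'len' bytes (a multiple of 4) from a
 * partition into 'buf' starting at 'offset'; completion is reported through
 * the callback. Apart from the API itself, the names below are hypothetical.
 *
 *	rc = bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				    buf, len, 0, mymod_read_done, mymod);
 */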