2 * CXL Flash Device Driver
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
7 * Copyright (C) 2015 IBM Corporation
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
15 #include <linux/delay.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
20 #include <asm/unaligned.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_host.h>
26 #include <uapi/scsi/cxlflash_ioctl.h>
32 MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33 MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35 MODULE_LICENSE("GPL");
37 static struct class *cxlflash_class;
38 static u32 cxlflash_major;
39 static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
42 * process_cmd_err() - command error handler
43 * @cmd: AFU command that experienced the error.
44 * @scp: SCSI command associated with the AFU command in error.
46 * Translates error bits from AFU command to SCSI command results.
48 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
50 struct afu *afu = cmd->parent;
51 struct cxlflash_cfg *cfg = afu->parent;
52 struct device *dev = &cfg->dev->dev;
53 struct sisl_ioarcb *ioarcb;
54 struct sisl_ioasa *ioasa;
63 if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
65 scsi_set_resid(scp, resid);
66 dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
67 __func__, cmd, scp, resid);
70 if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
71 dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
73 scp->result = (DID_ERROR << 16);
76 dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
77 "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
78 ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
79 ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
81 if (ioasa->rc.scsi_rc) {
82 /* We have a SCSI status */
83 if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
84 memcpy(scp->sense_buffer, ioasa->sense_data,
86 scp->result = ioasa->rc.scsi_rc;
88 scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
92 * We encountered an error. Set scp->result based on the nature of the error.
95 if (ioasa->rc.fc_rc) {
96 /* We have an FC status */
97 switch (ioasa->rc.fc_rc) {
98 case SISL_FC_RC_LINKDOWN:
99 scp->result = (DID_REQUEUE << 16);
101 case SISL_FC_RC_RESID:
102 /* This indicates an FCP resid underrun */
103 if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
104 /* If the SISL_RC_FLAGS_OVERRUN flag was set,
105 * then we will handle this error elsewhere.
106 * If not, then we must handle it here.
107 * This is probably an AFU bug.
109 scp->result = (DID_ERROR << 16);
112 case SISL_FC_RC_RESIDERR:
113 /* Resid mismatch between adapter and device */
114 case SISL_FC_RC_TGTABORT:
115 case SISL_FC_RC_ABORTOK:
116 case SISL_FC_RC_ABORTFAIL:
117 case SISL_FC_RC_NOLOGI:
118 case SISL_FC_RC_ABORTPEND:
119 case SISL_FC_RC_WRABORTPEND:
120 case SISL_FC_RC_NOEXP:
121 case SISL_FC_RC_INUSE:
122 scp->result = (DID_ERROR << 16);
127 if (ioasa->rc.afu_rc) {
128 /* We have an AFU error */
129 switch (ioasa->rc.afu_rc) {
130 case SISL_AFU_RC_NO_CHANNELS:
131 scp->result = (DID_NO_CONNECT << 16);
133 case SISL_AFU_RC_DATA_DMA_ERR:
134 switch (ioasa->afu_extra) {
135 case SISL_AFU_DMA_ERR_PAGE_IN:
137 scp->result = (DID_IMM_RETRY << 16);
139 case SISL_AFU_DMA_ERR_INVALID_EA:
141 scp->result = (DID_ERROR << 16);
144 case SISL_AFU_RC_OUT_OF_DATA_BUFS:
146 scp->result = (DID_ALLOC_FAILURE << 16);
149 scp->result = (DID_ERROR << 16);
155 * cmd_complete() - command completion handler
156 * @cmd: AFU command that has completed.
158 * For SCSI commands this routine prepares and submits commands that have
159 * either completed or timed out to the SCSI stack. For internal commands
160 * (TMF or AFU), this routine simply notifies the originator that the
161 * command has completed.
163 static void cmd_complete(struct afu_cmd *cmd)
165 struct scsi_cmnd *scp;
167 struct afu *afu = cmd->parent;
168 struct cxlflash_cfg *cfg = afu->parent;
169 struct device *dev = &cfg->dev->dev;
170 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
172 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
173 list_del(&cmd->list);
174 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
178 if (unlikely(cmd->sa.ioasc))
179 process_cmd_err(cmd, scp);
181 scp->result = (DID_OK << 16);
183 dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
184 __func__, scp, scp->result, cmd->sa.ioasc);
186 } else if (cmd->cmd_tmf) {
187 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
188 cfg->tmf_active = false;
189 wake_up_all_locked(&cfg->tmf_waitq);
190 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
192 complete(&cmd->cevent);
196 * flush_pending_cmds() - flush all pending commands on this hardware queue
197 * @hwq: Hardware queue to flush.
199 * The hardware send queue lock associated with this hardware queue must be
200 * held when calling this routine.
202 static void flush_pending_cmds(struct hwq *hwq)
204 struct cxlflash_cfg *cfg = hwq->afu->parent;
205 struct afu_cmd *cmd, *tmp;
206 struct scsi_cmnd *scp;
209 list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
210 /* Bypass command when on a doneq, cmd_complete() will handle */
211 if (!list_empty(&cmd->queue))
214 list_del(&cmd->list);
218 scp->result = (DID_IMM_RETRY << 16);
221 cmd->cmd_aborted = true;
224 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
225 cfg->tmf_active = false;
226 wake_up_all_locked(&cfg->tmf_waitq);
227 spin_unlock_irqrestore(&cfg->tmf_slock,
230 complete(&cmd->cevent);
236 * context_reset() - reset context via specified register
237 * @hwq: Hardware queue owning the context to be reset.
238 * @reset_reg: MMIO register to perform reset.
240 * When the reset is successful, the SISLite specification guarantees that
241 * the AFU has aborted all currently pending I/O. Accordingly, these commands must be flushed.
244 * Return: 0 on success, -errno on failure
246 static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
248 struct cxlflash_cfg *cfg = hwq->afu->parent;
249 struct device *dev = &cfg->dev->dev;
255 dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
257 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
259 writeq_be(val, reset_reg);
261 val = readq_be(reset_reg);
262 if ((val & 0x1) == 0x0) {
267 /* Double delay each time */
269 } while (nretry++ < MC_ROOM_RETRY_CNT);
272 flush_pending_cmds(hwq);
274 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
276 dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
277 __func__, rc, val, nretry);
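/*
 * Hedged note on the retry loop above: the "Double delay each time" comment
 * refers to an exponential backoff between polls (e.g. udelay(1 << nretry),
 * an assumption here since the delay line itself is not shown). Under that
 * scheme the total worst-case wait before giving up is sum(2^k) for
 * k = 0..MC_ROOM_RETRY_CNT, i.e. roughly 2^(MC_ROOM_RETRY_CNT + 1)
 * microseconds.
 */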
282 * context_reset_ioarrin() - reset context via IOARRIN register
283 * @hwq: Hardware queue owning the context to be reset.
285 * Return: 0 on success, -errno on failure
287 static int context_reset_ioarrin(struct hwq *hwq)
289 return context_reset(hwq, &hwq->host_map->ioarrin);
293 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
294 * @hwq: Hardware queue owning the context to be reset.
296 * Return: 0 on success, -errno on failure
298 static int context_reset_sq(struct hwq *hwq)
300 return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
304 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
305 * @afu: AFU associated with the host.
306 * @cmd: AFU command to send.
309 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
311 static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
313 struct cxlflash_cfg *cfg = afu->parent;
314 struct device *dev = &cfg->dev->dev;
315 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
321 * To avoid the performance penalty of MMIO, spread the update of
322 * 'room' over multiple commands.
324 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
325 if (--hwq->room < 0) {
326 room = readq_be(&hwq->host_map->cmd_room);
328 dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
329 "0x%02X, room=0x%016llX\n",
330 __func__, cmd->rcb.cdb[0], room);
332 rc = SCSI_MLQUEUE_HOST_BUSY;
335 hwq->room = room - 1;
338 list_add(&cmd->list, &hwq->pending_cmds);
339 writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
341 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
342 dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
343 cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
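/*
 * A minimal sketch (not driver code) of the credit-caching idiom used in
 * send_cmd_ioarrin() above: one MMIO read of cmd_room is amortized across
 * many submissions by caching the count in hwq->room and re-reading only
 * when the cache runs dry. The helper name is hypothetical.
 */
#if 0	/* illustrative only */
static bool try_reserve_ioarrin_slot(struct hwq *hwq)
{
	s64 room;

	if (--hwq->room >= 0)		/* fast path: spend a cached credit */
		return true;

	/* slow path: one MMIO read refreshes the cached credit count */
	room = readq_be(&hwq->host_map->cmd_room);
	hwq->room = room - 1;
	return room > 0;
}
#endif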
348 * send_cmd_sq() - sends an AFU command via SQ ring
349 * @afu: AFU associated with the host.
350 * @cmd: AFU command to send.
353 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
355 static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
357 struct cxlflash_cfg *cfg = afu->parent;
358 struct device *dev = &cfg->dev->dev;
359 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
364 newval = atomic_dec_if_positive(&hwq->hsq_credits);
366 rc = SCSI_MLQUEUE_HOST_BUSY;
370 cmd->rcb.ioasa = &cmd->sa;
372 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
374 *hwq->hsq_curr = cmd->rcb;
375 if (hwq->hsq_curr < hwq->hsq_end)
378 hwq->hsq_curr = hwq->hsq_start;
380 list_add(&cmd->list, &hwq->pending_cmds);
381 writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
383 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
385 dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
386 "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
387 cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
388 readq_be(&hwq->host_map->sq_head),
389 readq_be(&hwq->host_map->sq_tail));
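/*
 * Hedged note on the SQ flow above: hsq_credits counts free SQ slots.
 * atomic_dec_if_positive() tests and reserves a slot in one step (it
 * refuses to decrement below zero), and process_hrrq() returns credits
 * via atomic_inc() as completions are harvested. Note the ring is seeded
 * with NUM_SQ_ENTRY - 1 credits at init, leaving one slot unspent; this
 * is a common way of keeping a ring's full and empty states
 * distinguishable.
 */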
394 * wait_resp() - polls for a response or timeout to a sent AFU command
395 * @afu: AFU associated with the host.
396 * @cmd: AFU command that was sent.
398 * Return: 0 on success, -errno on failure
400 static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
402 struct cxlflash_cfg *cfg = afu->parent;
403 struct device *dev = &cfg->dev->dev;
405 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
407 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
411 if (cmd->cmd_aborted)
414 if (unlikely(cmd->sa.ioasc != 0)) {
415 dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
416 __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
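/*
 * Timing note (assumption: rcb.timeout is expressed in seconds, per the
 * SISLite IOARCB definition): the wait above allows twice the command's
 * own timeout, converted to jiffies; a command with a 5 second timeout
 * is given up to 10 seconds to complete.
 */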
424 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
425 * @host: SCSI host associated with device.
426 * @scp: SCSI command to send.
427 * @afu: AFU associated with the host.
429 * Hashes a command based upon the hardware queue mode.
431 * Return: Trusted index of target hardware queue
433 static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
439 if (afu->num_hwqs == 1)
442 switch (afu->hwq_mode) {
444 hwq = afu->hwq_rr_count++ % afu->num_hwqs;
447 tag = blk_mq_unique_tag(scp->request);
448 hwq = blk_mq_unique_tag_to_hwq(tag);
451 hwq = smp_processor_id() % afu->num_hwqs;
461 * send_tmf() - sends a Task Management Function (TMF)
462 * @cfg: Internal structure associated with the host.
463 * @sdev: SCSI device destined for TMF.
464 * @tmfcmd: TMF command to send.
467 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
469 static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
472 struct afu *afu = cfg->afu;
473 struct afu_cmd *cmd = NULL;
474 struct device *dev = &cfg->dev->dev;
475 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
481 buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
482 if (unlikely(!buf)) {
483 dev_err(dev, "%s: no memory for command\n", __func__);
488 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
489 INIT_LIST_HEAD(&cmd->queue);
491 /* When a Task Management Function is active, do not send another */
492 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
494 wait_event_interruptible_lock_irq(cfg->tmf_waitq,
497 cfg->tmf_active = true;
498 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
502 cmd->hwq_index = hwq->index;
504 cmd->rcb.ctx_id = hwq->ctx_hndl;
505 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
506 cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
507 cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
508 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
509 SISL_REQ_FLAGS_SUP_UNDERRUN |
510 SISL_REQ_FLAGS_TMF_CMD);
511 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
513 rc = afu->send_cmd(afu, cmd);
515 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
516 cfg->tmf_active = false;
517 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
521 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
522 to = msecs_to_jiffies(5000);
523 to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
528 dev_err(dev, "%s: TMF timed out\n", __func__);
530 } else if (cmd->cmd_aborted) {
531 dev_err(dev, "%s: TMF aborted\n", __func__);
533 } else if (cmd->sa.ioasc) {
534 dev_err(dev, "%s: TMF failed ioasc=%08x\n",
535 __func__, cmd->sa.ioasc);
538 cfg->tmf_active = false;
539 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
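/*
 * Sketch of the over-allocate-and-align idiom used by send_tmf() above
 * (send_afu_cmd() later uses the same pattern): the allocation is padded
 * by __alignof__(*cmd) - 1 bytes so the command pointer can be rounded up
 * with PTR_ALIGN(). Illustrative only:
 */
#if 0	/* illustrative only */
void *buf = kzalloc(sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
		    GFP_KERNEL);
struct afu_cmd *cmd = PTR_ALIGN(buf, __alignof__(struct afu_cmd));

/* Free with kfree(buf), not kfree(cmd): cmd may not equal buf. */
#endif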
546 * cxlflash_driver_info() - information handler for this host driver
547 * @host: SCSI host associated with device.
549 * Return: A string describing the device.
551 static const char *cxlflash_driver_info(struct Scsi_Host *host)
553 return CXLFLASH_ADAPTER_NAME;
557 * cxlflash_queuecommand() - sends a mid-layer request
558 * @host: SCSI host associated with device.
559 * @scp: SCSI command to send.
561 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
563 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
565 struct cxlflash_cfg *cfg = shost_priv(host);
566 struct afu *afu = cfg->afu;
567 struct device *dev = &cfg->dev->dev;
568 struct afu_cmd *cmd = sc_to_afuci(scp);
569 struct scatterlist *sg = scsi_sglist(scp);
570 int hwq_index = cmd_to_target_hwq(host, scp, afu);
571 struct hwq *hwq = get_hwq(afu, hwq_index);
572 u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
576 dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
577 "cdb=(%08x-%08x-%08x-%08x)\n",
578 __func__, scp, host->host_no, scp->device->channel,
579 scp->device->id, scp->device->lun,
580 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
581 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
582 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
583 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
586 * If a Task Management Function is active, wait for it to complete
587 * before continuing with regular commands.
589 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
590 if (cfg->tmf_active) {
591 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
592 rc = SCSI_MLQUEUE_HOST_BUSY;
595 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
597 switch (cfg->state) {
601 dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
602 rc = SCSI_MLQUEUE_HOST_BUSY;
605 dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
606 scp->result = (DID_NO_CONNECT << 16);
615 cmd->rcb.data_len = sg->length;
616 cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
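		/* Assumption: the SCSI host template caps sg_tablesize at 1,
		 * so a single scatterlist element describes the entire
		 * transfer here. */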
621 cmd->hwq_index = hwq_index;
623 cmd->rcb.ctx_id = hwq->ctx_hndl;
624 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
625 cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
626 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
628 if (scp->sc_data_direction == DMA_TO_DEVICE)
629 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
631 cmd->rcb.req_flags = req_flags;
632 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
634 rc = afu->send_cmd(afu, cmd);
640 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
641 * @cfg: Internal structure associated with the host.
643 static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
645 struct pci_dev *pdev = cfg->dev;
647 if (pci_channel_offline(pdev))
648 wait_event_timeout(cfg->reset_waitq,
649 !pci_channel_offline(pdev),
650 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
654 * free_mem() - free memory associated with the AFU
655 * @cfg: Internal structure associated with the host.
657 static void free_mem(struct cxlflash_cfg *cfg)
659 struct afu *afu = cfg->afu;
662 free_pages((ulong)afu, get_order(sizeof(struct afu)));
668 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
669 * @cfg: Internal structure associated with the host.
671 static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
673 if (cfg->async_reset_cookie == 0)
676 /* Wait until all async calls prior to this cookie have completed */
677 async_synchronize_cookie(cfg->async_reset_cookie + 1);
678 cfg->async_reset_cookie = 0;
682 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
683 * @cfg: Internal structure associated with the host.
685 * Safe to call with AFU in a partially allocated/initialized state.
687 * Cancels scheduled worker threads, waits for any active internal AFU
688 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
690 static void stop_afu(struct cxlflash_cfg *cfg)
692 struct afu *afu = cfg->afu;
696 cancel_work_sync(&cfg->work_q);
697 if (!current_is_async())
698 cxlflash_reset_sync(cfg);
701 while (atomic_read(&afu->cmds_active))
704 if (afu_is_irqpoll_enabled(afu)) {
705 for (i = 0; i < afu->num_hwqs; i++) {
706 hwq = get_hwq(afu, i);
708 irq_poll_disable(&hwq->irqpoll);
712 if (likely(afu->afu_map)) {
713 cxl_psa_unmap((void __iomem *)afu->afu_map);
720 * term_intr() - disables all AFU interrupts
721 * @cfg: Internal structure associated with the host.
722 * @level: Depth of allocation, where to begin waterfall tear down.
723 * @index: Index of the hardware queue.
725 * Safe to call with AFU/MC in partially allocated/initialized state.
727 static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
730 struct afu *afu = cfg->afu;
731 struct device *dev = &cfg->dev->dev;
735 dev_err(dev, "%s: returning with NULL afu\n", __func__);
739 hwq = get_hwq(afu, index);
742 dev_err(dev, "%s: returning with NULL MC\n", __func__);
748 /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
749 if (index == PRIMARY_HWQ)
750 cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
752 cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
754 cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
756 cxl_free_afu_irqs(hwq->ctx);
759 /* No action required */
765 * term_mc() - terminates the master context
766 * @cfg: Internal structure associated with the host.
767 * @index: Index of the hardware queue.
769 * Safe to call with AFU/MC in partially allocated/initialized state.
771 static void term_mc(struct cxlflash_cfg *cfg, u32 index)
773 struct afu *afu = cfg->afu;
774 struct device *dev = &cfg->dev->dev;
779 dev_err(dev, "%s: returning with NULL afu\n", __func__);
783 hwq = get_hwq(afu, index);
786 dev_err(dev, "%s: returning with NULL MC\n", __func__);
790 WARN_ON(cxl_stop_context(hwq->ctx));
791 if (index != PRIMARY_HWQ)
792 WARN_ON(cxl_release_context(hwq->ctx));
795 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
796 flush_pending_cmds(hwq);
797 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
801 * term_afu() - terminates the AFU
802 * @cfg: Internal structure associated with the host.
804 * Safe to call with AFU/MC in partially allocated/initialized state.
806 static void term_afu(struct cxlflash_cfg *cfg)
808 struct device *dev = &cfg->dev->dev;
812 * Tear down is carefully orchestrated to ensure
813 * no interrupts can come in when the problem state area is unmapped:
816 * 1) Disable all AFU interrupts for each master
817 * 2) Unmap the problem state area
818 * 3) Stop each master context
820 for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
821 term_intr(cfg, UNMAP_THREE, k);
825 for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
828 dev_dbg(dev, "%s: returning\n", __func__);
832 * notify_shutdown() - notifies device of pending shutdown
833 * @cfg: Internal structure associated with the host.
834 * @wait: Whether to wait for shutdown processing to complete.
836 * This function will notify the AFU that the adapter is being shutdown
837 * and will wait for shutdown processing to complete if wait is true.
838 * This notification should flush pending I/Os to the device and halt
839 * further I/Os until the next AFU reset is issued and device restarted.
841 static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
843 struct afu *afu = cfg->afu;
844 struct device *dev = &cfg->dev->dev;
845 struct dev_dependent_vals *ddv;
846 __be64 __iomem *fc_port_regs;
848 int i, retry_cnt = 0;
850 ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
851 if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
854 if (!afu || !afu->afu_map) {
855 dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
860 for (i = 0; i < cfg->num_fc_ports; i++) {
861 fc_port_regs = get_fc_port_regs(cfg, i);
863 reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
864 reg |= SISL_FC_SHUTDOWN_NORMAL;
865 writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
871 /* Wait up to 1.5 seconds for shutdown processing to complete */
872 for (i = 0; i < cfg->num_fc_ports; i++) {
873 fc_port_regs = get_fc_port_regs(cfg, i);
877 status = readq_be(&fc_port_regs[FC_STATUS / 8]);
878 if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
880 if (++retry_cnt >= MC_RETRY_CNT) {
881 dev_dbg(dev, "%s: port %d shutdown processing "
882 "not yet completed\n", __func__, i);
885 msleep(100 * retry_cnt);
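/*
 * Hedged timing note: with the linear backoff above, n sleeps cost
 * 100 * (1 + 2 + ... + n) = 50 * n * (n + 1) ms in total, so the
 * "up to 1.5 seconds" bound quoted earlier corresponds to roughly
 * five sleeps per port.
 */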
891 * cxlflash_get_minor() - gets the first available minor number
893 * Return: Unique minor number that can be used to create the character device.
895 static int cxlflash_get_minor(void)
900 bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
901 if (bit >= CXLFLASH_MAX_ADAPTERS)
904 minor = bit & MINORMASK;
905 set_bit(minor, cxlflash_minor);
910 * cxlflash_put_minor() - releases the minor number
911 * @minor: Minor number that is no longer needed.
913 static void cxlflash_put_minor(int minor)
915 clear_bit(minor, cxlflash_minor);
919 * cxlflash_release_chrdev() - release the character device for the host
920 * @cfg: Internal structure associated with the host.
922 static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
924 device_unregister(cfg->chardev);
926 cdev_del(&cfg->cdev);
927 cxlflash_put_minor(MINOR(cfg->cdev.dev));
931 * cxlflash_remove() - PCI entry point to tear down host
932 * @pdev: PCI device associated with the host.
934 * Safe to use as a cleanup in partially allocated/initialized state. Note that
935 * the reset_waitq is flushed as part of the stop/termination of user contexts.
937 static void cxlflash_remove(struct pci_dev *pdev)
939 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
940 struct device *dev = &pdev->dev;
943 if (!pci_is_enabled(pdev)) {
944 dev_dbg(dev, "%s: Device is disabled\n", __func__);
948 /* If a Task Management Function is active, wait for it to complete
949 * before continuing with remove.
951 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
953 wait_event_interruptible_lock_irq(cfg->tmf_waitq,
956 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
958 /* Notify AFU and wait for shutdown processing to complete */
959 notify_shutdown(cfg, true);
961 cfg->state = STATE_FAILTERM;
962 cxlflash_stop_term_user_contexts(cfg);
964 switch (cfg->init_state) {
965 case INIT_STATE_CDEV:
966 cxlflash_release_chrdev(cfg);
967 case INIT_STATE_SCSI:
968 cxlflash_term_local_luns(cfg);
969 scsi_remove_host(cfg->host);
973 pci_disable_device(pdev);
974 case INIT_STATE_NONE:
976 scsi_host_put(cfg->host);
980 dev_dbg(dev, "%s: returning\n", __func__);
984 * alloc_mem() - allocates the AFU and its command pool
985 * @cfg: Internal structure associated with the host.
987 * A partially allocated state remains on failure.
991 * Return: 0 on success, -ENOMEM on failure to allocate memory
993 static int alloc_mem(struct cxlflash_cfg *cfg)
996 struct device *dev = &cfg->dev->dev;
998 /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
999 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1000 get_order(sizeof(struct afu)));
1001 if (unlikely(!cfg->afu)) {
1002 dev_err(dev, "%s: cannot get %d free pages\n",
1003 __func__, get_order(sizeof(struct afu)));
1007 cfg->afu->parent = cfg;
1008 cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
1009 cfg->afu->afu_map = NULL;
1015 * init_pci() - initializes the host as a PCI device
1016 * @cfg: Internal structure associated with the host.
1018 * Return: 0 on success, -errno on failure
1020 static int init_pci(struct cxlflash_cfg *cfg)
1022 struct pci_dev *pdev = cfg->dev;
1023 struct device *dev = &cfg->dev->dev;
1026 rc = pci_enable_device(pdev);
1027 if (rc || pci_channel_offline(pdev)) {
1028 if (pci_channel_offline(pdev)) {
1029 cxlflash_wait_for_pci_err_recovery(cfg);
1030 rc = pci_enable_device(pdev);
1034 dev_err(dev, "%s: Cannot enable adapter\n", __func__);
1035 cxlflash_wait_for_pci_err_recovery(cfg);
1041 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1046 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
1047 * @cfg: Internal structure associated with the host.
1049 * Return: 0 on success, -errno on failure
1051 static int init_scsi(struct cxlflash_cfg *cfg)
1053 struct pci_dev *pdev = cfg->dev;
1054 struct device *dev = &cfg->dev->dev;
1057 rc = scsi_add_host(cfg->host, &pdev->dev);
1059 dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
1063 scsi_scan_host(cfg->host);
1066 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1071 * set_port_online() - transitions the specified host FC port to online state
1072 * @fc_regs: Top of MMIO region defined for specified port.
1074 * The provided MMIO region must be mapped prior to call. Online state means
1075 * that the FC link layer has synced, completed the handshaking process, and
1076 * is ready for login to start.
1078 static void set_port_online(__be64 __iomem *fc_regs)
1082 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1083 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
1084 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
1085 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1089 * set_port_offline() - transitions the specified host FC port to offline state
1090 * @fc_regs: Top of MMIO region defined for specified port.
1092 * The provided MMIO region must be mapped prior to call.
1094 static void set_port_offline(__be64 __iomem *fc_regs)
1098 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1099 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
1100 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
1101 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1105 * wait_port_online() - waits for the specified host FC port to come online
1106 * @fc_regs: Top of MMIO region defined for specified port.
1107 * @delay_us: Number of microseconds to delay between reading port status.
1108 * @nretry: Number of cycles to retry reading port status.
1110 * The provided MMIO region must be mapped prior to call. This will time out
1111 * when the cable is not plugged in.
1114 * Return: TRUE (1) when the specified port is online
1115 * FALSE (0) when the specified port fails to come online after timeout
1117 static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1121 WARN_ON(delay_us < 1000);
1124 msleep(delay_us / 1000);
1125 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1126 if (status == U64_MAX)
1128 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1131 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1135 * wait_port_offline() - waits for the specified host FC port to go offline
1136 * @fc_regs: Top of MMIO region defined for specified port.
1137 * @delay_us: Number of microseconds to delay between reading port status.
1138 * @nretry: Number of cycles to retry reading port status.
1140 * The provided MMIO region must be mapped prior to call.
1143 * Return: TRUE (1) when the specified port is offline
1144 * FALSE (0) when the specified port fails to go offline after timeout
1146 static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1150 WARN_ON(delay_us < 1000);
1153 msleep(delay_us / 1000);
1154 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1155 if (status == U64_MAX)
1157 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1160 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1164 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1165 * @afu: AFU associated with the host that owns the specified FC port.
1166 * @port: Port number being configured.
1167 * @fc_regs: Top of MMIO region defined for specified port.
1168 * @wwpn: The world-wide-port-number previously discovered for port.
1170 * The provided MMIO region must be mapped prior to call. As part of the
1171 * sequence to configure the WWPN, the port is toggled offline and then back
1172 * online. This toggling action can cause this routine to delay up to a few
1173 * seconds. When configured to use the internal LUN feature of the AFU, a
1174 * failure to come online is overridden.
1176 static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1179 struct cxlflash_cfg *cfg = afu->parent;
1180 struct device *dev = &cfg->dev->dev;
1182 set_port_offline(fc_regs);
1183 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1184 FC_PORT_STATUS_RETRY_CNT)) {
1185 dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
1189 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1191 set_port_online(fc_regs);
1192 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1193 FC_PORT_STATUS_RETRY_CNT)) {
1194 dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
1200 * afu_link_reset() - resets the specified host FC port
1201 * @afu: AFU associated with the host that owns the specified FC port.
1202 * @port: Port number being configured.
1203 * @fc_regs: Top of MMIO region defined for specified port.
1205 * The provided MMIO region must be mapped prior to call. The sequence to
1206 * reset the port involves toggling it offline and then back online. This
1207 * action can cause this routine to delay up to a few seconds. An effort
1208 * is made to maintain link with the device by switching the host to use
1209 * the alternate port exclusively while the reset takes place.
1212 static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1214 struct cxlflash_cfg *cfg = afu->parent;
1215 struct device *dev = &cfg->dev->dev;
1218 /* first switch the AFU to the other links, if any */
1219 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1220 port_sel &= ~(1ULL << port);
1221 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1222 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1224 set_port_offline(fc_regs);
1225 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1226 FC_PORT_STATUS_RETRY_CNT))
1227 dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1230 set_port_online(fc_regs);
1231 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1232 FC_PORT_STATUS_RETRY_CNT))
1233 dev_err(dev, "%s: wait on port %d to go online timed out\n",
1236 /* switch back to include this port */
1237 port_sel |= (1ULL << port);
1238 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1239 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1241 dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1245 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1246 * @afu: AFU associated with the host.
1248 static void afu_err_intr_init(struct afu *afu)
1250 struct cxlflash_cfg *cfg = afu->parent;
1251 __be64 __iomem *fc_port_regs;
1253 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1256 /* global async interrupts: AFU clears afu_ctrl on context exit
1257 * if async interrupts were sent to that context. This prevents
1258 * the AFU from sending further async interrupts when there is
1260 * nobody to receive them.
1264 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1265 /* set LISN# to send and point to primary master context */
1266 reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1268 if (afu->internal_lun)
1269 reg |= 1; /* Bit 63 indicates local lun */
1270 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1272 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1273 /* unmask bits that are of interest */
1274 /* note: afu can send an interrupt after this step */
1275 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1276 /* clear again in case a bit came on after previous clear but before unmask */
1278 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1280 /* Clear/Set internal lun bits */
1281 fc_port_regs = get_fc_port_regs(cfg, 0);
1282 reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1283 reg &= SISL_FC_INTERNAL_MASK;
1284 if (afu->internal_lun)
1285 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1286 writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1288 /* now clear FC errors */
1289 for (i = 0; i < cfg->num_fc_ports; i++) {
1290 fc_port_regs = get_fc_port_regs(cfg, i);
1292 writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
1293 writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1296 /* sync interrupts for master's IOARRIN write */
1297 /* note that unlike asyncs, there can be no pending sync interrupts */
1298 /* at this time (this is a fresh context and master has not written */
1299 /* IOARRIN yet), so there is nothing to clear. */
1301 /* set LISN#, it is always sent to the context that wrote IOARRIN */
1302 for (i = 0; i < afu->num_hwqs; i++) {
1303 hwq = get_hwq(afu, i);
1305 writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
1306 writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1311 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1312 * @irq: Interrupt number.
1313 * @data: Private data provided at interrupt registration, the hardware queue.
1315 * Return: Always return IRQ_HANDLED.
1317 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1319 struct hwq *hwq = (struct hwq *)data;
1320 struct cxlflash_cfg *cfg = hwq->afu->parent;
1321 struct device *dev = &cfg->dev->dev;
1325 reg = readq_be(&hwq->host_map->intr_status);
1326 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1328 if (reg_unmasked == 0UL) {
1329 dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1331 goto cxlflash_sync_err_irq_exit;
1334 dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1337 writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1339 cxlflash_sync_err_irq_exit:
1344 * process_hrrq() - process the read-response queue
1345 * @hwq: Hardware queue associated with the host.
1346 * @doneq: Queue of commands harvested from the RRQ.
1347 * @budget: Threshold of RRQ entries to process.
1349 * This routine must be called with the RRQ spin lock held and interrupts disabled.
1351 * Return: The number of entries processed.
1353 static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1355 struct afu *afu = hwq->afu;
1356 struct afu_cmd *cmd;
1357 struct sisl_ioasa *ioasa;
1358 struct sisl_ioarcb *ioarcb;
1359 bool toggle = hwq->toggle;
1362 *hrrq_start = hwq->hrrq_start,
1363 *hrrq_end = hwq->hrrq_end,
1364 *hrrq_curr = hwq->hrrq_curr;
1366 /* Process ready RRQ entries up to the specified budget (if any) */
1370 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1373 entry &= ~SISL_RESP_HANDLE_T_BIT;
1375 if (afu_is_sq_cmd_mode(afu)) {
1376 ioasa = (struct sisl_ioasa *)entry;
1377 cmd = container_of(ioasa, struct afu_cmd, sa);
1379 ioarcb = (struct sisl_ioarcb *)entry;
1380 cmd = container_of(ioarcb, struct afu_cmd, rcb);
1383 list_add_tail(&cmd->queue, doneq);
1385 /* Advance to next entry or wrap and flip the toggle bit */
1386 if (hrrq_curr < hrrq_end)
1389 hrrq_curr = hrrq_start;
1390 toggle ^= SISL_RESP_HANDLE_T_BIT;
1393 atomic_inc(&hwq->hsq_credits);
1396 if (budget > 0 && num_hrrq >= budget)
1400 hwq->hrrq_curr = hrrq_curr;
1401 hwq->toggle = toggle;
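/*
 * Hedged sketch of the RRQ toggle protocol consumed above: every RRQ entry
 * carries SISL_RESP_HANDLE_T_BIT, written by the AFU with the current
 * generation's toggle value. The host consumes entries only while the
 * entry's toggle matches its own copy (typically initialized to 1) and
 * flips that copy each time the ring wraps, so a stale entry from the
 * previous lap fails the comparison and cleanly ends the scan with no
 * head/tail MMIO reads on the fast path.
 */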
1407 * process_cmd_doneq() - process a queue of harvested RRQ commands
1408 * @doneq: Queue of completed commands.
1410 * Note that upon return the queue can no longer be trusted.
1412 static void process_cmd_doneq(struct list_head *doneq)
1414 struct afu_cmd *cmd, *tmp;
1416 WARN_ON(list_empty(doneq));
1418 list_for_each_entry_safe(cmd, tmp, doneq, queue)
1423 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
1424 * @irqpoll: IRQ poll structure associated with queue to poll.
1425 * @budget: Threshold of RRQ entries to process per poll.
1427 * Return: The number of entries processed.
1429 static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1431 struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1432 unsigned long hrrq_flags;
1434 int num_entries = 0;
1436 spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1438 num_entries = process_hrrq(hwq, &doneq, budget);
1439 if (num_entries < budget)
1440 irq_poll_complete(irqpoll);
1442 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1444 process_cmd_doneq(&doneq);
1449 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1450 * @irq: Interrupt number.
1451 * @data: Private data provided at interrupt registration, the hardware queue.
1453 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
1455 static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1457 struct hwq *hwq = (struct hwq *)data;
1458 struct afu *afu = hwq->afu;
1459 unsigned long hrrq_flags;
1461 int num_entries = 0;
1463 spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1465 if (afu_is_irqpoll_enabled(afu)) {
1466 irq_poll_sched(&hwq->irqpoll);
1467 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1471 num_entries = process_hrrq(hwq, &doneq, -1);
1472 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1474 if (num_entries == 0)
1477 process_cmd_doneq(&doneq);
1482 * Asynchronous interrupt information table
1485 * - Order matters here as this array is indexed by bit position.
1487 * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1488 * as complex and complains due to a lack of parentheses/braces.
1490 #define ASTATUS_FC(_a, _b, _c, _d) \
1491 { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1493 #define BUILD_SISL_ASTATUS_FC_PORT(_a) \
1494 ASTATUS_FC(_a, LINK_UP, "link up", 0), \
1495 ASTATUS_FC(_a, LINK_DN, "link down", 0), \
1496 ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
1497 ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
1498 ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1499 ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
1500 ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
1501 ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1503 static const struct asyc_intr_info ainfo[] = {
1504 BUILD_SISL_ASTATUS_FC_PORT(1),
1505 BUILD_SISL_ASTATUS_FC_PORT(0),
1506 BUILD_SISL_ASTATUS_FC_PORT(3),
1507 BUILD_SISL_ASTATUS_FC_PORT(2)
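/*
 * For reference, the first entry produced by BUILD_SISL_ASTATUS_FC_PORT(1)
 * expands via ASTATUS_FC(1, LINK_UP, "link up", 0) to:
 *
 *	{ SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }
 *
 * i.e. { status bit, description, port number, action flags }.
 */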
1511 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1512 * @irq: Interrupt number.
1513 * @data: Private data provided at interrupt registration, the hardware queue.
1515 * Return: Always return IRQ_HANDLED.
1517 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1519 struct hwq *hwq = (struct hwq *)data;
1520 struct afu *afu = hwq->afu;
1521 struct cxlflash_cfg *cfg = afu->parent;
1522 struct device *dev = &cfg->dev->dev;
1523 const struct asyc_intr_info *info;
1524 struct sisl_global_map __iomem *global = &afu->afu_map->global;
1525 __be64 __iomem *fc_port_regs;
1531 reg = readq_be(&global->regs.aintr_status);
1532 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1534 if (unlikely(reg_unmasked == 0)) {
1535 dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1540 /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1541 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1543 /* Check each bit that is on */
1544 for_each_set_bit(bit, (ulong *)®_unmasked, BITS_PER_LONG) {
1545 if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1551 if (unlikely(info->status != 1ULL << bit)) {
1557 fc_port_regs = get_fc_port_regs(cfg, port);
1559 dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1560 __func__, port, info->desc,
1561 readq_be(&fc_port_regs[FC_STATUS / 8]));
1564 * Do link reset first, some OTHER errors will set FC_ERROR
1565 * again if cleared before or w/o a reset
1567 if (info->action & LINK_RESET) {
1568 dev_err(dev, "%s: FC Port %d: resetting link\n",
1570 cfg->lr_state = LINK_RESET_REQUIRED;
1571 cfg->lr_port = port;
1572 schedule_work(&cfg->work_q);
1575 if (info->action & CLR_FC_ERROR) {
1576 reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1579 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1580 * should be the same and tracing one is sufficient.
1583 dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1584 __func__, port, reg);
1586 writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1587 writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1590 if (info->action & SCAN_HOST) {
1591 atomic_inc(&cfg->scan_host_needed);
1592 schedule_work(&cfg->work_q);
1601 * start_context() - starts the master context
1602 * @cfg: Internal structure associated with the host.
1603 * @index: Index of the hardware queue.
1605 * Return: A success or failure value from CXL services.
1607 static int start_context(struct cxlflash_cfg *cfg, u32 index)
1609 struct device *dev = &cfg->dev->dev;
1610 struct hwq *hwq = get_hwq(cfg->afu, index);
1613 rc = cxl_start_context(hwq->ctx,
1614 hwq->work.work_element_descriptor,
1617 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1622 * read_vpd() - obtains the WWPNs from VPD
1623 * @cfg: Internal structure associated with the host.
1624 * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
1626 * Return: 0 on success, -errno on failure
1628 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1630 struct device *dev = &cfg->dev->dev;
1631 struct pci_dev *pdev = cfg->dev;
1633 int ro_start, ro_size, i, j, k;
1635 char vpd_data[CXLFLASH_VPD_LEN];
1636 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1637 const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1638 cfg->dev_id->driver_data;
1639 const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1640 const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1642 /* Get the VPD data from the device */
1643 vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1644 if (unlikely(vpd_size <= 0)) {
1645 dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1646 __func__, vpd_size);
1651 /* Get the read only section offset */
1652 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1653 PCI_VPD_LRDT_RO_DATA);
1654 if (unlikely(ro_start < 0)) {
1655 dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1660 /* Get the read only section size, cap it when it extends beyond the read VPD */
1661 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1663 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1664 if (unlikely((i + j) > vpd_size)) {
1665 dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1666 __func__, (i + j), vpd_size);
1667 ro_size = vpd_size - i;
1671 * Find the offset of the WWPN tag within the read only
1672 * VPD data and validate the found field (partials are
1673 * no good to us). Convert the ASCII data to an integer
1674 * value. Note that we must copy to a temporary buffer
1675 * because the conversion service requires that the ASCII
1676 * string be terminated.
1678 * Allow for WWPN not being found for all devices, setting
1679 * the returned WWPN to zero when not found. Notify with a
1680 * log error for cards that should have had WWPN keywords
1681 * in the VPD - cards requiring WWPN will not have their
1682 * ports programmed and operate in an undefined state.
1684 for (k = 0; k < cfg->num_fc_ports; k++) {
1686 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1688 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1690 if (wwpn_vpd_required)
1691 dev_err(dev, "%s: Port %d WWPN not found\n",
1697 j = pci_vpd_info_field_size(&vpd_data[i]);
1698 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1699 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1700 dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1706 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
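		/* Assumption: WWPN_LEN is 16, letting it double as the
		 * numeric base below for a hex parse of the WWPN string. */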
1707 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1709 dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1715 dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1719 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1724 * init_pcr() - initialize the provisioning and control registers
1725 * @cfg: Internal structure associated with the host.
1727 * Also sets up fast access to the mapped registers and initializes AFU
1728 * command fields that never change.
1730 static void init_pcr(struct cxlflash_cfg *cfg)
1732 struct afu *afu = cfg->afu;
1733 struct sisl_ctrl_map __iomem *ctrl_map;
1737 for (i = 0; i < MAX_CONTEXT; i++) {
1738 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1739 /* Disrupt any clients that could be running */
1740 /* e.g. clients that survived a master restart */
1741 writeq_be(0, &ctrl_map->rht_start);
1742 writeq_be(0, &ctrl_map->rht_cnt_id);
1743 writeq_be(0, &ctrl_map->ctx_cap);
1746 /* Copy frequently used fields into hwq */
1747 for (i = 0; i < afu->num_hwqs; i++) {
1748 hwq = get_hwq(afu, i);
1750 hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
1751 hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1752 hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1754 /* Program the Endian Control for the master context */
1755 writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1760 * init_global() - initialize AFU global registers
1761 * @cfg: Internal structure associated with the host.
1763 static int init_global(struct cxlflash_cfg *cfg)
1765 struct afu *afu = cfg->afu;
1766 struct device *dev = &cfg->dev->dev;
1768 struct sisl_host_map __iomem *hmap;
1769 __be64 __iomem *fc_port_regs;
1770 u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1771 int i = 0, num_ports = 0;
1775 rc = read_vpd(cfg, &wwpn[0]);
1777 dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1781 /* Set up RRQ and SQ in HWQ for master issued cmds */
1782 for (i = 0; i < afu->num_hwqs; i++) {
1783 hwq = get_hwq(afu, i);
1784 hmap = hwq->host_map;
1786 writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1787 writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1789 if (afu_is_sq_cmd_mode(afu)) {
1790 writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1791 writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1795 /* AFU configuration */
1796 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1797 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1798 /* enable all auto retry options and control endianness */
1799 /* leave others at default: */
1800 /* CTX_CAP write protected, mbox_r does not clear on read and */
1801 /* checker on if dual afu */
1802 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1804 /* Global port select: select either port */
1805 if (afu->internal_lun) {
1806 /* Only use port 0 */
1807 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1810 writeq_be(PORT_MASK(cfg->num_fc_ports),
1811 &afu->afu_map->global.regs.afu_port_sel);
1812 num_ports = cfg->num_fc_ports;
1815 for (i = 0; i < num_ports; i++) {
1816 fc_port_regs = get_fc_port_regs(cfg, i);
1818 /* Unmask all errors (but they are still masked at AFU) */
1819 writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1820 /* Clear CRC error cnt & set a threshold */
1821 (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1822 writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1824 /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1826 afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1827 /* Programming WWPN back to back causes additional
1828 * offline/online transitions and a PLOGI
1833 /* Set up master's own CTX_CAP to allow real mode, host translation */
1834 /* tables, afu cmds and read/write GSCSI cmds. */
1835 /* First, unlock ctx_cap write by reading mbox */
1836 for (i = 0; i < afu->num_hwqs; i++) {
1837 hwq = get_hwq(afu, i);
1839 (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1840 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1841 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1842 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1843 &hwq->ctrl_map->ctx_cap);
1847 * Determine write-same unmap support for host by evaluating the unmap
1848 * sector support bit of the context control register associated with
1849 * the primary hardware queue. Note that while this status is reflected
1850 * in a context register, the outcome can be assumed to be host-wide.
1852 hwq = get_hwq(afu, PRIMARY_HWQ);
1853 reg = readq_be(&hwq->host_map->ctx_ctrl);
1854 if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1855 cfg->ws_unmap = true;
1857 /* Initialize heartbeat */
1858 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1864 * start_afu() - initializes and starts the AFU
1865 * @cfg: Internal structure associated with the host.
1867 static int start_afu(struct cxlflash_cfg *cfg)
1869 struct afu *afu = cfg->afu;
1870 struct device *dev = &cfg->dev->dev;
1877 /* Initialize each HWQ */
1878 for (i = 0; i < afu->num_hwqs; i++) {
1879 hwq = get_hwq(afu, i);
1881 /* After an AFU reset, RRQ entries are stale, clear them */
1882 memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1884 /* Initialize RRQ pointers */
1885 hwq->hrrq_start = &hwq->rrq_entry[0];
1886 hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1887 hwq->hrrq_curr = hwq->hrrq_start;
1890 /* Initialize spin locks */
1891 spin_lock_init(&hwq->hrrq_slock);
1892 spin_lock_init(&hwq->hsq_slock);
1895 if (afu_is_sq_cmd_mode(afu)) {
1896 memset(&hwq->sq, 0, sizeof(hwq->sq));
1897 hwq->hsq_start = &hwq->sq[0];
1898 hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1899 hwq->hsq_curr = hwq->hsq_start;
1901 atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1904 /* Initialize IRQ poll */
1905 if (afu_is_irqpoll_enabled(afu))
1906 irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1911 rc = init_global(cfg);
1913 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1918 * init_intr() - setup interrupt handlers for the master context
1919 * @cfg: Internal structure associated with the host.
1920 * @hwq: Hardware queue to initialize.
1922 * Return: UNDO_NOOP on success, undo level to unwind on failure
1924 static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1927 struct device *dev = &cfg->dev->dev;
1928 struct cxl_context *ctx = hwq->ctx;
1930 enum undo_level level = UNDO_NOOP;
1931 bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1932 int num_irqs = is_primary_hwq ? 3 : 2;
1934 rc = cxl_allocate_afu_irqs(ctx, num_irqs);
1936 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1942 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1943 "SISL_MSI_SYNC_ERROR");
1944 if (unlikely(rc <= 0)) {
1945 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1950 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1951 "SISL_MSI_RRQ_UPDATED");
1952 if (unlikely(rc <= 0)) {
1953 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1958 /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1959 if (!is_primary_hwq)
1962 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1963 "SISL_MSI_ASYNC_ERROR");
1964 if (unlikely(rc <= 0)) {
1965 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1974 * init_mc() - create and register as the master context
1975 * @cfg: Internal structure associated with the host.
1976 * @index: HWQ Index of the master context.
1978 * Return: 0 on success, -errno on failure
1980 static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1982 struct cxl_context *ctx;
1983 struct device *dev = &cfg->dev->dev;
1984 struct hwq *hwq = get_hwq(cfg->afu, index);
1986 enum undo_level level;
1988 hwq->afu = cfg->afu;
1990 INIT_LIST_HEAD(&hwq->pending_cmds);
1992 if (index == PRIMARY_HWQ)
1993 ctx = cxl_get_context(cfg->dev);
1995 ctx = cxl_dev_context_init(cfg->dev);
1996 if (unlikely(!ctx)) {
2004 /* Set it up as a master with the CXL */
2005 cxl_set_master(ctx);
2007 /* Reset AFU when initializing primary context */
2008 if (index == PRIMARY_HWQ) {
2009 rc = cxl_afu_reset(ctx);
2011 dev_err(dev, "%s: AFU reset failed rc=%d\n",
2017 level = init_intr(cfg, hwq);
2018 if (unlikely(level)) {
2019 dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level);
2023 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
2024 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
2025 * element (pe) that is embedded in the context (ctx)
2027 rc = start_context(cfg, index);
2029 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2030 level = UNMAP_THREE;
2035 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2038 term_intr(cfg, level, index);
2039 if (index != PRIMARY_HWQ)
2040 cxl_release_context(ctx);
2047 * get_num_afu_ports() - determines and configures the number of AFU ports
2048 * @cfg: Internal structure associated with the host.
2050 * This routine determines the number of AFU ports by converting the global
2051 * port selection mask. The converted value is only valid following an AFU
2052 * reset (explicit or power-on). This routine must be invoked shortly after
2053 * mapping as other routines are dependent on the number of ports during the
2054 * initialization sequence.
2056 * To support legacy AFUs that might not have reflected an initial global
2057 * port mask (value read is 0), default to the number of ports originally
2058 * supported by the cxlflash driver (2) before hardware with other port
2059 * offerings was introduced.
2061 static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2063 struct afu *afu = cfg->afu;
2064 struct device *dev = &cfg->dev->dev;
2066 int num_fc_ports = LEGACY_FC_PORTS;
2068 port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2069 if (port_mask != 0ULL)
2070 num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2072 dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2073 __func__, port_mask, num_fc_ports);
2075 cfg->num_fc_ports = num_fc_ports;
2076 cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
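/*
 * Worked example of the conversion above: a 2-port AFU reflects a global
 * port mask of 0x3, giving ilog2(0x3) + 1 = 2; a 4-port AFU reflects 0xf,
 * giving ilog2(0xf) + 1 = 4. A mask of 0 falls back to LEGACY_FC_PORTS.
 */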
2080 * init_afu() - setup as master context and start AFU
2081 * @cfg: Internal structure associated with the host.
2083 * This routine is a higher level of control for configuring the
2084 * AFU on probe and reset paths.
2086 * Return: 0 on success, -errno on failure
2088 static int init_afu(struct cxlflash_cfg *cfg)
2092 struct afu *afu = cfg->afu;
2093 struct device *dev = &cfg->dev->dev;
2097 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
2099 afu->num_hwqs = afu->desired_hwqs;
2100 for (i = 0; i < afu->num_hwqs; i++) {
2101 rc = init_mc(cfg, i);
2103 dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2109 /* Map the entire MMIO space of the AFU using the first context */
2110 hwq = get_hwq(afu, PRIMARY_HWQ);
2111 afu->afu_map = cxl_psa_map(hwq->ctx);
2112 if (!afu->afu_map) {
2113 dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
2118 /* No byte reverse on reading afu_version or string will be backwards */
2119 reg = readq(&afu->afu_map->global.regs.afu_version);
2120 memcpy(afu->version, ®, sizeof(reg));
2121 afu->interface_version =
2122 readq_be(&afu->afu_map->global.regs.interface_version);
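	/* Hedged note: an MMIO read of an unimplemented register generally
	 * returns all ones; adding 1 to ~0ULL wraps to 0, so the check below
	 * flags a back-level AFU lacking an interface_version register. */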
2123 if ((afu->interface_version + 1) == 0) {
2124 dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2125 "interface version %016llx\n", afu->version,
2126 afu->interface_version);
2131 if (afu_is_sq_cmd_mode(afu)) {
2132 afu->send_cmd = send_cmd_sq;
2133 afu->context_reset = context_reset_sq;
2135 afu->send_cmd = send_cmd_ioarrin;
2136 afu->context_reset = context_reset_ioarrin;
2139 dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2140 afu->version, afu->interface_version);
2142 get_num_afu_ports(cfg);
2144 rc = start_afu(cfg);
2146 dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2150 afu_err_intr_init(cfg->afu);
2151 for (i = 0; i < afu->num_hwqs; i++) {
2152 hwq = get_hwq(afu, i);
2154 hwq->room = readq_be(&hwq->host_map->cmd_room);
2157 /* Restore the LUN mappings */
2158 cxlflash_restore_luntable(cfg);
2160 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2164 for (i = afu->num_hwqs - 1; i >= 0; i--) {
2165 term_intr(cfg, UNMAP_THREE, i);
2172 * afu_reset() - resets the AFU
2173 * @cfg: Internal structure associated with the host.
2175 * Return: 0 on success, -errno on failure
2177 static int afu_reset(struct cxlflash_cfg *cfg)
2179 struct device *dev = &cfg->dev->dev;
2182 /* Stop the context before the reset. Since the context is
2183 * no longer available, restart it after the reset is complete.
2189 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2194 * drain_ioctls() - wait until all currently executing ioctls have completed
2195 * @cfg: Internal structure associated with the host.
2197 * Obtain write access to read/write semaphore that wraps ioctl
2198 * handling to 'drain' ioctls currently executing.
2200 static void drain_ioctls(struct cxlflash_cfg *cfg)
2202 down_write(&cfg->ioctl_rwsem);
2203 up_write(&cfg->ioctl_rwsem);
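/*
 * Hedged sketch of the other half of this drain: ioctl service paths take
 * ioctl_rwsem for read, so the momentary write acquisition above cannot
 * return until every in-flight ioctl has dropped its read hold. Reader
 * side, with a hypothetical handler body:
 */
#if 0	/* illustrative only */
down_read(&cfg->ioctl_rwsem);
rc = do_ioctl_work(cfg);	/* hypothetical */
up_read(&cfg->ioctl_rwsem);
#endif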
2207 * cxlflash_async_reset_host() - asynchronous host reset handler
2208 * @data: Private data provided while scheduling reset.
2209 * @cookie: Cookie that can be used for checkpointing.
2211 static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2213 struct cxlflash_cfg *cfg = data;
2214 struct device *dev = &cfg->dev->dev;
2217 if (cfg->state != STATE_RESET) {
2218 dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2219 __func__, cfg->state);
2224 cxlflash_mark_contexts_error(cfg);
2225 rc = afu_reset(cfg);
2227 cfg->state = STATE_FAILTERM;
2229 cfg->state = STATE_NORMAL;
2230 wake_up_all(&cfg->reset_waitq);
2233 scsi_unblock_requests(cfg->host);
2237 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2238 * @cfg: Internal structure associated with the host.
2240 static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2242 struct device *dev = &cfg->dev->dev;
2244 if (cfg->state != STATE_NORMAL) {
2245 dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2246 __func__, cfg->state);
2250 cfg->state = STATE_RESET;
2251 scsi_block_requests(cfg->host);
2252 cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2253 cfg);
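/*
 * A hedged sketch of how the saved cookie could be consumed; the helper is
 * hypothetical and this driver's teardown path is not shown here. The async
 * API synchronizes calls scheduled strictly before the given cookie, hence
 * the +1 to cover the reset itself.
 */
static void example_wait_for_async_reset(struct cxlflash_cfg *cfg)
{
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
}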
2257 * send_afu_cmd() - builds and sends an internal AFU command
2258 * @afu: AFU associated with the host.
2259 * @rcb: Pre-populated IOARCB describing command to send.
2261 * The AFU can only take one internal AFU command at a time. This limitation is
2262 * enforced by using a mutex to provide exclusive access to the AFU during the
2263 * operation. This design point requires that calling threads not be in interrupt
2264 * context due to the possibility of sleeping during concurrent AFU operations.
2266 * The command status is optionally passed back to the caller when the caller
2267 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2270 * 0 on success, -errno on failure
2272 static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2274 struct cxlflash_cfg *cfg = afu->parent;
2275 struct device *dev = &cfg->dev->dev;
2276 struct afu_cmd *cmd = NULL;
2277 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2281 static DEFINE_MUTEX(sync_active);
2283 if (cfg->state != STATE_NORMAL) {
2284 dev_dbg(dev, "%s: Sync not required state=%u\n",
2285 __func__, cfg->state);
2289 mutex_lock(&sync_active);
2290 atomic_inc(&afu->cmds_active);
2291 buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2292 if (unlikely(!buf)) {
2293 dev_err(dev, "%s: no memory for command\n", __func__);
2298 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2301 memset(cmd, 0, sizeof(*cmd));
2302 memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2303 INIT_LIST_HEAD(&cmd->queue);
2304 init_completion(&cmd->cevent);
2306 cmd->hwq_index = hwq->index;
2307 cmd->rcb.ctx_id = hwq->ctx_hndl;
2309 dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2310 __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2312 rc = afu->send_cmd(afu, cmd);
2318 rc = wait_resp(afu, cmd);
2321 rc = afu->context_reset(hwq);
2323 cxlflash_schedule_async_reset(cfg);
2326 /* fall through to retry */
2330 /* fall through to exit */
2336 *rcb->ioasa = cmd->sa;
2338 atomic_dec(&afu->cmds_active);
2339 mutex_unlock(&sync_active);
2341 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2346 * cxlflash_afu_sync() - builds and sends an AFU sync command
2347 * @afu: AFU associated with the host.
2348 * @ctx: Identifies context requesting sync.
2349 * @res: Identifies resource requesting sync.
2350 * @mode: Type of sync to issue (lightweight, heavyweight, global).
2352 * AFU sync operations are only necessary and allowed when the device is
2353 * operating normally. When not operating normally, sync requests can occur as
2354 * part of cleaning up resources associated with an adapter prior to removal.
2355 * In this scenario, these requests are simply ignored (safe due to the AFU
2356 * going away).
2359 * 0 on success, -errno on failure
2361 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2363 struct cxlflash_cfg *cfg = afu->parent;
2364 struct device *dev = &cfg->dev->dev;
2365 struct sisl_ioarcb rcb = { 0 };
2367 dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2368 __func__, afu, ctx, res, mode);
2370 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2371 rcb.msi = SISL_MSI_RRQ_UPDATED;
2372 rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2374 rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2376 put_unaligned_be16(ctx, &rcb.cdb[2]);
2377 put_unaligned_be32(res, &rcb.cdb[4]);
2379 return send_afu_cmd(afu, &rcb);
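/*
 * A minimal sketch (hypothetical helper) of the optional status pass-back
 * described above send_afu_cmd(): pointing rcb.ioasa at a caller-owned IOASA
 * requests a copy of the command status, which the caller can then inspect
 * (see cxlflash_lun_provision() below for the in-tree usage).
 */
static int example_sync_with_status(struct afu *afu, ctx_hndl_t ctx)
{
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	int rc;

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));

	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_SYNC_TIMEOUT;
	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
	rcb.ioasa = &asa;			/* request status copy-back */
	put_unaligned_be16(ctx, &rcb.cdb[2]);

	rc = send_afu_cmd(afu, &rcb);
	if (!rc && asa.ioasc)
		rc = -EIO;			/* AFU reported a failure */
	return rc;
}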
2383 * cxlflash_eh_abort_handler() - abort a SCSI command
2384 * @scp: SCSI command to abort.
2386 * CXL Flash devices do not support a single command abort. Reset the context
2387 * as per SISLite specification. Flush any pending commands in the hardware
2388 * queue before the reset.
2390 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2392 static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2395 struct Scsi_Host *host = scp->device->host;
2396 struct cxlflash_cfg *cfg = shost_priv(host);
2397 struct afu_cmd *cmd = sc_to_afuc(scp);
2398 struct device *dev = &cfg->dev->dev;
2399 struct afu *afu = cfg->afu;
2400 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2402 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2403 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2404 scp->device->channel, scp->device->id, scp->device->lun,
2405 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2406 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2407 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2408 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2410 /* When the state is not normal, another reset/reload is in progress.
2411 * Return failed and the mid-layer will invoke host reset handler.
2413 if (cfg->state != STATE_NORMAL) {
2414 dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2415 __func__, cfg->state);
2419 rc = afu->context_reset(hwq);
2426 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2431 * cxlflash_eh_device_reset_handler() - reset a single LUN
2432 * @scp: SCSI command to send.
2435 * SUCCESS as defined in scsi/scsi.h
2436 * FAILED as defined in scsi/scsi.h
2438 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2441 struct scsi_device *sdev = scp->device;
2442 struct Scsi_Host *host = sdev->host;
2443 struct cxlflash_cfg *cfg = shost_priv(host);
2444 struct device *dev = &cfg->dev->dev;
2447 dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2448 host->host_no, sdev->channel, sdev->id, sdev->lun);
2450 switch (cfg->state) {
2452 rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2457 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2464 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2469 * cxlflash_eh_host_reset_handler() - reset the host adapter
2470 * @scp: SCSI command from stack identifying host.
2472 * Following a reset, the state is evaluated again in case an EEH occurred
2473 * during the reset. In such a scenario, the host reset will either yield
2474 * until the EEH recovery is complete or return success or failure based
2475 * upon the current device state.
2478 * SUCCESS as defined in scsi/scsi.h
2479 * FAILED as defined in scsi/scsi.h
2481 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2485 struct Scsi_Host *host = scp->device->host;
2486 struct cxlflash_cfg *cfg = shost_priv(host);
2487 struct device *dev = &cfg->dev->dev;
2489 dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2491 switch (cfg->state) {
2493 cfg->state = STATE_RESET;
2495 cxlflash_mark_contexts_error(cfg);
2496 rcr = afu_reset(cfg);
2499 cfg->state = STATE_FAILTERM;
2501 cfg->state = STATE_NORMAL;
2502 wake_up_all(&cfg->reset_waitq);
2506 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2507 if (cfg->state == STATE_NORMAL)
2515 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2520 * cxlflash_change_queue_depth() - change the queue depth for the device
2521 * @sdev: SCSI device destined for queue depth change.
2522 * @qdepth: Requested queue depth value to set.
2524 * The requested queue depth is capped to the maximum supported value.
2526 * Return: The actual queue depth set.
2528 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2531 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2532 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2534 scsi_change_queue_depth(sdev, qdepth);
2535 return sdev->queue_depth;
2539 * cxlflash_show_port_status() - queries and presents the current port status
2540 * @port: Desired port for status reporting.
2541 * @cfg: Internal structure associated with the host.
2542 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2544 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2546 static ssize_t cxlflash_show_port_status(u32 port,
2547 struct cxlflash_cfg *cfg,
2550 struct device *dev = &cfg->dev->dev;
2553 __be64 __iomem *fc_port_regs;
2555 WARN_ON(port >= MAX_FC_PORTS);
2557 if (port >= cfg->num_fc_ports) {
2558 dev_info(dev, "%s: Port %d not supported on this card.\n",
2559 __func__, port);
2563 fc_port_regs = get_fc_port_regs(cfg, port);
2564 status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2565 status &= FC_MTIP_STATUS_MASK;
2567 if (status == FC_MTIP_STATUS_ONLINE)
2568 disp_status = "online";
2569 else if (status == FC_MTIP_STATUS_OFFLINE)
2570 disp_status = "offline";
2572 disp_status = "unknown";
2574 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2578 * port0_show() - queries and presents the current status of port 0
2579 * @dev: Generic device associated with the host owning the port.
2580 * @attr: Device attribute representing the port.
2581 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2583 * Return: The size of the ASCII string returned in @buf.
2585 static ssize_t port0_show(struct device *dev,
2586 struct device_attribute *attr,
2589 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2591 return cxlflash_show_port_status(0, cfg, buf);
2595 * port1_show() - queries and presents the current status of port 1
2596 * @dev: Generic device associated with the host owning the port.
2597 * @attr: Device attribute representing the port.
2598 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2600 * Return: The size of the ASCII string returned in @buf.
2602 static ssize_t port1_show(struct device *dev,
2603 struct device_attribute *attr,
2606 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2608 return cxlflash_show_port_status(1, cfg, buf);
2612 * port2_show() - queries and presents the current status of port 2
2613 * @dev: Generic device associated with the host owning the port.
2614 * @attr: Device attribute representing the port.
2615 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2617 * Return: The size of the ASCII string returned in @buf.
2619 static ssize_t port2_show(struct device *dev,
2620 struct device_attribute *attr,
2623 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2625 return cxlflash_show_port_status(2, cfg, buf);
2629 * port3_show() - queries and presents the current status of port 3
2630 * @dev: Generic device associated with the host owning the port.
2631 * @attr: Device attribute representing the port.
2632 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2634 * Return: The size of the ASCII string returned in @buf.
2636 static ssize_t port3_show(struct device *dev,
2637 struct device_attribute *attr,
2640 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2642 return cxlflash_show_port_status(3, cfg, buf);
2646 * lun_mode_show() - presents the current LUN mode of the host
2647 * @dev: Generic device associated with the host.
2648 * @attr: Device attribute representing the LUN mode.
2649 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2651 * Return: The size of the ASCII string returned in @buf.
2653 static ssize_t lun_mode_show(struct device *dev,
2654 struct device_attribute *attr, char *buf)
2656 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2657 struct afu *afu = cfg->afu;
2659 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2663 * lun_mode_store() - sets the LUN mode of the host
2664 * @dev: Generic device associated with the host.
2665 * @attr: Device attribute representing the LUN mode.
2666 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2667 * @count: Length of data residing in @buf.
2669 * The CXL Flash AFU supports a dummy LUN mode where the external
2670 * links and storage are not required. Space on the FPGA is used
2671 * to create 1 or 2 small LUNs which are presented to the system
2672 * as if they were a normal storage device. This feature is useful
2673 * during development and also provides manufacturing with a way
2674 * to test the AFU without an actual device.
2676 * 0 = external LUN[s] (default)
2677 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2678 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2679 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2680 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2682 * Return: @count (the number of bytes consumed from @buf); an illustrative userspace sketch follows this function.
2684 static ssize_t lun_mode_store(struct device *dev,
2685 struct device_attribute *attr,
2686 const char *buf, size_t count)
2688 struct Scsi_Host *shost = class_to_shost(dev);
2689 struct cxlflash_cfg *cfg = shost_priv(shost);
2690 struct afu *afu = cfg->afu;
2694 rc = kstrtouint(buf, 10, &lun_mode);
2695 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2696 afu->internal_lun = lun_mode;
2699 * When configured for internal LUN, there is only one channel,
2700 * channel number 0; otherwise, the maximum channel number is one
2701 * less than the number of FC ports on this card.
2703 if (afu->internal_lun)
2704 shost->max_channel = 0;
2706 shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2709 scsi_scan_host(cfg->host);
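/*
 * An illustrative userspace sketch for the lun_mode attribute above. It
 * assumes the attribute is exposed through the standard SCSI host sysfs
 * path (as shost_attrs entries are) and that the caller knows the host
 * number; the helper name is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int example_set_lun_mode(int host_no, unsigned int mode)
{
	char path[64], val[4];	/* valid modes are 0-4, single digit */
	int fd, len, rc;

	snprintf(path, sizeof(path),
		 "/sys/class/scsi_host/host%d/lun_mode", host_no);
	len = snprintf(val, sizeof(val), "%u", mode);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	rc = (write(fd, val, len) == len) ? 0 : -1;
	close(fd);
	return rc;
}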
2716 * ioctl_version_show() - presents the current ioctl version of the host
2717 * @dev: Generic device associated with the host.
2718 * @attr: Device attribute representing the ioctl version.
2719 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2721 * Return: The size of the ASCII string returned in @buf.
2723 static ssize_t ioctl_version_show(struct device *dev,
2724 struct device_attribute *attr, char *buf)
2728 bytes = scnprintf(buf, PAGE_SIZE,
2729 "disk: %u\n", DK_CXLFLASH_VERSION_0);
2730 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2731 "host: %u\n", HT_CXLFLASH_VERSION_0);
2737 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2738 * @port: Desired port for status reporting.
2739 * @cfg: Internal structure associated with the host.
2740 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2742 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2744 static ssize_t cxlflash_show_port_lun_table(u32 port,
2745 struct cxlflash_cfg *cfg,
2748 struct device *dev = &cfg->dev->dev;
2749 __be64 __iomem *fc_port_luns;
2753 WARN_ON(port >= MAX_FC_PORTS);
2755 if (port >= cfg->num_fc_ports) {
2756 dev_info(dev, "%s: Port %d not supported on this card.\n",
2757 __func__, port);
2761 fc_port_luns = get_fc_port_luns(cfg, port);
2763 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2764 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2765 "%03d: %016llx\n",
2766 i, readq_be(&fc_port_luns[i]));
2771 * port0_lun_table_show() - presents the current LUN table of port 0
2772 * @dev: Generic device associated with the host owning the port.
2773 * @attr: Device attribute representing the port.
2774 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2776 * Return: The size of the ASCII string returned in @buf.
2778 static ssize_t port0_lun_table_show(struct device *dev,
2779 struct device_attribute *attr,
2782 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2784 return cxlflash_show_port_lun_table(0, cfg, buf);
2788 * port1_lun_table_show() - presents the current LUN table of port 1
2789 * @dev: Generic device associated with the host owning the port.
2790 * @attr: Device attribute representing the port.
2791 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2793 * Return: The size of the ASCII string returned in @buf.
2795 static ssize_t port1_lun_table_show(struct device *dev,
2796 struct device_attribute *attr,
2799 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2801 return cxlflash_show_port_lun_table(1, cfg, buf);
2805 * port2_lun_table_show() - presents the current LUN table of port 2
2806 * @dev: Generic device associated with the host owning the port.
2807 * @attr: Device attribute representing the port.
2808 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2810 * Return: The size of the ASCII string returned in @buf.
2812 static ssize_t port2_lun_table_show(struct device *dev,
2813 struct device_attribute *attr,
2816 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2818 return cxlflash_show_port_lun_table(2, cfg, buf);
2822 * port3_lun_table_show() - presents the current LUN table of port 3
2823 * @dev: Generic device associated with the host owning the port.
2824 * @attr: Device attribute representing the port.
2825 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2827 * Return: The size of the ASCII string returned in @buf.
2829 static ssize_t port3_lun_table_show(struct device *dev,
2830 struct device_attribute *attr,
2833 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2835 return cxlflash_show_port_lun_table(3, cfg, buf);
2839 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2840 * @dev: Generic device associated with the host.
2841 * @attr: Device attribute representing the IRQ poll weight.
2842 * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
2843 * weight in ASCII.
2845 * An IRQ poll weight of 0 indicates polling is disabled.
2847 * Return: The size of the ASCII string returned in @buf.
2849 static ssize_t irqpoll_weight_show(struct device *dev,
2850 struct device_attribute *attr, char *buf)
2852 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2853 struct afu *afu = cfg->afu;
2855 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2859 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2860 * @dev: Generic device associated with the host.
2861 * @attr: Device attribute representing the IRQ poll weight.
2862 * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
2863 * weight in ASCII.
2864 * @count: Length of data residing in @buf.
2866 * An IRQ poll weight of 0 indicates polling is disabled.
2868 * Return: @count on success, -errno on failure.
2870 static ssize_t irqpoll_weight_store(struct device *dev,
2871 struct device_attribute *attr,
2872 const char *buf, size_t count)
2874 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2875 struct device *cfgdev = &cfg->dev->dev;
2876 struct afu *afu = cfg->afu;
2881 rc = kstrtouint(buf, 10, &weight);
2887 "Invalid IRQ poll weight. It must be 256 or less.\n");
2891 if (weight == afu->irqpoll_weight) {
2893 "Current IRQ poll weight has the same weight.\n");
2897 if (afu_is_irqpoll_enabled(afu)) {
2898 for (i = 0; i < afu->num_hwqs; i++) {
2899 hwq = get_hwq(afu, i);
2901 irq_poll_disable(&hwq->irqpoll);
2905 afu->irqpoll_weight = weight;
2908 for (i = 0; i < afu->num_hwqs; i++) {
2909 hwq = get_hwq(afu, i);
2911 irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2919 * num_hwqs_show() - presents the number of hardware queues for the host
2920 * @dev: Generic device associated with the host.
2921 * @attr: Device attribute representing the number of hardware queues.
2922 * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
2923 * queues in ASCII.
2925 * Return: The size of the ASCII string returned in @buf.
2927 static ssize_t num_hwqs_show(struct device *dev,
2928 struct device_attribute *attr, char *buf)
2930 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2931 struct afu *afu = cfg->afu;
2933 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2937 * num_hwqs_store() - sets the number of hardware queues for the host
2938 * @dev: Generic device associated with the host.
2939 * @attr: Device attribute representing the number of hardware queues.
2940 * @buf: Buffer of length PAGE_SIZE containing the number of hardware
2941 * queues in ASCII.
2942 * @count: Length of data residing in @buf.
2944 * n > 0: num_hwqs = n
2945 * n = 0: num_hwqs = num_online_cpus()
2946 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2948 * Return: @count on success, -errno on failure; a minimal sketch of this mapping follows the function.
2950 static ssize_t num_hwqs_store(struct device *dev,
2951 struct device_attribute *attr,
2952 const char *buf, size_t count)
2954 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2955 struct afu *afu = cfg->afu;
2957 int nhwqs, num_hwqs;
2959 rc = kstrtoint(buf, 10, &nhwqs);
2965 else if (nhwqs == 0)
2966 num_hwqs = num_online_cpus();
2968 num_hwqs = num_online_cpus() / abs(nhwqs);
2970 afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
2971 WARN_ON_ONCE(afu->desired_hwqs == 0);
2974 switch (cfg->state) {
2976 cfg->state = STATE_RESET;
2978 cxlflash_mark_contexts_error(cfg);
2979 rc = afu_reset(cfg);
2981 cfg->state = STATE_FAILTERM;
2983 cfg->state = STATE_NORMAL;
2984 wake_up_all(&cfg->reset_waitq);
2987 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2988 if (cfg->state == STATE_NORMAL)
2991 /* Ideally should not happen */
2992 dev_err(dev, "%s: Device is not ready, state=%d\n",
2993 __func__, cfg->state);
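/*
 * A minimal sketch of the n -> num_hwqs mapping documented above
 * num_hwqs_store(), including the cap applied there. Note that a large
 * negative n can yield 0, which the store handler flags with
 * WARN_ON_ONCE(). The helper name is hypothetical.
 */
static int example_resolve_num_hwqs(int n)
{
	int num_hwqs;

	if (n > 0)
		num_hwqs = n;
	else if (n == 0)
		num_hwqs = num_online_cpus();
	else
		num_hwqs = num_online_cpus() / abs(n);

	return min(num_hwqs, CXLFLASH_MAX_HWQS);
}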
3000 static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3003 * hwq_mode_show() - presents the HWQ steering mode for the host
3004 * @dev: Generic device associated with the host.
3005 * @attr: Device attribute representing the HWQ steering mode.
3006 * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
3007 * as a character string.
3009 * Return: The size of the ASCII string returned in @buf.
3011 static ssize_t hwq_mode_show(struct device *dev,
3012 struct device_attribute *attr, char *buf)
3014 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3015 struct afu *afu = cfg->afu;
3017 return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3021 * hwq_mode_store() - sets the HWQ steering mode for the host
3022 * @dev: Generic device associated with the host.
3023 * @attr: Device attribute representing the HWQ steering mode.
3024 * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
3025 * as a character string.
3026 * @count: Length of data residing in @buf.
3028 * rr = Round-Robin
3029 * tag = Block MQ Tagging
3030 * cpu = CPU Affinity
3032 * Return: @count on success, -errno on failure.
3034 static ssize_t hwq_mode_store(struct device *dev,
3035 struct device_attribute *attr,
3036 const char *buf, size_t count)
3038 struct Scsi_Host *shost = class_to_shost(dev);
3039 struct cxlflash_cfg *cfg = shost_priv(shost);
3040 struct device *cfgdev = &cfg->dev->dev;
3041 struct afu *afu = cfg->afu;
3043 u32 mode = MAX_HWQ_MODE;
3045 for (i = 0; i < MAX_HWQ_MODE; i++) {
3046 if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3052 if (mode >= MAX_HWQ_MODE) {
3053 dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3057 if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
3058 dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
3059 "HWQ steering mode.\n");
3063 afu->hwq_mode = mode;
3069 * mode_show() - presents the current mode of the device
3070 * @dev: Generic device associated with the device.
3071 * @attr: Device attribute representing the device mode.
3072 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3074 * Return: The size of the ASCII string returned in @buf.
3076 static ssize_t mode_show(struct device *dev,
3077 struct device_attribute *attr, char *buf)
3079 struct scsi_device *sdev = to_scsi_device(dev);
3081 return scnprintf(buf, PAGE_SIZE, "%s\n",
3082 sdev->hostdata ? "superpipe" : "legacy");
3088 static DEVICE_ATTR_RO(port0);
3089 static DEVICE_ATTR_RO(port1);
3090 static DEVICE_ATTR_RO(port2);
3091 static DEVICE_ATTR_RO(port3);
3092 static DEVICE_ATTR_RW(lun_mode);
3093 static DEVICE_ATTR_RO(ioctl_version);
3094 static DEVICE_ATTR_RO(port0_lun_table);
3095 static DEVICE_ATTR_RO(port1_lun_table);
3096 static DEVICE_ATTR_RO(port2_lun_table);
3097 static DEVICE_ATTR_RO(port3_lun_table);
3098 static DEVICE_ATTR_RW(irqpoll_weight);
3099 static DEVICE_ATTR_RW(num_hwqs);
3100 static DEVICE_ATTR_RW(hwq_mode);
3102 static struct device_attribute *cxlflash_host_attrs[] = {
3108 &dev_attr_ioctl_version,
3109 &dev_attr_port0_lun_table,
3110 &dev_attr_port1_lun_table,
3111 &dev_attr_port2_lun_table,
3112 &dev_attr_port3_lun_table,
3113 &dev_attr_irqpoll_weight,
3122 static DEVICE_ATTR_RO(mode);
3124 static struct device_attribute *cxlflash_dev_attrs[] = {
3132 static struct scsi_host_template driver_template = {
3133 .module = THIS_MODULE,
3134 .name = CXLFLASH_ADAPTER_NAME,
3135 .info = cxlflash_driver_info,
3136 .ioctl = cxlflash_ioctl,
3137 .proc_name = CXLFLASH_NAME,
3138 .queuecommand = cxlflash_queuecommand,
3139 .eh_abort_handler = cxlflash_eh_abort_handler,
3140 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3141 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3142 .change_queue_depth = cxlflash_change_queue_depth,
3143 .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3144 .can_queue = CXLFLASH_MAX_CMDS,
3145 .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3147 .sg_tablesize = 1, /* No scatter gather support */
3148 .max_sectors = CXLFLASH_MAX_SECTORS,
3149 .use_clustering = ENABLE_CLUSTERING,
3150 .shost_attrs = cxlflash_host_attrs,
3151 .sdev_attrs = cxlflash_dev_attrs,
3155 * Device dependent values
3157 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3158 CXLFLASH_WWPN_VPD_REQUIRED };
3159 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3160 CXLFLASH_NOTIFY_SHUTDOWN };
3161 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3162 CXLFLASH_NOTIFY_SHUTDOWN };
3165 * PCI device binding table
3167 static struct pci_device_id cxlflash_pci_table[] = {
3168 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3170 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3172 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3177 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3180 * cxlflash_worker_thread() - work thread handler for the AFU
3181 * @work: Work structure contained within cxlflash associated with host.
3183 * Handles the following events:
3184 * - Link reset, which cannot be performed in interrupt context due to
3185 * blocking for up to a few seconds
3186 * - Rescan of the host
3188 static void cxlflash_worker_thread(struct work_struct *work)
3190 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3191 work_q);
3192 struct afu *afu = cfg->afu;
3193 struct device *dev = &cfg->dev->dev;
3194 __be64 __iomem *fc_port_regs;
3198 /* Avoid MMIO if the device has failed */
3200 if (cfg->state != STATE_NORMAL)
3203 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3205 if (cfg->lr_state == LINK_RESET_REQUIRED) {
3206 port = cfg->lr_port;
3208 dev_err(dev, "%s: invalid port index %d\n",
3209 __func__, port);
3211 spin_unlock_irqrestore(cfg->host->host_lock,
3214 /* The reset can block... */
3215 fc_port_regs = get_fc_port_regs(cfg, port);
3216 afu_link_reset(afu, port, fc_port_regs);
3217 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3220 cfg->lr_state = LINK_RESET_COMPLETE;
3223 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3225 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3226 scsi_scan_host(cfg->host);
3230 * cxlflash_chr_open() - character device open handler
3231 * @inode: Device inode associated with this character device.
3232 * @file: File pointer for this device.
3234 * Only users with admin privileges are allowed to open the character device.
3236 * Return: 0 on success, -errno on failure
3238 static int cxlflash_chr_open(struct inode *inode, struct file *file)
3240 struct cxlflash_cfg *cfg;
3242 if (!capable(CAP_SYS_ADMIN))
3245 cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3246 file->private_data = cfg;
3252 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3253 * @cmd: The host ioctl command to decode.
3255 * Return: A string identifying the decoded host ioctl.
3257 static char *decode_hioctl(int cmd)
3260 case HT_CXLFLASH_LUN_PROVISION:
3261 return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3268 * cxlflash_lun_provision() - host LUN provisioning handler
3269 * @cfg: Internal structure associated with the host.
3270 * @arg: Kernel copy of userspace ioctl data structure.
3272 * Return: 0 on success, -errno on failure
3274 static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3275 struct ht_cxlflash_lun_provision *lunprov)
3277 struct afu *afu = cfg->afu;
3278 struct device *dev = &cfg->dev->dev;
3279 struct sisl_ioarcb rcb;
3280 struct sisl_ioasa asa;
3281 __be64 __iomem *fc_port_regs;
3282 u16 port = lunprov->port;
3283 u16 scmd = lunprov->hdr.subcmd;
3290 if (!afu_is_lun_provision(afu)) {
3295 if (port >= cfg->num_fc_ports) {
3301 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3302 type = SISL_AFU_LUN_PROVISION_CREATE;
3303 size = lunprov->size;
3306 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3307 type = SISL_AFU_LUN_PROVISION_DELETE;
3309 lun_id = lunprov->lun_id;
3311 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3312 fc_port_regs = get_fc_port_regs(cfg, port);
3314 reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3315 lunprov->max_num_luns = reg;
3316 reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3317 lunprov->cur_num_luns = reg;
3318 reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3319 lunprov->max_cap_port = reg;
3320 reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3321 lunprov->cur_cap_port = reg;
3329 memset(&rcb, 0, sizeof(rcb));
3330 memset(&asa, 0, sizeof(asa));
3331 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3332 rcb.lun_id = lun_id;
3333 rcb.msi = SISL_MSI_RRQ_UPDATED;
3334 rcb.timeout = MC_LUN_PROV_TIMEOUT;
3337 rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3340 put_unaligned_be64(size, &rcb.cdb[8]);
3342 rc = send_afu_cmd(afu, &rcb);
3344 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3345 __func__, rc, asa.ioasc, asa.afu_extra);
3349 if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3350 lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3351 memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3354 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
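/*
 * An illustrative userspace sketch of the query subcommand handled above.
 * It assumes the character device node created by this driver
 * (/dev/cxlflash/cxlflashN, see cxlflash_devnode() below), the uapi header
 * installed as <scsi/cxlflash_ioctl.h>, and CAP_SYS_ADMIN (enforced in
 * cxlflash_chr_open()). Error handling is trimmed for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>

static int example_query_port(int port)
{
	struct ht_cxlflash_lun_provision prov;
	int fd, rc;

	fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&prov, 0, sizeof(prov));
	prov.hdr.version = HT_CXLFLASH_VERSION_0;
	prov.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
	prov.port = port;

	rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &prov);
	if (!rc)
		printf("port %d: %llu of %llu LUNs provisioned\n", port,
		       (unsigned long long)prov.cur_num_luns,
		       (unsigned long long)prov.max_num_luns);
	close(fd);
	return rc;
}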
3359 * cxlflash_afu_debug() - host AFU debug handler
3360 * @cfg: Internal structure associated with the host.
3361 * @arg: Kernel copy of userspace ioctl data structure.
3363 * For debug requests requiring a data buffer, always provide an aligned
3364 * (cache line) buffer to the AFU to appease any alignment requirements.
3366 * Return: 0 on success, -errno on failure
3368 static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3369 struct ht_cxlflash_afu_debug *afu_dbg)
3371 struct afu *afu = cfg->afu;
3372 struct device *dev = &cfg->dev->dev;
3373 struct sisl_ioarcb rcb;
3374 struct sisl_ioasa asa;
3377 void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3378 u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3379 u32 ulen = afu_dbg->data_len;
3380 bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3383 if (!afu_is_afu_debug(afu)) {
3389 req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3391 if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3396 if (unlikely(!access_ok(is_write ? VERIFY_READ : VERIFY_WRITE,
3402 buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3403 if (unlikely(!buf)) {
3408 kbuf = PTR_ALIGN(buf, cache_line_size());
3411 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3413 if (copy_from_user(kbuf, ubuf, ulen)) {
3420 memset(&rcb, 0, sizeof(rcb));
3421 memset(&asa, 0, sizeof(asa));
3423 rcb.req_flags = req_flags;
3424 rcb.msi = SISL_MSI_RRQ_UPDATED;
3425 rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3429 rcb.data_len = ulen;
3430 rcb.data_ea = (uintptr_t)kbuf;
3433 rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3434 memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3435 HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3437 rc = send_afu_cmd(afu, &rcb);
3439 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3440 __func__, rc, asa.ioasc, asa.afu_extra);
3444 if (ulen && !is_write) {
3445 if (copy_to_user(ubuf, kbuf, ulen))
3450 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
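/*
 * A companion userspace sketch for the debug path above, showing the
 * data_ea/data_len convention for a read-style request (flags left clear;
 * HT_CXLFLASH_HOST_WRITE would select a host-to-AFU transfer instead). The
 * subcommand bytes are AFU-defined and left as a placeholder; the helper is
 * hypothetical and assumes the same headers as the previous sketch.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>

static int example_afu_debug_read(int fd, void *buf, unsigned int len)
{
	struct ht_cxlflash_afu_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.hdr.version = HT_CXLFLASH_VERSION_0;
	dbg.data_ea = (uint64_t)(uintptr_t)buf;	/* user buffer address */
	dbg.data_len = len;
	/* dbg.afu_subcmd[] would carry the AFU-defined opcode bytes */

	return ioctl(fd, HT_CXLFLASH_AFU_DEBUG, &dbg);
}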
3455 * cxlflash_chr_ioctl() - character device IOCTL handler
3456 * @file: File pointer for this device.
3457 * @cmd: IOCTL command.
3458 * @arg: Userspace ioctl data structure.
3460 * A read/write semaphore is used to implement a 'drain' of currently
3461 * running ioctls. The read semaphore is taken at the beginning of each
3462 * ioctl thread and released upon concluding execution. Additionally, the
3463 * semaphore should be released and then reacquired in any ioctl execution
3464 * path which will wait for an event to occur that is outside the scope of
3465 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3466 * a thread simply needs to acquire the write semaphore.
3468 * Return: 0 on success, -errno on failure
3470 static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3473 typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3475 struct cxlflash_cfg *cfg = file->private_data;
3476 struct device *dev = &cfg->dev->dev;
3477 char buf[sizeof(union cxlflash_ht_ioctls)];
3478 void __user *uarg = (void __user *)arg;
3479 struct ht_cxlflash_hdr *hdr;
3481 bool known_ioctl = false;
3484 hioctl do_ioctl = NULL;
3486 static const struct {
3489 } ioctl_tbl[] = { /* NOTE: order matters here */
3490 { sizeof(struct ht_cxlflash_lun_provision),
3491 (hioctl)cxlflash_lun_provision },
3492 { sizeof(struct ht_cxlflash_afu_debug),
3493 (hioctl)cxlflash_afu_debug },
3496 /* Hold read semaphore so we can drain if needed */
3497 down_read(&cfg->ioctl_rwsem);
3499 dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3500 __func__, cmd, _IOC_NR(cmd), sizeof(ioctl_tbl));
3503 case HT_CXLFLASH_LUN_PROVISION:
3504 case HT_CXLFLASH_AFU_DEBUG:
3506 idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3507 size = ioctl_tbl[idx].size;
3508 do_ioctl = ioctl_tbl[idx].ioctl;
3510 if (likely(do_ioctl))
3519 if (unlikely(copy_from_user(&buf, uarg, size))) {
3520 dev_err(dev, "%s: copy_from_user() fail "
3521 "size=%lu cmd=%d (%s) uarg=%p\n",
3522 __func__, size, cmd, decode_hioctl(cmd), uarg);
3527 hdr = (struct ht_cxlflash_hdr *)&buf;
3528 if (hdr->version != HT_CXLFLASH_VERSION_0) {
3529 dev_dbg(dev, "%s: Version %u not supported for %s\n",
3530 __func__, hdr->version, decode_hioctl(cmd));
3535 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3536 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3541 rc = do_ioctl(cfg, (void *)&buf);
3543 if (unlikely(copy_to_user(uarg, &buf, size))) {
3544 dev_err(dev, "%s: copy_to_user() fail "
3545 "size=%lu cmd=%d (%s) uarg=%p\n",
3546 __func__, size, cmd, decode_hioctl(cmd), uarg);
3550 /* fall through to exit */
3553 up_read(&cfg->ioctl_rwsem);
3554 if (unlikely(rc && known_ioctl))
3555 dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3556 __func__, decode_hioctl(cmd), cmd, rc);
3558 dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3559 __func__, decode_hioctl(cmd), cmd, rc);
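/*
 * A sketch of a compile-time guard for the "order matters" note on
 * ioctl_tbl above: because the table index is derived from consecutive
 * ioctl numbers, the commands must be numbered back to back. Such a check
 * is not present in the driver and is shown for illustration only.
 */
static inline void example_check_ioctl_tbl_order(void)
{
	BUILD_BUG_ON(_IOC_NR(HT_CXLFLASH_AFU_DEBUG) !=
		     _IOC_NR(HT_CXLFLASH_LUN_PROVISION) + 1);
}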
3564 * Character device file operations
3566 static const struct file_operations cxlflash_chr_fops = {
3567 .owner = THIS_MODULE,
3568 .open = cxlflash_chr_open,
3569 .unlocked_ioctl = cxlflash_chr_ioctl,
3570 .compat_ioctl = cxlflash_chr_ioctl,
3574 * init_chrdev() - initialize the character device for the host
3575 * @cfg: Internal structure associated with the host.
3577 * Return: 0 on success, -errno on failure
3579 static int init_chrdev(struct cxlflash_cfg *cfg)
3581 struct device *dev = &cfg->dev->dev;
3582 struct device *char_dev;
3587 minor = cxlflash_get_minor();
3588 if (unlikely(minor < 0)) {
3589 dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3594 devno = MKDEV(cxlflash_major, minor);
3595 cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3597 rc = cdev_add(&cfg->cdev, devno, 1);
3599 dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3603 char_dev = device_create(cxlflash_class, NULL, devno,
3604 NULL, "cxlflash%d", minor);
3605 if (IS_ERR(char_dev)) {
3606 rc = PTR_ERR(char_dev);
3607 dev_err(dev, "%s: device_create failed rc=%d\n",
3608 __func__, rc);
3612 cfg->chardev = char_dev;
3614 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3617 cdev_del(&cfg->cdev);
3619 cxlflash_put_minor(minor);
3624 * cxlflash_probe() - PCI entry point to add host
3625 * @pdev: PCI device associated with the host.
3626 * @dev_id: PCI device id associated with device.
3628 * The device will initially start out in a 'probing' state and
3629 * transition to the 'normal' state at the end of a successful
3630 * probe. Should an EEH event occur during probe, the notification
3631 * thread (error_detected()) will wait until the probe handler
3632 * is nearly complete. At that time, the device will be moved to
3633 * a 'probed' state and the EEH thread woken up to drive the slot
3634 * reset and recovery (device moves to 'normal' state). Meanwhile,
3635 * the probe will be allowed to exit successfully.
3637 * Return: 0 on success, -errno on failure
3639 static int cxlflash_probe(struct pci_dev *pdev,
3640 const struct pci_device_id *dev_id)
3642 struct Scsi_Host *host;
3643 struct cxlflash_cfg *cfg = NULL;
3644 struct device *dev = &pdev->dev;
3645 struct dev_dependent_vals *ddv;
3649 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3650 __func__, pdev->irq);
3652 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3653 driver_template.max_sectors = ddv->max_sectors;
3655 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3657 dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3662 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3663 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3664 host->unique_id = host->host_no;
3665 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3667 cfg = shost_priv(host);
3669 rc = alloc_mem(cfg);
3671 dev_err(dev, "%s: alloc_mem failed\n", __func__);
3673 scsi_host_put(cfg->host);
3677 cfg->init_state = INIT_STATE_NONE;
3679 cfg->cxl_fops = cxlflash_cxl_fops;
3682 * Promoted LUNs move to the top of the LUN table. The rest stay on
3683 * the bottom half. The bottom half grows from the end (index = 255),
3684 * whereas the top half grows from the beginning (index = 0).
3686 * Initialize the last LUN index for all possible ports.
3688 cfg->promote_lun_index = 0;
3690 for (k = 0; k < MAX_FC_PORTS; k++)
3691 cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3693 cfg->dev_id = (struct pci_device_id *)dev_id;
3695 init_waitqueue_head(&cfg->tmf_waitq);
3696 init_waitqueue_head(&cfg->reset_waitq);
3698 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3699 cfg->lr_state = LINK_RESET_INVALID;
3701 spin_lock_init(&cfg->tmf_slock);
3702 mutex_init(&cfg->ctx_tbl_list_mutex);
3703 mutex_init(&cfg->ctx_recovery_mutex);
3704 init_rwsem(&cfg->ioctl_rwsem);
3705 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3706 INIT_LIST_HEAD(&cfg->lluns);
3708 pci_set_drvdata(pdev, cfg);
3710 cfg->cxl_afu = cxl_pci_to_afu(pdev);
3714 dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3717 cfg->init_state = INIT_STATE_PCI;
3719 rc = init_afu(cfg);
3720 if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3721 dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3724 cfg->init_state = INIT_STATE_AFU;
3726 rc = init_scsi(cfg);
3728 dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3731 cfg->init_state = INIT_STATE_SCSI;
3733 rc = init_chrdev(cfg);
3735 dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3738 cfg->init_state = INIT_STATE_CDEV;
3740 if (wq_has_sleeper(&cfg->reset_waitq)) {
3741 cfg->state = STATE_PROBED;
3742 wake_up_all(&cfg->reset_waitq);
3744 cfg->state = STATE_NORMAL;
3746 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3750 cxlflash_remove(pdev);
3755 * cxlflash_pci_error_detected() - called when a PCI error is detected
3756 * @pdev: PCI device struct.
3757 * @state: PCI channel state.
3759 * When an EEH occurs during an active reset, wait until the reset is
3760 * complete and then take action based upon the device state.
3762 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3764 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3765 pci_channel_state_t state)
3768 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3769 struct device *dev = &cfg->dev->dev;
3771 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3774 case pci_channel_io_frozen:
3775 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3776 cfg->state != STATE_PROBING);
3777 if (cfg->state == STATE_FAILTERM)
3778 return PCI_ERS_RESULT_DISCONNECT;
3780 cfg->state = STATE_RESET;
3781 scsi_block_requests(cfg->host);
3783 rc = cxlflash_mark_contexts_error(cfg);
3785 dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3788 return PCI_ERS_RESULT_NEED_RESET;
3789 case pci_channel_io_perm_failure:
3790 cfg->state = STATE_FAILTERM;
3791 wake_up_all(&cfg->reset_waitq);
3792 scsi_unblock_requests(cfg->host);
3793 return PCI_ERS_RESULT_DISCONNECT;
3797 return PCI_ERS_RESULT_NEED_RESET;
3801 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3802 * @pdev: PCI device struct.
3804 * This routine is called by the pci error recovery code after the PCI
3805 * slot has been reset, just before we should resume normal operations.
3807 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3809 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3812 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3813 struct device *dev = &cfg->dev->dev;
3815 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3817 rc = init_afu(cfg);
3818 if (unlikely(rc)) {
3819 dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3820 return PCI_ERS_RESULT_DISCONNECT;
3823 return PCI_ERS_RESULT_RECOVERED;
3827 * cxlflash_pci_resume() - called when normal operation can resume
3828 * @pdev: PCI device struct
3830 static void cxlflash_pci_resume(struct pci_dev *pdev)
3832 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3833 struct device *dev = &cfg->dev->dev;
3835 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3837 cfg->state = STATE_NORMAL;
3838 wake_up_all(&cfg->reset_waitq);
3839 scsi_unblock_requests(cfg->host);
3843 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3844 * @dev: Character device.
3845 * @mode: Mode that can be used to verify access.
3847 * Return: Allocated string describing the devtmpfs structure.
3849 static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3851 return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3855 * cxlflash_class_init() - create character device class
3857 * Return: 0 on success, -errno on failure
3859 static int cxlflash_class_init(void)
3864 rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3866 pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3870 cxlflash_major = MAJOR(devno);
3872 cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3873 if (IS_ERR(cxlflash_class)) {
3874 rc = PTR_ERR(cxlflash_class);
3875 pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3879 cxlflash_class->devnode = cxlflash_devnode;
3881 pr_debug("%s: returning rc=%d\n", __func__, rc);
3884 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3889 * cxlflash_class_exit() - destroy character device class
3891 static void cxlflash_class_exit(void)
3893 dev_t devno = MKDEV(cxlflash_major, 0);
3895 class_destroy(cxlflash_class);
3896 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3899 static const struct pci_error_handlers cxlflash_err_handler = {
3900 .error_detected = cxlflash_pci_error_detected,
3901 .slot_reset = cxlflash_pci_slot_reset,
3902 .resume = cxlflash_pci_resume,
3906 * PCI device structure
3908 static struct pci_driver cxlflash_driver = {
3909 .name = CXLFLASH_NAME,
3910 .id_table = cxlflash_pci_table,
3911 .probe = cxlflash_probe,
3912 .remove = cxlflash_remove,
3913 .shutdown = cxlflash_remove,
3914 .err_handler = &cxlflash_err_handler,
3918 * init_cxlflash() - module entry point
3920 * Return: 0 on success, -errno on failure
3922 static int __init init_cxlflash(void)
3927 cxlflash_list_init();
3928 rc = cxlflash_class_init();
3932 rc = pci_register_driver(&cxlflash_driver);
3936 pr_debug("%s: returning rc=%d\n", __func__, rc);
3939 cxlflash_class_exit();
3944 * exit_cxlflash() - module exit point
3946 static void __exit exit_cxlflash(void)
3948 cxlflash_term_global_luns();
3949 cxlflash_free_errpage();
3951 pci_unregister_driver(&cxlflash_driver);
3952 cxlflash_class_exit();
3955 module_init(init_cxlflash);
3956 module_exit(exit_cxlflash);