/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
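/*
 * Map the command byte of a host-to-device FIS onto the SATA transport
 * protocol (FPDMA/PIO/DMA/non-data) that the hardware command header must
 * carry. Commands not listed below are classified by the DMA data
 * direction as a fallback.
 */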
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
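/*
 * Worked example for hisi_sas_get_prog_phy_linkrate_mask(): with the libsas
 * enum ordering (1.5G, 3.0G, 6.0G, 12.0G), passing SAS_LINK_RATE_12_0_GBPS
 * gives max - SAS_LINK_RATE_1_5_GBPS = 3, so bits 0, 2, 4 and 6 are set and
 * the function returns 0x55 -- two register bits per supported rate.
 */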
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	unsigned long flags;

	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;
	unsigned long flags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				hisi_hba->hw->max_command_entries -
				HISI_SAS_RESERVED_IPTT_CNT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return index;
}
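/*
 * IPTT (tag) space layout implied by the allocators above: commands that
 * arrive with a scsi_cmnd reuse the block layer request tag directly,
 * while the top HISI_SAS_RESERVED_IPTT_CNT entries, searched from
 * last_slot_index + 1 under hisi_hba->lock, are handed out from the bitmap
 * for internal/TMF commands that carry no scsi_cmnd.
 */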
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req, int n_elem_resp)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req, int *n_elem_resp)
{
	struct device *dev = hisi_hba->dev;
	int rc = 0;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						  1, DMA_FROM_DEVICE);
			if (!*n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req, *n_elem_resp);
prep_out:
	return rc;
}
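/*
 * hisi_sas_dma_map() leaves its results in *n_elem (data SG entries, or
 * task->num_scatter for ATA, which the LLDD does not map itself), and in
 * *n_elem_req/*n_elem_resp (single-entry SMP request/response mappings,
 * whose lengths must be 4-byte multiples). Any failure after the first
 * successful mapping unwinds through hisi_sas_dma_unmap().
 */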
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	*dq_pointer = dq = sas_dev->dq;

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req, &n_elem_resp);
	if (rc < 0)
		goto prep_out;

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&dq->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req, n_elem_resp);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
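/*
 * Submission is split in two phases: hisi_sas_task_prep() fills the slot
 * and command header and publishes it with WRITE_ONCE(slot->ready, 1);
 * the caller then rings the delivery-queue doorbell via start_delivery()
 * under dq->lock, as done in hisi_sas_task_exec() below.
 */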
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (in_softirq())
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}
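/*
 * Device bring-up policy from above: SSP end devices get a CLEAR TASK SET
 * TMF (releasing any stale slots on success), while SATA devices and port
 * multipliers are given up to HISI_SAS_SRST_ATA_DISK_CNT softreset
 * attempts.
 */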
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);

	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
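/*
 * Both PHY event handlers above may sleep (sl_notify(), sas_phy_reset()),
 * so they are queued on the driver workqueue rather than run from the
 * interrupt path; queue_work() returns false if the work is already
 * queued, which hisi_sas_notify_phy_event() passes back to the caller.
 */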
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_hba->hw->phy_start(hisi_hba, phy_no);
}
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}
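/*
 * The slow-task timer above completes the task only when it has not
 * already been marked done, and flags it SAS_TASK_STATE_ABORTED; the TMF
 * and internal-abort paths below test that flag after
 * wait_for_completion() to tell a timeout from a normal completion.
 */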
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
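/*
 * ATA software reset is a two-FIS sequence: one device-control FIS with
 * ATA_SRST set, then one with it cleared. hisi_sas_fill_ata_reset_cmd()
 * builds either half depending on @reset, and
 * hisi_sas_softreset_ata_disk() below sends the pair per link.
 */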
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
	}
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
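/*
 * Controller reset bracketing: _prepare() takes hisi_hba->sem, blocks the
 * scsi host and sets HISI_SAS_REJECT_CMD_BIT so hisi_sas_task_exec()
 * rejects new IO; _done() re-inits the PHYs, refreshes port IDs, lifts
 * the gates and reports any PHY state delta to libsas via
 * hisi_sas_rescan_topology().
 */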
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else
		msleep(2000);

	return rc;
}
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid free'ing task
				 * before using task in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;
	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;
	for (i = 0; i < blk_cnt; i++) {
		struct hisi_sas_slot_buf_table *buf;
		dma_addr_t buf_dma;
		int slot_index = i * slots_per_blk;

		buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
		if (!buf)
			goto err_out;
		memset(buf, 0, s);

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf++;
			buf_dma += sizeof(*buf);
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
					&hisi_hba->initial_fis_dma,
					GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
					&hisi_hba->sata_breakpoint_dma,
					GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
		HISI_SAS_RESERVED_IPTT_CNT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
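/*
 * On the slot buffer carving in hisi_sas_alloc(): s = lcm(entries, bufsz)
 * (both rounded up to 64) is the smallest allocation that holds a whole
 * number of slot buffers, so blk_cnt such DMA blocks cover every command
 * entry exactly. As a purely illustrative sketch, if the rounded values
 * were 4096 entries and 4160 bytes per buffer, s would be 266240 bytes,
 * giving 64 blocks of 64 slots each; the real numbers depend on the hw
 * descriptor sizes.
 */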
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = hisi_hba->hw->max_command_entries;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
	} else {
		shost->can_queue = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);