/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
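
/*
 * Map the command (or, for ATA_CMD_SET_MAX, the features field) of a
 * host-to-device FIS to the SATA protocol class (FPDMA/PIO/DMA/NONDATA)
 * that the command header must be programmed with. Commands not listed
 * below fall back on the DMA direction of the request.
 */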
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
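
/*
 * For an NCQ (FPDMA read/write) command, fetch the queue tag from the
 * libata queued command so it can be written into the command header;
 * returns 0 when the task carries no NCQ command.
 */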
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u16 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
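
/*
 * Worked example for hisi_sas_get_prog_phy_linkrate_mask() above (not
 * from the driver source): for max == SAS_LINK_RATE_6_0_GBPS,
 * max - SAS_LINK_RATE_1_5_GBPS == 2, so the loop sets bits 0, 2 and 4
 * and returns 0x15, i.e. 1.5, 3.0 and 6.0 Gbit/s all enabled in the
 * programmed-linkrate mask.
 */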

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	unsigned long flags;

	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}
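
/*
 * IPTT (slot) allocation: when the HW has no private allocator, tags
 * for normal SCSI commands come straight from the block layer request
 * tag, while driver-internal commands (TMFs, internal aborts) draw from
 * the reserved region at the top of the bitmap, starting at
 * max_command_entries - HISI_SAS_RESERVED_IPTT_CNT.
 */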
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;
	unsigned long flags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				hisi_hba->hw->max_command_entries -
				HISI_SAS_RESERVED_IPTT_CNT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	unsigned long flags;
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			struct sas_ssp_task *ssp_task = &task->ssp_task;
			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif)
				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
		}
	}

	spin_lock_irqsave(&sas_dev->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag,
				     int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req, int n_elem_resp)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
}
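
/*
 * Map the task's data buffers for DMA. SSP data and SMP request/response
 * frames are mapped here (SMP frame lengths must be 4-byte aligned);
 * for ATA tasks only the scatter element count is recorded, as the
 * mapping is handled elsewhere.
 */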
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req, int *n_elem_resp)
{
	struct device *dev = hisi_hba->dev;
	int rc = -EINVAL;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						  1, DMA_FROM_DEVICE);
			if (!*n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req, *n_elem_resp);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc = 0;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);
			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return rc;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}
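
/*
 * Common preparation for a libsas task: pick a delivery queue, map the
 * data (and DIF) buffers, allocate an IPTT and a delivery-queue entry,
 * fill the command header for the protocol in question and mark the
 * slot ready. On success *pass is bumped so the caller knows to ring
 * the doorbell via start_delivery().
 */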
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (hisi_hba->reply_map) {
		int cpu = raw_smp_processor_id();
		unsigned int dq_index = hisi_hba->reply_map[cpu];

		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req, &n_elem_resp);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req, n_elem_resp);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
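
/*
 * Entry point for lldd_execute_task: validate the port, honour the
 * reject-command state used around controller reset, then prep the
 * slot and start delivery under the delivery-queue lock.
 */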
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	int rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; do not call
		 * task_done() for SATA
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (in_softirq())
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}
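
/*
 * Tell libsas that OOB/identify data for this phy has arrived: refresh
 * the transport-class linkrates, copy the identify frame for SAS phys
 * and raise the PHYE_OOB_DONE and PORTE_BYTES_DMAED events.
 */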
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing to do */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
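
/*
 * Called by the HW layers on OOB ready: arm a timer so that a link
 * reset is issued (see hisi_sas_wait_phyup_timedout() above) if the
 * phy-up interrupt does not arrive within HISI_SAS_WAIT_PHYUP_TIMEOUT
 * seconds.
 */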
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	if (!timer_pending(&phy->timer)) {
		dev_dbg(dev, "phy%d OOB ready\n", phy_no);
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_hba->hw->phy_start(hisi_hba, phy_no);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
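
/*
 * Issue a TMF (or, for SATA, a device-control FIS) as an internal slow
 * task and wait for it: the task is retried up to TASK_RETRY times,
 * each attempt guarded by a TASK_TIMEOUT-second timer that completes
 * the slow task on expiry.
 */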
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
						&hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid freeing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and for all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid freeing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * flush tasklet to avoid freeing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else
		msleep(2000);

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN; release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed; reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If the hw does not implement prep_abort, it either does not
	 * support internal abort or does not need one. Return
	 * TMF_RESP_FUNC_FAILED and let the remaining recovery steps
	 * proceed as if the internal abort had been executed and had
	 * returned on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid freeing task
				 * before using task in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
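
/*
 * Wrapper for _hisi_sas_internal_task_abort(): a single-command abort
 * (HISI_SAS_INT_ABT_CMD) is sent on the delivery queue the command was
 * issued on; a device-wide abort (HISI_SAS_INT_ABT_DEV) is replicated
 * on every queue whose completion interrupt can still be serviced by
 * an online CPU.
 */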
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->pci_irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
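
/*
 * Allocate the per-HBA DMA and driver memory: per-queue command and
 * completion rings, ITCT, IOST and breakpoint tables, the slot table
 * and its buffers (carved out of lcm()-sized blocks so that each
 * buffer stays 64-byte aligned), the IPTT bitmap and the driver
 * workqueue. All allocations are managed (dmam_/devm_), so the error
 * path needs no matching free calls.
 */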
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL | __GFP_ZERO);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL | __GFP_ZERO);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
		HISI_SAS_RESERVED_IPTT_CNT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
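/*
 * Almost everything above is devm-/dmam-managed, so teardown only has to
 * destroy the workqueue created in hisi_sas_alloc().
 */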
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
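/*
 * Fetch controller configuration through the generic device-property API
 * so the same code serves both DT-based platform devices and ACPI
 * firmware; the syscon and reset-register handles are only looked up when
 * an of_node is present.
 */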
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = hisi_hba->hw->max_command_entries;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
	} else {
		shost->can_queue = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
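/*
 * Register/queue dump support. On a triggered dump the snapshot helpers
 * below copy the global and per-port registers, the raw CQ/DQ entries and
 * the IOST/ITCT tables into host buffers, which are then exposed read-only
 * under <debugfs>/hisi_sas/<dev>/dump/ as "global", "port/<N>", "cq/<N>",
 * "dq/<N>", "iost" and "itct".
 */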
struct dentry *hisi_sas_debugfs_dir;

static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_complete_hdr[i],
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int i, j;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;

		debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
	int i, phy_cnt;
	u32 offset, *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
		for (i = 0; i < port->count; i++, databuf++) {
			offset = port->base_off + 4 * i;
			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
						       offset);
		}
	}
}
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
	u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg;
	const struct hisi_sas_debugfs_reg *global =
		hisi_hba->hw->debugfs_reg_global;
	int i;

	for (i = 0; i < global->count; i++, databuf++)
		*databuf = global->read_global_reg(hisi_hba, 4 * i);
}
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
	void *databuf = hisi_hba->debugfs_itct;
	struct hisi_sas_itct *itct;
	int i;

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	void *databuf = hisi_hba->debugfs_iost;
	struct hisi_sas_iost *iost;
	int i;

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}
static const char *
hisi_sas_debugfs_to_reg_name(int off, int base_off,
			     const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}
	return NULL;
}

static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
						    reg->lu);
		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}
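/*
 * hisi_sas_debugfs_print_reg() emits one line per 32-bit register in the
 * form "<offset> <value> [name]", the name coming from the hardware
 * layer's lookup table when it is known. A purely hypothetical line:
 *   0x00000014 0x00000001 PHY_STATE
 */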
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global;

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg,
				   reg_global, s);

	return 0;
}

static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_global_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];

	hisi_sas_debugfs_print_reg(databuf, reg_port, s);

	return 0;
}

static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_show_row_64(struct seq_file *s, int index,
				int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");

	return 0;
}

static int hisi_sas_show_row_32(struct seq_file *s, int index,
				int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");

	return 0;
}
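/*
 * Rows render as an index header followed by the raw entry words, wrapped
 * after every two 64-bit or four 32-bit values, e.g. (values hypothetical):
 *   index 0003:
 *	 0x00000000 0x00010000 0x00000000 0x00000000
 */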
static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
{
	struct hisi_sas_cq *cq = cq_ptr;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
	__le32 *complete_hdr = complete_queue +
		(hisi_hba->hw->complete_hdr_size * slot);

	return hisi_sas_show_row_32(s, slot,
				    hisi_hba->hw->complete_hdr_size,
				    complete_hdr);
}

static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
	struct hisi_sas_cq *cq = s->private;
	int slot, ret;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		ret = hisi_sas_cq_show_slot(s, slot, cq);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_cq_fops = {
	.open = hisi_sas_debugfs_cq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
	struct hisi_sas_dq *dq = dq_ptr;
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
	__le32 *cmd_hdr = cmd_queue +
		sizeof(struct hisi_sas_cmd_hdr) * slot;

	return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr),
				    cmd_hdr);
}

static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
{
	int slot, ret;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		ret = hisi_sas_dq_show_slot(s, slot, s->private);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_dq_fops = {
	.open = hisi_sas_debugfs_dq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
	int i, ret, max_command_entries = hisi_hba->hw->max_command_entries;
	__le64 *iost = &debugfs_iost->qw0;

	for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
		/* re-point at the current snapshot entry each iteration */
		iost = &debugfs_iost->qw0;

		ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost),
					   iost);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_fops = {
	.open = hisi_sas_debugfs_iost_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
	int i, ret;
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
	__le64 *itct = &debugfs_itct->qw0;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
		/* re-point at the current snapshot entry each iteration */
		itct = &debugfs_itct->qw0;

		ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct),
					   itct);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_fops = {
	.open = hisi_sas_debugfs_itct_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[256];
	int p;
	int c;
	int d;

	/* Create dump dir inside device dir */
	dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
	hisi_hba->debugfs_dump_dentry = dump_dentry;

	debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_global_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, 256, "%d", p);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
				    &hisi_sas_debugfs_port_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
				    &hisi_sas_debugfs_cq_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
				    &hisi_sas_debugfs_dq_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_iost_fops);

	debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_itct_fops);
}
static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
{
	hisi_hba->hw->snapshot_prepare(hisi_hba);

	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);

	hisi_sas_debugfs_create_files(hisi_hba);

	hisi_hba->hw->snapshot_restore(hisi_hba);
}
static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
						   const char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	/* A bit racy, but don't care too much since it's only debugfs */
	if (hisi_hba->debugfs_snapshot)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
	.write = &hisi_sas_debugfs_trigger_dump_write,
	.owner = THIS_MODULE,
};
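/*
 * A dump is requested from userspace by writing '1' to the trigger file,
 * e.g. (assuming debugfs is mounted at the usual /sys/kernel/debug):
 *   echo 1 > /sys/kernel/debug/hisi_sas/<dev>/trigger_dump
 * The snapshot itself runs from the driver workqueue, not in the writer's
 * context, and only one snapshot is ever taken per host.
 */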
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);

	if (hisi_hba->debugfs_snapshot)
		return;
	hisi_hba->debugfs_snapshot = true;

	hisi_sas_debugfs_snapshot_regs(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	struct device *dev = hisi_hba->dev;
	int p, i, c, d;
	size_t sz;

	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
						   hisi_sas_debugfs_dir);
	debugfs_create_file("trigger_dump", 0600,
			    hisi_hba->debugfs_dir,
			    hisi_hba,
			    &hisi_sas_debugfs_trigger_dump_fops);

	/* Alloc buffer for global */
	sz = hisi_hba->hw->debugfs_reg_global->count * 4;
	hisi_hba->debugfs_global_reg =
		devm_kmalloc(dev, sz, GFP_KERNEL);

	if (!hisi_hba->debugfs_global_reg)
		goto fail_global;

	/* Alloc buffer for port */
	sz = hisi_hba->hw->debugfs_reg_port->count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		hisi_hba->debugfs_port_reg[p] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_port_reg[p])
			goto fail_port;
	}

	/* Alloc buffer for cq */
	sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		hisi_hba->debugfs_complete_hdr[c] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_complete_hdr[c])
			goto fail_cq;
	}

	/* Alloc buffer for dq */
	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		hisi_hba->debugfs_cmd_hdr[d] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_cmd_hdr[d])
			goto fail_iost_dq;
	}

	/* Alloc buffer for iost */
	sz = max_command_entries * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost)
		goto fail_iost_dq;

	/* Alloc buffer for itct */
	/* Any newly added allocation must be placed before this itct one */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct)
		goto fail_itct;

	return;
fail_itct:
	devm_kfree(dev, hisi_hba->debugfs_iost);
fail_iost_dq:
	for (i = 0; i < d; i++)
		devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
fail_cq:
	for (i = 0; i < c; i++)
		devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
fail_port:
	for (i = 0; i < p; i++)
		devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
	devm_kfree(dev, hisi_hba->debugfs_global_reg);
fail_global:
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
	dev_dbg(dev, "failed to init debugfs!\n");
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
{
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
bool hisi_sas_debugfs_enable;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(debugfs_enable, "Enable driver debugfs (default disabled)");
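/*
 * debugfs support is off by default and must be requested at load time,
 * e.g. (module name assumed to follow the source file):
 *   modprobe hisi_sas_main debugfs_enable=1
 * The 0444 permissions make the parameter read-only once loaded.
 */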
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);