/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hisi_sas.h"

#define DRV_NAME "hisi_sas"
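
/*
 * A device is considered gone if it was never allocated or if its type has
 * been cleared back to SAS_PHY_UNUSED by hisi_sas_dev_gone().
 */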
15 #define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
28 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
30 switch (fis->command) {
31 case ATA_CMD_FPDMA_WRITE:
32 case ATA_CMD_FPDMA_READ:
33 case ATA_CMD_FPDMA_RECV:
34 case ATA_CMD_FPDMA_SEND:
35 case ATA_CMD_NCQ_NON_DATA:
36 return HISI_SAS_SATA_PROTOCOL_FPDMA;
38 case ATA_CMD_DOWNLOAD_MICRO:
40 case ATA_CMD_PMP_READ:
41 case ATA_CMD_READ_LOG_EXT:
42 case ATA_CMD_PIO_READ:
43 case ATA_CMD_PIO_READ_EXT:
44 case ATA_CMD_PMP_WRITE:
45 case ATA_CMD_WRITE_LOG_EXT:
46 case ATA_CMD_PIO_WRITE:
47 case ATA_CMD_PIO_WRITE_EXT:
48 return HISI_SAS_SATA_PROTOCOL_PIO;
51 case ATA_CMD_DOWNLOAD_MICRO_DMA:
52 case ATA_CMD_PMP_READ_DMA:
53 case ATA_CMD_PMP_WRITE_DMA:
55 case ATA_CMD_READ_EXT:
56 case ATA_CMD_READ_LOG_DMA_EXT:
57 case ATA_CMD_READ_STREAM_DMA_EXT:
58 case ATA_CMD_TRUSTED_RCV_DMA:
59 case ATA_CMD_TRUSTED_SND_DMA:
61 case ATA_CMD_WRITE_EXT:
62 case ATA_CMD_WRITE_FUA_EXT:
63 case ATA_CMD_WRITE_QUEUED:
64 case ATA_CMD_WRITE_LOG_DMA_EXT:
65 case ATA_CMD_WRITE_STREAM_DMA_EXT:
66 case ATA_CMD_ZAC_MGMT_IN:
67 return HISI_SAS_SATA_PROTOCOL_DMA;
69 case ATA_CMD_CHK_POWER:
70 case ATA_CMD_DEV_RESET:
73 case ATA_CMD_FLUSH_EXT:
75 case ATA_CMD_VERIFY_EXT:
76 case ATA_CMD_SET_FEATURES:
78 case ATA_CMD_STANDBYNOW1:
79 case ATA_CMD_ZAC_MGMT_OUT:
80 return HISI_SAS_SATA_PROTOCOL_NONDATA;
83 switch (fis->features) {
84 case ATA_SET_MAX_PASSWD:
85 case ATA_SET_MAX_LOCK:
86 return HISI_SAS_SATA_PROTOCOL_PIO;
88 case ATA_SET_MAX_PASSWD_DMA:
89 case ATA_SET_MAX_UNLOCK_DMA:
90 return HISI_SAS_SATA_PROTOCOL_DMA;
93 return HISI_SAS_SATA_PROTOCOL_NONDATA;
98 if (direction == DMA_NONE)
99 return HISI_SAS_SATA_PROTOCOL_NONDATA;
100 return HISI_SAS_SATA_PROTOCOL_PIO;
104 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
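
/*
 * Copy the D2H FIS from the slot's status buffer into the libsas ATA
 * response (task_status.buf) so that libsas/libata can decode the device's
 * completion status.
 */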
106 void hisi_sas_sata_done(struct sas_task *task,
107 struct hisi_sas_slot *slot)
109 struct task_status_struct *ts = &task->task_status;
110 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
111 struct hisi_sas_status_buffer *status_buf =
112 hisi_sas_status_buf_addr_mem(slot);
113 u8 *iu = &status_buf->iu[0];
114 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
116 resp->frame_len = sizeof(struct dev_to_host_fis);
117 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
119 ts->buf_valid_size = sizeof(*resp);
121 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
123 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
125 struct ata_queued_cmd *qc = task->uldd_task;
128 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
129 qc->tf.command == ATA_CMD_FPDMA_READ) {
136 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
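/*
 * Illustration only, assuming the standard libsas sas_linkrate encoding
 * (SAS_LINK_RATE_1_5_GBPS == 8): passing SAS_LINK_RATE_6_0_GBPS yields a
 * mask of 0x15, i.e. the 1.5/3.0/6.0 Gbit rate bits set.
 */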
142 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
147 max -= SAS_LINK_RATE_1_5_GBPS;
148 for (i = 0; i <= max; i++)
149 rate |= 1 << (i * 2);
152 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
154 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
156 return device->port->ha->lldd_ha;
159 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
161 return container_of(sas_port, struct hisi_sas_port, sas_port);
163 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
165 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
170 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
172 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
174 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
176 void *bitmap = hisi_hba->slot_index_tags;
178 clear_bit(slot_idx, bitmap);
181 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
183 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
186 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
188 void *bitmap = hisi_hba->slot_index_tags;
190 set_bit(slot_idx, bitmap);
193 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
196 void *bitmap = hisi_hba->slot_index_tags;
198 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
199 hisi_hba->last_slot_index + 1);
200 if (index >= hisi_hba->slot_index_count) {
201 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
203 if (index >= hisi_hba->slot_index_count)
204 return -SAS_QUEUE_FULL;
206 hisi_sas_slot_index_set(hisi_hba, index);
208 hisi_hba->last_slot_index = index;
213 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
217 for (i = 0; i < hisi_hba->slot_index_count; ++i)
218 hisi_sas_slot_index_clear(hisi_hba, i);
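
/*
 * Release everything held by a completed (or aborted) slot: unmap the
 * scatterlist for non-ATA tasks, return the slot buffer to the DMA pool,
 * unlink the slot from its delivery queue list and free its tag.
 */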
221 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
222 struct hisi_sas_slot *slot)
224 struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
228 struct device *dev = hisi_hba->dev;
230 if (!task->lldd_task)
233 task->lldd_task = NULL;
235 if (!sas_protocol_ata(task->task_proto))
237 dma_unmap_sg(dev, task->scatter,
243 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
245 spin_lock_irqsave(&dq->lock, flags);
246 list_del_init(&slot->entry);
247 spin_unlock_irqrestore(&dq->lock, flags);
251 spin_lock_irqsave(&hisi_hba->lock, flags);
252 hisi_sas_slot_index_free(hisi_hba, slot->idx);
253 spin_unlock_irqrestore(&hisi_hba->lock, flags);
255 /* slot memory is fully zeroed when it is reused */
257 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
259 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
260 struct hisi_sas_slot *slot)
262 hisi_hba->hw->prep_smp(hisi_hba, slot);
265 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
266 struct hisi_sas_slot *slot, int is_tmf,
267 struct hisi_sas_tmf_task *tmf)
269 hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
272 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
273 struct hisi_sas_slot *slot)
275 hisi_hba->hw->prep_stp(hisi_hba, slot);
278 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
279 struct hisi_sas_slot *slot,
280 int device_id, int abort_flag, int tag_to_abort)
282 hisi_hba->hw->prep_abort(hisi_hba, slot,
283 device_id, abort_flag, tag_to_abort);
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
291 static void hisi_sas_slot_abort(struct work_struct *work)
293 struct hisi_sas_slot *abort_slot =
294 container_of(work, struct hisi_sas_slot, abort_slot);
295 struct sas_task *task = abort_slot->task;
296 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
297 struct scsi_cmnd *cmnd = task->uldd_task;
298 struct hisi_sas_tmf_task tmf_task;
300 struct device *dev = hisi_hba->dev;
301 int tag = abort_slot->idx;
303 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
304 dev_err(dev, "cannot abort slot for non-ssp task\n");
308 int_to_scsilun(cmnd->device->lun, &lun);
309 tmf_task.tmf = TMF_ABORT_TASK;
310 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
312 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
314 /* Do cleanup for this task */
315 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
317 task->task_done(task);
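
/*
 * Prepare a sas_task for delivery: map its scatterlist (non-ATA only),
 * allocate a slot tag and slot buffer, reserve an entry in the chosen
 * delivery queue and then build the protocol-specific command header.
 */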
320 static int hisi_sas_task_prep(struct sas_task *task,
321 struct hisi_sas_dq **dq_pointer,
322 int is_tmf, struct hisi_sas_tmf_task *tmf,
325 struct domain_device *device = task->dev;
326 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
327 struct hisi_sas_device *sas_dev = device->lldd_dev;
328 struct hisi_sas_port *port;
329 struct hisi_sas_slot *slot;
330 struct hisi_sas_cmd_hdr *cmd_hdr_base;
331 struct asd_sas_port *sas_port = device->port;
332 struct device *dev = hisi_hba->dev;
333 int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
334 int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
335 unsigned long flags, flags_dq;
336 struct hisi_sas_dq *dq;
340 struct task_status_struct *ts = &task->task_status;
342 ts->resp = SAS_TASK_UNDELIVERED;
343 ts->stat = SAS_PHY_DOWN;
	/*
	 * libsas will use dev->port; do not call
	 * task_done() for SATA devices here.
	 */
348 if (device->dev_type != SAS_SATA_DEV)
349 task->task_done(task);
353 if (DEV_IS_GONE(sas_dev)) {
355 dev_info(dev, "task prep: device %d not ready\n",
358 dev_info(dev, "task prep: device %016llx not ready\n",
359 SAS_ADDR(device->sas_addr));
364 *dq_pointer = dq = sas_dev->dq;
366 port = to_hisi_sas_port(sas_port);
367 if (port && !port->port_attached) {
368 dev_info(dev, "task prep: %s port%d not attach device\n",
369 (dev_is_sata(device)) ?
376 if (!sas_protocol_ata(task->task_proto)) {
377 unsigned int req_len, resp_len;
379 if (task->num_scatter) {
380 n_elem = dma_map_sg(dev, task->scatter,
381 task->num_scatter, task->data_dir);
386 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
387 n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
393 req_len = sg_dma_len(&task->smp_task.smp_req);
396 goto err_out_dma_unmap;
398 n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
402 goto err_out_dma_unmap;
404 resp_len = sg_dma_len(&task->smp_task.smp_resp);
405 if (resp_len & 0x3) {
407 goto err_out_dma_unmap;
411 n_elem = task->num_scatter;
413 if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
414 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
417 goto err_out_dma_unmap;
420 spin_lock_irqsave(&hisi_hba->lock, flags);
421 if (hisi_hba->hw->slot_index_alloc)
422 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
425 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
426 spin_unlock_irqrestore(&hisi_hba->lock, flags);
428 goto err_out_dma_unmap;
430 slot = &hisi_hba->slot_info[slot_idx];
431 memset(slot, 0, sizeof(struct hisi_sas_slot));
433 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
434 GFP_ATOMIC, &slot->buf_dma);
440 spin_lock_irqsave(&dq->lock, flags_dq);
441 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
442 if (wr_q_index < 0) {
443 spin_unlock_irqrestore(&dq->lock, flags_dq);
447 list_add_tail(&slot->delivery, &dq->list);
448 spin_unlock_irqrestore(&dq->lock, flags_dq);
450 dlvry_queue = dq->id;
451 dlvry_queue_slot = wr_q_index;
453 slot->idx = slot_idx;
454 slot->n_elem = n_elem;
455 slot->dlvry_queue = dlvry_queue;
456 slot->dlvry_queue_slot = dlvry_queue_slot;
457 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
458 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
462 slot->is_internal = true;
463 task->lldd_task = slot;
464 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
466 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
467 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
468 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
470 switch (task->task_proto) {
471 case SAS_PROTOCOL_SMP:
472 hisi_sas_task_prep_smp(hisi_hba, slot);
474 case SAS_PROTOCOL_SSP:
475 hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
477 case SAS_PROTOCOL_SATA:
478 case SAS_PROTOCOL_STP:
479 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
480 hisi_sas_task_prep_ata(hisi_hba, slot);
483 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
488 spin_lock_irqsave(&dq->lock, flags);
489 list_add_tail(&slot->entry, &sas_dev->list);
490 spin_unlock_irqrestore(&dq->lock, flags);
491 spin_lock_irqsave(&task->task_state_lock, flags);
492 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
493 spin_unlock_irqrestore(&task->task_state_lock, flags);
501 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
504 spin_lock_irqsave(&hisi_hba->lock, flags);
505 hisi_sas_slot_index_free(hisi_hba, slot_idx);
506 spin_unlock_irqrestore(&hisi_hba->lock, flags);
508 if (!sas_protocol_ata(task->task_proto)) {
509 if (task->num_scatter) {
510 dma_unmap_sg(dev, task->scatter, task->num_scatter,
512 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
514 dma_unmap_sg(dev, &task->smp_task.smp_req,
517 dma_unmap_sg(dev, &task->smp_task.smp_resp,
522 dev_err(dev, "task prep: failed[%d]!\n", rc);
526 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
527 int is_tmf, struct hisi_sas_tmf_task *tmf)
532 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
533 struct device *dev = hisi_hba->dev;
534 struct hisi_sas_dq *dq = NULL;
536 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
539 /* protect task_prep and start_delivery sequence */
540 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
542 dev_err(dev, "task exec: failed[%d]!\n", rc);
545 spin_lock_irqsave(&dq->lock, flags);
546 hisi_hba->hw->start_delivery(dq);
547 spin_unlock_irqrestore(&dq->lock, flags);
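
/*
 * Notify libsas that OOB completed on this phy (PHYE_OOB_DONE), fill in the
 * negotiated link rates and the received identify frame, then trigger port
 * discovery via PORTE_BYTES_DMAED.
 */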
553 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
555 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
556 struct asd_sas_phy *sas_phy = &phy->sas_phy;
557 struct sas_ha_struct *sas_ha;
559 if (!phy->phy_attached)
562 sas_ha = &hisi_hba->sha;
563 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
566 struct sas_phy *sphy = sas_phy->phy;
568 sphy->negotiated_linkrate = sas_phy->linkrate;
569 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
570 sphy->maximum_linkrate_hw =
571 hisi_hba->hw->phy_get_max_linkrate();
572 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
573 sphy->minimum_linkrate = phy->minimum_linkrate;
575 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
576 sphy->maximum_linkrate = phy->maximum_linkrate;
579 if (phy->phy_type & PORT_TYPE_SAS) {
580 struct sas_identify_frame *id;
582 id = (struct sas_identify_frame *)phy->frame_rcvd;
583 id->dev_type = phy->identify.device_type;
584 id->initiator_bits = SAS_PROTOCOL_ALL;
585 id->target_bits = phy->identify.target_port_protocols;
586 } else if (phy->phy_type & PORT_TYPE_SATA) {
590 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
591 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
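
/*
 * Find a free entry in hisi_hba->devices[], searching round-robin from the
 * last allocated device ID, and bind the new device to a delivery queue
 * selected by its device ID.
 */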
594 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
596 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
597 struct hisi_sas_device *sas_dev = NULL;
599 int last = hisi_hba->last_dev_id;
600 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
603 spin_lock_irqsave(&hisi_hba->lock, flags);
604 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
605 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
606 int queue = i % hisi_hba->queue_count;
607 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
609 hisi_hba->devices[i].device_id = i;
610 sas_dev = &hisi_hba->devices[i];
611 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
612 sas_dev->dev_type = device->dev_type;
613 sas_dev->hisi_hba = hisi_hba;
614 sas_dev->sas_device = device;
616 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
621 hisi_hba->last_dev_id = i;
622 spin_unlock_irqrestore(&hisi_hba->lock, flags);
627 static int hisi_sas_dev_found(struct domain_device *device)
629 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
630 struct domain_device *parent_dev = device->parent;
631 struct hisi_sas_device *sas_dev;
632 struct device *dev = hisi_hba->dev;
634 if (hisi_hba->hw->alloc_dev)
635 sas_dev = hisi_hba->hw->alloc_dev(device);
637 sas_dev = hisi_sas_alloc_dev(device);
639 dev_err(dev, "fail alloc dev: max support %d devices\n",
640 HISI_SAS_MAX_DEVICES);
644 device->lldd_dev = sas_dev;
645 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
647 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
649 u8 phy_num = parent_dev->ex_dev.num_phys;
652 for (phy_no = 0; phy_no < phy_num; phy_no++) {
653 phy = &parent_dev->ex_dev.ex_phy[phy_no];
654 if (SAS_ADDR(phy->attached_sas_addr) ==
655 SAS_ADDR(device->sas_addr))
659 if (phy_no == phy_num) {
660 dev_info(dev, "dev found: no attached "
661 "dev:%016llx at ex:%016llx\n",
662 SAS_ADDR(device->sas_addr),
663 SAS_ADDR(parent_dev->sas_addr));
668 dev_info(dev, "dev[%d:%x] found\n",
669 sas_dev->device_id, sas_dev->dev_type);
674 static int hisi_sas_slave_configure(struct scsi_device *sdev)
676 struct domain_device *dev = sdev_to_domain_dev(sdev);
677 int ret = sas_slave_configure(sdev);
681 if (!dev_is_sata(dev))
682 sas_change_queue_depth(sdev, 64);
687 static void hisi_sas_scan_start(struct Scsi_Host *shost)
689 struct hisi_hba *hisi_hba = shost_priv(shost);
691 hisi_hba->hw->phys_init(hisi_hba);
694 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
696 struct hisi_hba *hisi_hba = shost_priv(shost);
697 struct sas_ha_struct *sha = &hisi_hba->sha;
699 /* Wait for PHY up interrupt to occur */
707 static void hisi_sas_phyup_work(struct work_struct *work)
709 struct hisi_sas_phy *phy =
710 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
711 struct hisi_hba *hisi_hba = phy->hisi_hba;
712 struct asd_sas_phy *sas_phy = &phy->sas_phy;
713 int phy_no = sas_phy->id;
715 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
716 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
719 static void hisi_sas_linkreset_work(struct work_struct *work)
721 struct hisi_sas_phy *phy =
722 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
723 struct asd_sas_phy *sas_phy = &phy->sas_phy;
725 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
728 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
729 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
730 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
733 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
734 enum hisi_sas_phy_event event)
736 struct hisi_hba *hisi_hba = phy->hisi_hba;
738 if (WARN_ON(event >= HISI_PHYES_NUM))
741 return queue_work(hisi_hba->wq, &phy->works[event]);
743 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
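
/*
 * Initialise the libsas view of one phy and the per-phy work items
 * (phy-up and link-reset) that the interrupt handlers queue later.
 */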
745 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
747 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
748 struct asd_sas_phy *sas_phy = &phy->sas_phy;
751 phy->hisi_hba = hisi_hba;
753 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
754 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
755 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
756 sas_phy->class = SAS;
757 sas_phy->iproto = SAS_PROTOCOL_ALL;
759 sas_phy->type = PHY_TYPE_PHYSICAL;
760 sas_phy->role = PHY_ROLE_INITIATOR;
761 sas_phy->oob_mode = OOB_NOT_CONNECTED;
762 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
763 sas_phy->id = phy_no;
764 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
765 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
766 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
767 sas_phy->lldd_phy = phy;
769 for (i = 0; i < HISI_PHYES_NUM; i++)
770 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
773 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
775 struct sas_ha_struct *sas_ha = sas_phy->ha;
776 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
777 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
778 struct asd_sas_port *sas_port = sas_phy->port;
779 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
785 spin_lock_irqsave(&hisi_hba->lock, flags);
786 port->port_attached = 1;
787 port->id = phy->port_id;
789 sas_port->lldd_port = port;
790 spin_unlock_irqrestore(&hisi_hba->lock, flags);
793 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
794 struct hisi_sas_slot *slot)
798 struct task_status_struct *ts;
800 ts = &task->task_status;
802 ts->resp = SAS_TASK_COMPLETE;
803 ts->stat = SAS_ABORTED_TASK;
804 spin_lock_irqsave(&task->task_state_lock, flags);
805 task->task_state_flags &=
806 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
807 task->task_state_flags |= SAS_TASK_STATE_DONE;
808 spin_unlock_irqrestore(&task->task_state_lock, flags);
811 hisi_sas_slot_task_free(hisi_hba, task, slot);
814 /* hisi_hba.lock should be locked */
815 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
816 struct domain_device *device)
818 struct hisi_sas_slot *slot, *slot2;
819 struct hisi_sas_device *sas_dev = device->lldd_dev;
821 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
822 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
825 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
827 struct hisi_sas_device *sas_dev;
828 struct domain_device *device;
831 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
832 sas_dev = &hisi_hba->devices[i];
833 device = sas_dev->sas_device;
835 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
839 hisi_sas_release_task(hisi_hba, device);
842 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
844 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
845 struct domain_device *device)
847 if (hisi_hba->hw->dereg_device)
848 hisi_hba->hw->dereg_device(hisi_hba, device);
851 static void hisi_sas_dev_gone(struct domain_device *device)
853 struct hisi_sas_device *sas_dev = device->lldd_dev;
854 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
855 struct device *dev = hisi_hba->dev;
857 dev_info(dev, "dev[%d:%x] is gone\n",
858 sas_dev->device_id, sas_dev->dev_type);
860 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
861 hisi_sas_internal_task_abort(hisi_hba, device,
862 HISI_SAS_INT_ABT_DEV, 0);
864 hisi_sas_dereg_device(hisi_hba, device);
866 hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
867 device->lldd_dev = NULL;
870 if (hisi_hba->hw->free_device)
871 hisi_hba->hw->free_device(sas_dev);
872 sas_dev->dev_type = SAS_PHY_UNUSED;
875 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
877 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
880 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
881 struct sas_phy_linkrates *r)
883 struct sas_phy_linkrates _r;
885 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
886 struct asd_sas_phy *sas_phy = &phy->sas_phy;
887 enum sas_linkrate min, max;
889 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
890 max = sas_phy->phy->maximum_linkrate;
891 min = r->minimum_linkrate;
892 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
893 max = r->maximum_linkrate;
894 min = sas_phy->phy->minimum_linkrate;
898 _r.maximum_linkrate = max;
899 _r.minimum_linkrate = min;
901 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
903 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
904 hisi_hba->hw->phy_start(hisi_hba, phy_no);
907 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
910 struct sas_ha_struct *sas_ha = sas_phy->ha;
911 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
912 int phy_no = sas_phy->id;
915 case PHY_FUNC_HARD_RESET:
916 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
919 case PHY_FUNC_LINK_RESET:
920 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
922 hisi_hba->hw->phy_start(hisi_hba, phy_no);
925 case PHY_FUNC_DISABLE:
926 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
929 case PHY_FUNC_SET_LINK_RATE:
930 hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
932 case PHY_FUNC_GET_EVENTS:
933 if (hisi_hba->hw->get_events) {
934 hisi_hba->hw->get_events(hisi_hba, phy_no);
938 case PHY_FUNC_RELEASE_SPINUP_HOLD:
945 static void hisi_sas_task_done(struct sas_task *task)
947 if (!del_timer(&task->slow_task->timer))
949 complete(&task->slow_task->completion);
952 static void hisi_sas_tmf_timedout(struct timer_list *t)
954 struct sas_task_slow *slow = from_timer(slow, t, timer);
955 struct sas_task *task = slow->task;
958 spin_lock_irqsave(&task->task_state_lock, flags);
959 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
960 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
961 spin_unlock_irqrestore(&task->task_state_lock, flags);
963 complete(&task->slow_task->completion);
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
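
/*
 * Build a slow task carrying either an SSP TMF IU or a SATA FIS, send it
 * through the normal delivery path with a TASK_TIMEOUT timer, retry up to
 * TASK_RETRY times, and translate the completion status into TMF_RESP_*
 * codes.
 */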
969 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
970 void *parameter, u32 para_len,
971 struct hisi_sas_tmf_task *tmf)
973 struct hisi_sas_device *sas_dev = device->lldd_dev;
974 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
975 struct device *dev = hisi_hba->dev;
976 struct sas_task *task;
979 for (retry = 0; retry < TASK_RETRY; retry++) {
980 task = sas_alloc_slow_task(GFP_KERNEL);
985 task->task_proto = device->tproto;
987 if (dev_is_sata(device)) {
988 task->ata_task.device_control_reg_update = 1;
989 memcpy(&task->ata_task.fis, parameter, para_len);
991 memcpy(&task->ssp_task, parameter, para_len);
993 task->task_done = hisi_sas_task_done;
995 task->slow_task->timer.function = hisi_sas_tmf_timedout;
996 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
997 add_timer(&task->slow_task->timer);
999 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
1002 del_timer(&task->slow_task->timer);
1003 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
1008 wait_for_completion(&task->slow_task->completion);
1009 res = TMF_RESP_FUNC_FAILED;
	/* Even if the TMF timed out, return directly. */
1011 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1012 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1013 struct hisi_sas_slot *slot = task->lldd_task;
1015 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
1021 dev_err(dev, "abort tmf: TMF task timeout\n");
1024 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1025 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1026 res = TMF_RESP_FUNC_COMPLETE;
1030 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1031 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1032 res = TMF_RESP_FUNC_SUCC;
1036 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1037 task->task_status.stat == SAS_DATA_UNDERRUN) {
	/* no error, but return the number of bytes of underrun */
1041 dev_warn(dev, "abort tmf: task to dev %016llx "
1042 "resp: 0x%x sts 0x%x underrun\n",
1043 SAS_ADDR(device->sas_addr),
1044 task->task_status.resp,
1045 task->task_status.stat);
1046 res = task->task_status.residual;
1050 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1051 task->task_status.stat == SAS_DATA_OVERRUN) {
1052 dev_warn(dev, "abort tmf: blocked task error\n");
1057 dev_warn(dev, "abort tmf: task to dev "
1058 "%016llx resp: 0x%x status 0x%x\n",
1059 SAS_ADDR(device->sas_addr), task->task_status.resp,
1060 task->task_status.stat);
1061 sas_free_task(task);
1065 if (retry == TASK_RETRY)
1066 dev_warn(dev, "abort tmf: executing internal task failed!\n");
1067 sas_free_task(task);
1071 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1072 bool reset, int pmp, u8 *fis)
1074 struct ata_taskfile tf;
1076 ata_tf_init(dev, &tf);
1080 tf.ctl &= ~ATA_SRST;
1081 tf.command = ATA_CMD_DEV_RESET;
1082 ata_tf_to_fis(&tf, pmp, 0, fis);
1085 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1088 struct ata_port *ap = device->sata_dev.ap;
1089 struct ata_link *link;
1090 int rc = TMF_RESP_FUNC_FAILED;
1091 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1092 struct device *dev = hisi_hba->dev;
1093 int s = sizeof(struct host_to_dev_fis);
1095 ata_for_each_link(link, ap, EDGE) {
1096 int pmp = sata_srst_pmp(link);
1098 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1099 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1100 if (rc != TMF_RESP_FUNC_COMPLETE)
1104 if (rc == TMF_RESP_FUNC_COMPLETE) {
1105 ata_for_each_link(link, ap, EDGE) {
1106 int pmp = sata_srst_pmp(link);
1108 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1109 rc = hisi_sas_exec_internal_tmf_task(device, fis,
1111 if (rc != TMF_RESP_FUNC_COMPLETE)
1112 dev_err(dev, "ata disk de-reset failed\n");
1115 dev_err(dev, "ata disk reset failed\n");
1118 if (rc == TMF_RESP_FUNC_COMPLETE)
1119 hisi_sas_release_task(hisi_hba, device);
1124 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1125 u8 *lun, struct hisi_sas_tmf_task *tmf)
1127 struct sas_ssp_task ssp_task;
1129 if (!(device->tproto & SAS_PROTOCOL_SSP))
1130 return TMF_RESP_FUNC_ESUPP;
1132 memcpy(ssp_task.LUN, lun, 8);
1134 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1135 sizeof(ssp_task), tmf);
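
/*
 * After a controller reset the hardware may assign new port IDs: for every
 * registered device find a phy of its port that is still up, copy that
 * phy's port ID back into the hisi_sas_port and rewrite the device's ITCT.
 */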
1138 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1140 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1143 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1144 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1145 struct domain_device *device = sas_dev->sas_device;
1146 struct asd_sas_port *sas_port;
1147 struct hisi_sas_port *port;
1148 struct hisi_sas_phy *phy = NULL;
1149 struct asd_sas_phy *sas_phy;
1151 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1152 || !device || !device->port)
1155 sas_port = device->port;
1156 port = to_hisi_sas_port(sas_port);
1158 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1159 if (state & BIT(sas_phy->id)) {
1160 phy = sas_phy->lldd_phy;
1165 port->id = phy->port_id;
1167 /* Update linkrate of directly attached device. */
1168 if (!device->parent)
1169 device->linkrate = phy->sas_phy.linkrate;
1171 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1177 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1180 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1181 struct asd_sas_port *_sas_port = NULL;
1184 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1185 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1186 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1187 struct asd_sas_port *sas_port = sas_phy->port;
1188 bool do_port_check = !!(_sas_port != sas_port);
1190 if (!sas_phy->phy->enabled)
1193 /* Report PHY state change to libsas */
1194 if (state & BIT(phy_no)) {
1195 if (do_port_check && sas_port && sas_port->port_dev) {
1196 struct domain_device *dev = sas_port->port_dev;
1198 _sas_port = sas_port;
1200 if (DEV_IS_EXPANDER(dev->dev_type))
1201 sas_ha->notify_port_event(sas_phy,
1202 PORTE_BROADCAST_RCVD);
1204 } else if (old_state & (1 << phy_no))
1205 /* PHY down but was up before */
1206 hisi_sas_phy_down(hisi_hba, phy_no, 0);
1211 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1213 struct device *dev = hisi_hba->dev;
1214 struct Scsi_Host *shost = hisi_hba->shost;
1215 u32 old_state, state;
1218 if (!hisi_hba->hw->soft_reset)
1221 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1224 dev_info(dev, "controller resetting...\n");
1225 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1227 scsi_block_requests(shost);
1228 if (timer_pending(&hisi_hba->timer))
1229 del_timer_sync(&hisi_hba->timer);
1231 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1232 rc = hisi_hba->hw->soft_reset(hisi_hba);
1234 dev_warn(dev, "controller reset failed (%d)\n", rc);
1235 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1236 scsi_unblock_requests(shost);
1239 hisi_sas_release_tasks(hisi_hba);
1241 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	/* Init and wait for PHYs to come up and for all libsas events to finish. */
1244 hisi_hba->hw->phys_init(hisi_hba);
1246 hisi_sas_refresh_port_id(hisi_hba);
1247 scsi_unblock_requests(shost);
1249 state = hisi_hba->hw->get_phys_state(hisi_hba);
1250 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1251 dev_info(dev, "controller reset complete\n");
1254 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
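
/*
 * Abort strategy per protocol: for SSP issue an ABORT TASK TMF plus an
 * internal abort of the command tag; for SATA/STP abort all I/O to the
 * device and soft-reset the disk; for SMP just internally abort the
 * command tag.
 */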
1259 static int hisi_sas_abort_task(struct sas_task *task)
1261 struct scsi_lun lun;
1262 struct hisi_sas_tmf_task tmf_task;
1263 struct domain_device *device = task->dev;
1264 struct hisi_sas_device *sas_dev = device->lldd_dev;
1265 struct hisi_hba *hisi_hba;
1267 int rc = TMF_RESP_FUNC_FAILED;
1268 unsigned long flags;
1271 return TMF_RESP_FUNC_FAILED;
1273 hisi_hba = dev_to_hisi_hba(task->dev);
1274 dev = hisi_hba->dev;
1276 spin_lock_irqsave(&task->task_state_lock, flags);
1277 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1278 spin_unlock_irqrestore(&task->task_state_lock, flags);
1279 rc = TMF_RESP_FUNC_COMPLETE;
1282 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1283 spin_unlock_irqrestore(&task->task_state_lock, flags);
1285 sas_dev->dev_status = HISI_SAS_DEV_EH;
1286 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1287 struct scsi_cmnd *cmnd = task->uldd_task;
1288 struct hisi_sas_slot *slot = task->lldd_task;
1289 u32 tag = slot->idx;
1292 int_to_scsilun(cmnd->device->lun, &lun);
1293 tmf_task.tmf = TMF_ABORT_TASK;
1294 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1296 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1299 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1300 HISI_SAS_INT_ABT_CMD, tag);
1302 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1303 return TMF_RESP_FUNC_FAILED;
	/*
	 * If the TMF finds that the IO is not in the device and also
	 * the internal abort does not succeed, then it is safe to
	 * free the slot.
	 * Note: if the internal abort succeeds then the slot
	 * will have already been completed.
	 */
1313 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1314 if (task->lldd_task)
1315 hisi_sas_do_release_task(hisi_hba, task, slot);
1317 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1318 task->task_proto & SAS_PROTOCOL_STP) {
1319 if (task->dev->dev_type == SAS_SATA_DEV) {
1320 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1321 HISI_SAS_INT_ABT_DEV, 0);
1323 dev_err(dev, "abort task: internal abort failed\n");
1326 hisi_sas_dereg_device(hisi_hba, device);
1327 rc = hisi_sas_softreset_ata_disk(device);
1329 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1331 struct hisi_sas_slot *slot = task->lldd_task;
1332 u32 tag = slot->idx;
1334 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1335 HISI_SAS_INT_ABT_CMD, tag);
1336 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1338 hisi_sas_do_release_task(hisi_hba, task, slot);
1342 if (rc != TMF_RESP_FUNC_COMPLETE)
1343 dev_notice(dev, "abort task: rc=%d\n", rc);
1347 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1349 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1350 struct device *dev = hisi_hba->dev;
1351 struct hisi_sas_tmf_task tmf_task;
1352 int rc = TMF_RESP_FUNC_FAILED;
1354 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1355 HISI_SAS_INT_ABT_DEV, 0);
1357 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1358 return TMF_RESP_FUNC_FAILED;
1360 hisi_sas_dereg_device(hisi_hba, device);
1362 tmf_task.tmf = TMF_ABORT_TASK_SET;
1363 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1365 if (rc == TMF_RESP_FUNC_COMPLETE)
1366 hisi_sas_release_task(hisi_hba, device);
1371 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1373 int rc = TMF_RESP_FUNC_FAILED;
1374 struct hisi_sas_tmf_task tmf_task;
1376 tmf_task.tmf = TMF_CLEAR_ACA;
1377 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1382 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1384 struct sas_phy *phy = sas_get_local_phy(device);
1385 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1386 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1387 rc = sas_phy_reset(phy, reset_type);
1388 sas_put_local_phy(phy);
1393 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1395 struct hisi_sas_device *sas_dev = device->lldd_dev;
1396 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1397 struct device *dev = hisi_hba->dev;
1398 int rc = TMF_RESP_FUNC_FAILED;
1400 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1401 return TMF_RESP_FUNC_FAILED;
1402 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1404 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1405 HISI_SAS_INT_ABT_DEV, 0);
1407 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1408 return TMF_RESP_FUNC_FAILED;
1410 hisi_sas_dereg_device(hisi_hba, device);
1412 rc = hisi_sas_debug_I_T_nexus_reset(device);
1414 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1415 hisi_sas_release_task(hisi_hba, device);
1420 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1422 struct hisi_sas_device *sas_dev = device->lldd_dev;
1423 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1424 struct device *dev = hisi_hba->dev;
1425 int rc = TMF_RESP_FUNC_FAILED;
1427 sas_dev->dev_status = HISI_SAS_DEV_EH;
1428 if (dev_is_sata(device)) {
1429 struct sas_phy *phy;
1431 /* Clear internal IO and then hardreset */
1432 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1433 HISI_SAS_INT_ABT_DEV, 0);
1435 dev_err(dev, "lu_reset: internal abort failed\n");
1438 hisi_sas_dereg_device(hisi_hba, device);
1440 phy = sas_get_local_phy(device);
1442 rc = sas_phy_reset(phy, 1);
1445 hisi_sas_release_task(hisi_hba, device);
1446 sas_put_local_phy(phy);
1448 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1450 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1451 HISI_SAS_INT_ABT_DEV, 0);
1453 dev_err(dev, "lu_reset: internal abort failed\n");
1456 hisi_sas_dereg_device(hisi_hba, device);
1458 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1459 if (rc == TMF_RESP_FUNC_COMPLETE)
1460 hisi_sas_release_task(hisi_hba, device);
1463 if (rc != TMF_RESP_FUNC_COMPLETE)
1464 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1465 sas_dev->device_id, rc);
1469 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1471 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1472 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1474 queue_work(hisi_hba->wq, &r.work);
1475 wait_for_completion(r.completion);
1477 return TMF_RESP_FUNC_COMPLETE;
1479 return TMF_RESP_FUNC_FAILED;
1482 static int hisi_sas_query_task(struct sas_task *task)
1484 struct scsi_lun lun;
1485 struct hisi_sas_tmf_task tmf_task;
1486 int rc = TMF_RESP_FUNC_FAILED;
1488 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1489 struct scsi_cmnd *cmnd = task->uldd_task;
1490 struct domain_device *device = task->dev;
1491 struct hisi_sas_slot *slot = task->lldd_task;
1492 u32 tag = slot->idx;
1494 int_to_scsilun(cmnd->device->lun, &lun);
1495 tmf_task.tmf = TMF_QUERY_TASK;
1496 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LUN; release it then */
1503 case TMF_RESP_FUNC_SUCC:
1504 /* The task is not in Lun or failed, reset the phy */
1505 case TMF_RESP_FUNC_FAILED:
1506 case TMF_RESP_FUNC_COMPLETE:
1509 rc = TMF_RESP_FUNC_FAILED;
1517 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1518 struct sas_task *task, int abort_flag,
1521 struct domain_device *device = task->dev;
1522 struct hisi_sas_device *sas_dev = device->lldd_dev;
1523 struct device *dev = hisi_hba->dev;
1524 struct hisi_sas_port *port;
1525 struct hisi_sas_slot *slot;
1526 struct asd_sas_port *sas_port = device->port;
1527 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1528 struct hisi_sas_dq *dq = sas_dev->dq;
1529 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1530 unsigned long flags, flags_dq = 0;
1533 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1539 port = to_hisi_sas_port(sas_port);
1541 /* simply get a slot and send abort command */
1542 spin_lock_irqsave(&hisi_hba->lock, flags);
1543 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1545 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1548 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1550 slot = &hisi_hba->slot_info[slot_idx];
1551 memset(slot, 0, sizeof(struct hisi_sas_slot));
1553 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1554 GFP_ATOMIC, &slot->buf_dma);
1560 spin_lock_irqsave(&dq->lock, flags_dq);
1561 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1562 if (wr_q_index < 0) {
1563 spin_unlock_irqrestore(&dq->lock, flags_dq);
1566 list_add_tail(&slot->delivery, &dq->list);
1567 spin_unlock_irqrestore(&dq->lock, flags_dq);
1569 dlvry_queue = dq->id;
1570 dlvry_queue_slot = wr_q_index;
1572 slot->idx = slot_idx;
1573 slot->n_elem = n_elem;
1574 slot->dlvry_queue = dlvry_queue;
1575 slot->dlvry_queue_slot = dlvry_queue_slot;
1576 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1577 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1580 slot->is_internal = true;
1581 task->lldd_task = slot;
1583 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1584 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1585 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1587 hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1588 abort_flag, task_tag);
1590 spin_lock_irqsave(&task->task_state_lock, flags);
1591 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1592 spin_unlock_irqrestore(&task->task_state_lock, flags);
1595 /* send abort command to the chip */
1596 spin_lock_irqsave(&dq->lock, flags);
1597 list_add_tail(&slot->entry, &sas_dev->list);
1598 hisi_hba->hw->start_delivery(dq);
1599 spin_unlock_irqrestore(&dq->lock, flags);
1604 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1607 spin_lock_irqsave(&hisi_hba->lock, flags);
1608 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1609 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1611 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
1626 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1627 struct domain_device *device,
1628 int abort_flag, int tag)
1630 struct sas_task *task;
1631 struct hisi_sas_device *sas_dev = device->lldd_dev;
1632 struct device *dev = hisi_hba->dev;
	/*
	 * If the prep_abort callback is not implemented, this hardware either
	 * does not support internal abort or does not need one. In that case
	 * return TMF_RESP_FUNC_FAILED and let the remaining recovery steps
	 * proceed as if the internal abort had been executed and completed
	 * on the CQ.
	 */
1641 if (!hisi_hba->hw->prep_abort)
1642 return TMF_RESP_FUNC_FAILED;
1644 task = sas_alloc_slow_task(GFP_KERNEL);
1649 task->task_proto = device->tproto;
1650 task->task_done = hisi_sas_task_done;
1651 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1652 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1653 add_timer(&task->slow_task->timer);
1655 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1656 task, abort_flag, tag);
1658 del_timer(&task->slow_task->timer);
1659 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1663 wait_for_completion(&task->slow_task->completion);
1664 res = TMF_RESP_FUNC_FAILED;
1666 /* Internal abort timed out */
1667 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1668 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1669 struct hisi_sas_slot *slot = task->lldd_task;
1673 dev_err(dev, "internal task abort: timeout and not done.\n");
1677 dev_err(dev, "internal task abort: timeout.\n");
1680 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1681 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1682 res = TMF_RESP_FUNC_COMPLETE;
1686 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1687 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1688 res = TMF_RESP_FUNC_SUCC;
1693 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1694 "resp: 0x%x sts 0x%x\n",
1695 SAS_ADDR(device->sas_addr),
1697 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1698 task->task_status.stat);
1699 sas_free_task(task);
1704 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1706 hisi_sas_port_notify_formed(sas_phy);
1709 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1713 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1714 u8 reg_index, u8 reg_count, u8 *write_data)
1716 struct hisi_hba *hisi_hba = sha->lldd_ha;
1718 if (!hisi_hba->hw->write_gpio)
1721 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1722 reg_index, reg_count, write_data);
1725 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1727 phy->phy_attached = 0;
1732 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1734 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1735 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1736 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1739 /* Phy down but ready */
1740 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1741 hisi_sas_port_notify_formed(sas_phy);
1743 struct hisi_sas_port *port = phy->port;
1745 /* Phy down and not ready */
1746 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1747 sas_phy_disconnected(sas_phy);
1750 if (phy->phy_type & PORT_TYPE_SAS) {
1751 int port_id = port->id;
1753 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1755 port->port_attached = 0;
1756 } else if (phy->phy_type & PORT_TYPE_SATA)
1757 port->port_attached = 0;
1759 hisi_sas_phy_disconnected(phy);
1762 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1764 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1768 for (i = 0; i < hisi_hba->queue_count; i++) {
1769 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1771 tasklet_kill(&cq->tasklet);
1774 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1776 struct scsi_transport_template *hisi_sas_stt;
1777 EXPORT_SYMBOL_GPL(hisi_sas_stt);
1779 static struct device_attribute *host_attrs[] = {
1780 &dev_attr_phy_event_threshold,
1784 static struct scsi_host_template _hisi_sas_sht = {
1785 .module = THIS_MODULE,
1787 .queuecommand = sas_queuecommand,
1788 .target_alloc = sas_target_alloc,
1789 .slave_configure = hisi_sas_slave_configure,
1790 .scan_finished = hisi_sas_scan_finished,
1791 .scan_start = hisi_sas_scan_start,
1792 .change_queue_depth = sas_change_queue_depth,
1793 .bios_param = sas_bios_param,
1796 .sg_tablesize = SG_ALL,
1797 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1798 .use_clustering = ENABLE_CLUSTERING,
1799 .eh_device_reset_handler = sas_eh_device_reset_handler,
1800 .eh_target_reset_handler = sas_eh_target_reset_handler,
1801 .target_destroy = sas_target_destroy,
1803 .shost_attrs = host_attrs,
1805 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1806 EXPORT_SYMBOL_GPL(hisi_sas_sht);
1808 static struct sas_domain_function_template hisi_sas_transport_ops = {
1809 .lldd_dev_found = hisi_sas_dev_found,
1810 .lldd_dev_gone = hisi_sas_dev_gone,
1811 .lldd_execute_task = hisi_sas_queue_command,
1812 .lldd_control_phy = hisi_sas_control_phy,
1813 .lldd_abort_task = hisi_sas_abort_task,
1814 .lldd_abort_task_set = hisi_sas_abort_task_set,
1815 .lldd_clear_aca = hisi_sas_clear_aca,
1816 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1817 .lldd_lu_reset = hisi_sas_lu_reset,
1818 .lldd_query_task = hisi_sas_query_task,
1819 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1820 .lldd_port_formed = hisi_sas_port_formed,
1821 .lldd_port_deformed = hisi_sas_port_deformed,
1822 .lldd_write_gpio = hisi_sas_write_gpio,
1825 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1827 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1829 for (i = 0; i < hisi_hba->queue_count; i++) {
1830 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1831 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1833 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1834 memset(hisi_hba->cmd_hdr[i], 0, s);
1837 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1838 memset(hisi_hba->complete_hdr[i], 0, s);
1842 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1843 memset(hisi_hba->initial_fis, 0, s);
1845 s = max_command_entries * sizeof(struct hisi_sas_iost);
1846 memset(hisi_hba->iost, 0, s);
1848 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1849 memset(hisi_hba->breakpoint, 0, s);
1851 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1852 memset(hisi_hba->sata_breakpoint, 0, s);
1854 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
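
/*
 * Allocate all per-HBA resources: command and completion header rings for
 * each queue, the slot buffer DMA pool, ITCT, IOST and breakpoint tables,
 * the initial FIS area, the slot array, the tag bitmap and the reset
 * workqueue.
 */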
1856 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1858 struct device *dev = hisi_hba->dev;
1859 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1861 spin_lock_init(&hisi_hba->lock);
1862 for (i = 0; i < hisi_hba->n_phy; i++) {
1863 hisi_sas_phy_init(hisi_hba, i);
1864 hisi_hba->port[i].port_attached = 0;
1865 hisi_hba->port[i].id = -1;
1868 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1869 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1870 hisi_hba->devices[i].device_id = i;
1871 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1874 for (i = 0; i < hisi_hba->queue_count; i++) {
1875 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1876 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1878 /* Completion queue structure */
1880 cq->hisi_hba = hisi_hba;
1882 /* Delivery queue structure */
1883 spin_lock_init(&dq->lock);
1884 INIT_LIST_HEAD(&dq->list);
1886 dq->hisi_hba = hisi_hba;
1888 /* Delivery queue */
1889 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1890 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1891 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1892 if (!hisi_hba->cmd_hdr[i])
1895 /* Completion queue */
1896 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1897 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1898 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1899 if (!hisi_hba->complete_hdr[i])
1903 s = sizeof(struct hisi_sas_slot_buf_table);
1904 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1905 if (!hisi_hba->buffer_pool)
1908 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1909 hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
1911 if (!hisi_hba->itct)
1914 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1915 sizeof(struct hisi_sas_slot),
1917 if (!hisi_hba->slot_info)
1920 s = max_command_entries * sizeof(struct hisi_sas_iost);
1921 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1923 if (!hisi_hba->iost)
1926 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1927 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1928 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1929 if (!hisi_hba->breakpoint)
1932 hisi_hba->slot_index_count = max_command_entries;
1933 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1934 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1935 if (!hisi_hba->slot_index_tags)
1938 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1939 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1940 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1941 if (!hisi_hba->initial_fis)
1944 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1945 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1946 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1947 if (!hisi_hba->sata_breakpoint)
1949 hisi_sas_init_mem(hisi_hba);
1951 hisi_sas_slot_index_init(hisi_hba);
1953 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1954 if (!hisi_hba->wq) {
1955 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1963 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1965 void hisi_sas_free(struct hisi_hba *hisi_hba)
1967 struct device *dev = hisi_hba->dev;
1968 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1970 for (i = 0; i < hisi_hba->queue_count; i++) {
1971 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1972 if (hisi_hba->cmd_hdr[i])
1973 dma_free_coherent(dev, s,
1974 hisi_hba->cmd_hdr[i],
1975 hisi_hba->cmd_hdr_dma[i]);
1977 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1978 if (hisi_hba->complete_hdr[i])
1979 dma_free_coherent(dev, s,
1980 hisi_hba->complete_hdr[i],
1981 hisi_hba->complete_hdr_dma[i]);
1984 dma_pool_destroy(hisi_hba->buffer_pool);
1986 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1988 dma_free_coherent(dev, s,
1989 hisi_hba->itct, hisi_hba->itct_dma);
1991 s = max_command_entries * sizeof(struct hisi_sas_iost);
1993 dma_free_coherent(dev, s,
1994 hisi_hba->iost, hisi_hba->iost_dma);
1996 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1997 if (hisi_hba->breakpoint)
1998 dma_free_coherent(dev, s,
1999 hisi_hba->breakpoint,
2000 hisi_hba->breakpoint_dma);
2003 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2004 if (hisi_hba->initial_fis)
2005 dma_free_coherent(dev, s,
2006 hisi_hba->initial_fis,
2007 hisi_hba->initial_fis_dma);
2009 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2010 if (hisi_hba->sata_breakpoint)
2011 dma_free_coherent(dev, s,
2012 hisi_hba->sata_breakpoint,
2013 hisi_hba->sata_breakpoint_dma);
2016 destroy_workqueue(hisi_hba->wq);
2018 EXPORT_SYMBOL_GPL(hisi_sas_free);
2020 void hisi_sas_rst_work_handler(struct work_struct *work)
2022 struct hisi_hba *hisi_hba =
2023 container_of(work, struct hisi_hba, rst_work);
2025 hisi_sas_controller_reset(hisi_hba);
2027 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2029 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2031 struct hisi_sas_rst *rst =
2032 container_of(work, struct hisi_sas_rst, work);
2034 if (!hisi_sas_controller_reset(rst->hisi_hba))
2036 complete(rst->completion);
2038 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
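
/*
 * Read controller configuration from firmware properties: the SAS address,
 * phy and queue counts, the optional reference clock, and (for DT-based
 * platform devices) the syscon regmap and reset/clock register offsets.
 */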
2040 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2042 struct device *dev = hisi_hba->dev;
2043 struct platform_device *pdev = hisi_hba->platform_dev;
2044 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2047 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2049 dev_err(dev, "could not get property sas-addr\n");
	/*
	 * These properties are only required for a platform device-based
	 * controller with DT firmware.
	 */
2058 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2059 "hisilicon,sas-syscon");
2060 if (IS_ERR(hisi_hba->ctrl)) {
2061 dev_err(dev, "could not get syscon\n");
2065 if (device_property_read_u32(dev, "ctrl-reset-reg",
2066 &hisi_hba->ctrl_reset_reg)) {
2068 "could not get property ctrl-reset-reg\n");
2072 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2073 &hisi_hba->ctrl_reset_sts_reg)) {
2075 "could not get property ctrl-reset-sts-reg\n");
2079 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2080 &hisi_hba->ctrl_clock_ena_reg)) {
2082 "could not get property ctrl-clock-ena-reg\n");
2087 refclk = devm_clk_get(dev, NULL);
2089 dev_dbg(dev, "no ref clk property\n");
2091 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2093 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2094 dev_err(dev, "could not get property phy-count\n");
2098 if (device_property_read_u32(dev, "queue-count",
2099 &hisi_hba->queue_count)) {
2100 dev_err(dev, "could not get property queue-count\n");
2106 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
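
/*
 * Allocate the Scsi_Host and hisi_hba, read the firmware info, set up the
 * DMA mask, map the register resources and allocate the per-HBA memory.
 */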
2108 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2109 const struct hisi_sas_hw *hw)
2111 struct resource *res;
2112 struct Scsi_Host *shost;
2113 struct hisi_hba *hisi_hba;
2114 struct device *dev = &pdev->dev;
2116 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
2118 dev_err(dev, "scsi host alloc failed\n");
2121 hisi_hba = shost_priv(shost);
2123 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2125 hisi_hba->dev = dev;
2126 hisi_hba->platform_dev = pdev;
2127 hisi_hba->shost = shost;
2128 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2130 timer_setup(&hisi_hba->timer, NULL, 0);
2132 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2135 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2136 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2137 dev_err(dev, "No usable DMA addressing method\n");
2141 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2142 hisi_hba->regs = devm_ioremap_resource(dev, res);
2143 if (IS_ERR(hisi_hba->regs))
2146 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2148 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2149 if (IS_ERR(hisi_hba->sgpio_regs))
2153 if (hisi_sas_alloc(hisi_hba, shost)) {
2154 hisi_sas_free(hisi_hba);
2160 scsi_host_put(shost);
2161 dev_err(dev, "shost alloc failed\n");
2165 int hisi_sas_probe(struct platform_device *pdev,
2166 const struct hisi_sas_hw *hw)
2168 struct Scsi_Host *shost;
2169 struct hisi_hba *hisi_hba;
2170 struct device *dev = &pdev->dev;
2171 struct asd_sas_phy **arr_phy;
2172 struct asd_sas_port **arr_port;
2173 struct sas_ha_struct *sha;
2174 int rc, phy_nr, port_nr, i;
2176 shost = hisi_sas_shost_alloc(pdev, hw);
2180 sha = SHOST_TO_SAS_HA(shost);
2181 hisi_hba = shost_priv(shost);
2182 platform_set_drvdata(pdev, sha);
2184 phy_nr = port_nr = hisi_hba->n_phy;
2186 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2187 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2188 if (!arr_phy || !arr_port) {
2193 sha->sas_phy = arr_phy;
2194 sha->sas_port = arr_port;
2195 sha->lldd_ha = hisi_hba;
2197 shost->transportt = hisi_sas_stt;
2198 shost->max_id = HISI_SAS_MAX_DEVICES;
2199 shost->max_lun = ~0;
2200 shost->max_channel = 1;
2201 shost->max_cmd_len = 16;
2202 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2203 shost->can_queue = hisi_hba->hw->max_command_entries;
2204 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2206 sha->sas_ha_name = DRV_NAME;
2207 sha->dev = hisi_hba->dev;
2208 sha->lldd_module = THIS_MODULE;
2209 sha->sas_addr = &hisi_hba->sas_addr[0];
2210 sha->num_phys = hisi_hba->n_phy;
2211 sha->core.shost = hisi_hba->shost;
2213 for (i = 0; i < hisi_hba->n_phy; i++) {
2214 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2215 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2218 rc = scsi_add_host(shost, &pdev->dev);
2222 rc = sas_register_ha(sha);
2224 goto err_out_register_ha;
2226 rc = hisi_hba->hw->hw_init(hisi_hba);
2228 goto err_out_register_ha;
2230 scsi_scan_host(shost);
2234 err_out_register_ha:
2235 scsi_remove_host(shost);
2237 hisi_sas_free(hisi_hba);
2238 scsi_host_put(shost);
2241 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2243 int hisi_sas_remove(struct platform_device *pdev)
2245 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2246 struct hisi_hba *hisi_hba = sha->lldd_ha;
2247 struct Scsi_Host *shost = sha->core.shost;
2249 if (timer_pending(&hisi_hba->timer))
2250 del_timer(&hisi_hba->timer);
2252 sas_unregister_ha(sha);
2253 sas_remove_host(sha->core.shost);
2255 hisi_sas_free(hisi_hba);
2256 scsi_host_put(shost);
2259 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2261 static __init int hisi_sas_init(void)
2263 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2270 static __exit void hisi_sas_exit(void)
2272 sas_release_transport(hisi_sas_stt);
2275 module_init(hisi_sas_init);
2276 module_exit(hisi_sas_exit);
2278 MODULE_LICENSE("GPL");
2279 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2280 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2281 MODULE_ALIAS("platform:" DRV_NAME);