/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
13 #define DRV_NAME "hisi_sas"
15 #define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
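/* A device is "gone" if it was never allocated or its entry in the
 * device table has been returned to SAS_PHY_UNUSED.
 */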
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
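/*
 * Map the ATA command in a host-to-device FIS to a SATA protocol type
 * (FPDMA, PIO, DMA or non-data); commands not listed fall back on the
 * DMA data direction.
 */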
28 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
30 switch (fis->command) {
31 case ATA_CMD_FPDMA_WRITE:
32 case ATA_CMD_FPDMA_READ:
33 case ATA_CMD_FPDMA_RECV:
34 case ATA_CMD_FPDMA_SEND:
35 case ATA_CMD_NCQ_NON_DATA:
36 return HISI_SAS_SATA_PROTOCOL_FPDMA;
38 case ATA_CMD_DOWNLOAD_MICRO:
40 case ATA_CMD_PMP_READ:
41 case ATA_CMD_READ_LOG_EXT:
42 case ATA_CMD_PIO_READ:
43 case ATA_CMD_PIO_READ_EXT:
44 case ATA_CMD_PMP_WRITE:
45 case ATA_CMD_WRITE_LOG_EXT:
46 case ATA_CMD_PIO_WRITE:
47 case ATA_CMD_PIO_WRITE_EXT:
48 return HISI_SAS_SATA_PROTOCOL_PIO;
51 case ATA_CMD_DOWNLOAD_MICRO_DMA:
52 case ATA_CMD_PMP_READ_DMA:
53 case ATA_CMD_PMP_WRITE_DMA:
55 case ATA_CMD_READ_EXT:
56 case ATA_CMD_READ_LOG_DMA_EXT:
57 case ATA_CMD_READ_STREAM_DMA_EXT:
58 case ATA_CMD_TRUSTED_RCV_DMA:
59 case ATA_CMD_TRUSTED_SND_DMA:
61 case ATA_CMD_WRITE_EXT:
62 case ATA_CMD_WRITE_FUA_EXT:
63 case ATA_CMD_WRITE_QUEUED:
64 case ATA_CMD_WRITE_LOG_DMA_EXT:
65 case ATA_CMD_WRITE_STREAM_DMA_EXT:
66 case ATA_CMD_ZAC_MGMT_IN:
67 return HISI_SAS_SATA_PROTOCOL_DMA;
69 case ATA_CMD_CHK_POWER:
70 case ATA_CMD_DEV_RESET:
73 case ATA_CMD_FLUSH_EXT:
75 case ATA_CMD_VERIFY_EXT:
76 case ATA_CMD_SET_FEATURES:
78 case ATA_CMD_STANDBYNOW1:
79 case ATA_CMD_ZAC_MGMT_OUT:
80 return HISI_SAS_SATA_PROTOCOL_NONDATA;
83 switch (fis->features) {
84 case ATA_SET_MAX_PASSWD:
85 case ATA_SET_MAX_LOCK:
86 return HISI_SAS_SATA_PROTOCOL_PIO;
88 case ATA_SET_MAX_PASSWD_DMA:
89 case ATA_SET_MAX_UNLOCK_DMA:
90 return HISI_SAS_SATA_PROTOCOL_DMA;
93 return HISI_SAS_SATA_PROTOCOL_NONDATA;
98 if (direction == DMA_NONE)
99 return HISI_SAS_SATA_PROTOCOL_NONDATA;
100 return HISI_SAS_SATA_PROTOCOL_PIO;
104 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
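/* Copy the D2H FIS from the slot's status buffer into the libsas ATA
 * response for this task.
 */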
106 void hisi_sas_sata_done(struct sas_task *task,
107 struct hisi_sas_slot *slot)
109 struct task_status_struct *ts = &task->task_status;
110 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
111 struct hisi_sas_status_buffer *status_buf =
112 hisi_sas_status_buf_addr_mem(slot);
113 u8 *iu = &status_buf->iu[0];
114 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
116 resp->frame_len = sizeof(struct dev_to_host_fis);
117 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
119 ts->buf_valid_size = sizeof(*resp);
121 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
123 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
125 struct ata_queued_cmd *qc = task->uldd_task;
128 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
129 qc->tf.command == ATA_CMD_FPDMA_READ) {
136 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
142 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
147 max -= SAS_LINK_RATE_1_5_GBPS;
148 for (i = 0; i <= max; i++)
149 rate |= 1 << (i * 2);
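/* e.g. a max of SAS_LINK_RATE_6_0_GBPS yields 0x15: one bit per
 * supported rate, in every other bit position.
 */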
152 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
154 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
156 return device->port->ha->lldd_ha;
159 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
161 return container_of(sas_port, struct hisi_sas_port, sas_port);
163 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
165 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
170 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
172 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
174 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
176 void *bitmap = hisi_hba->slot_index_tags;
178 clear_bit(slot_idx, bitmap);
181 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
183 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
186 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
188 void *bitmap = hisi_hba->slot_index_tags;
190 set_bit(slot_idx, bitmap);
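/* Find a free command slot (tag) in the bitmap; callers are expected to
 * hold hisi_hba->lock.
 */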
193 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
196 void *bitmap = hisi_hba->slot_index_tags;
198 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
199 if (index >= hisi_hba->slot_index_count)
200 return -SAS_QUEUE_FULL;
201 hisi_sas_slot_index_set(hisi_hba, index);
206 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
210 for (i = 0; i < hisi_hba->slot_index_count; ++i)
211 hisi_sas_slot_index_clear(hisi_hba, i);
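/* Undo what hisi_sas_task_prep() set up for a slot: unmap the
 * scatterlists, return the slot buffer to the DMA pool, drop the slot
 * from the device list and free its tag.
 */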
214 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
215 struct hisi_sas_slot *slot)
219 struct device *dev = hisi_hba->dev;
221 if (!task->lldd_task)
224 task->lldd_task = NULL;
226 if (!sas_protocol_ata(task->task_proto))
228 dma_unmap_sg(dev, task->scatter,
234 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
236 list_del_init(&slot->entry);
240 hisi_sas_slot_index_free(hisi_hba, slot->idx);
242 /* slot memory is fully zeroed when it is reused */
244 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
246 static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
247 struct hisi_sas_slot *slot)
249 return hisi_hba->hw->prep_smp(hisi_hba, slot);
252 static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
253 struct hisi_sas_slot *slot, int is_tmf,
254 struct hisi_sas_tmf_task *tmf)
256 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
259 static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
260 struct hisi_sas_slot *slot)
262 return hisi_hba->hw->prep_stp(hisi_hba, slot);
265 static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
266 struct hisi_sas_slot *slot,
267 int device_id, int abort_flag, int tag_to_abort)
269 return hisi_hba->hw->prep_abort(hisi_hba, slot,
270 device_id, abort_flag, tag_to_abort);
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
278 static void hisi_sas_slot_abort(struct work_struct *work)
280 struct hisi_sas_slot *abort_slot =
281 container_of(work, struct hisi_sas_slot, abort_slot);
282 struct sas_task *task = abort_slot->task;
283 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
284 struct scsi_cmnd *cmnd = task->uldd_task;
285 struct hisi_sas_tmf_task tmf_task;
287 struct device *dev = hisi_hba->dev;
288 int tag = abort_slot->idx;
291 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
292 dev_err(dev, "cannot abort slot for non-ssp task\n");
296 int_to_scsilun(cmnd->device->lun, &lun);
297 tmf_task.tmf = TMF_ABORT_TASK;
298 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
300 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
302 /* Do cleanup for this task */
303 spin_lock_irqsave(&hisi_hba->lock, flags);
304 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
305 spin_unlock_irqrestore(&hisi_hba->lock, flags);
307 task->task_done(task);
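/*
 * Build a command slot for a task: map its scatterlists, allocate a tag
 * and a delivery queue entry, then call the hw-specific
 * prep_smp/prep_ssp/prep_stp hook to fill the command header.
 */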
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
314 struct hisi_hba *hisi_hba = dq->hisi_hba;
315 struct domain_device *device = task->dev;
316 struct hisi_sas_device *sas_dev = device->lldd_dev;
317 struct hisi_sas_port *port;
318 struct hisi_sas_slot *slot;
319 struct hisi_sas_cmd_hdr *cmd_hdr_base;
320 struct asd_sas_port *sas_port = device->port;
321 struct device *dev = hisi_hba->dev;
322 int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
323 int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
327 struct task_status_struct *ts = &task->task_status;
329 ts->resp = SAS_TASK_UNDELIVERED;
330 ts->stat = SAS_PHY_DOWN;
/*
 * libsas will use dev->port, should
 * not call task_done for sata
 */
335 if (device->dev_type != SAS_SATA_DEV)
336 task->task_done(task);
340 if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
sas_dev->device_id);
else
dev_info(dev, "task prep: device %016llx not ready\n",
SAS_ADDR(device->sas_addr));
351 port = to_hisi_sas_port(sas_port);
352 if (port && !port->port_attached) {
353 dev_info(dev, "task prep: %s port%d not attach device\n",
354 (dev_is_sata(device)) ?
361 if (!sas_protocol_ata(task->task_proto)) {
362 unsigned int req_len, resp_len;
364 if (task->num_scatter) {
365 n_elem = dma_map_sg(dev, task->scatter,
366 task->num_scatter, task->data_dir);
371 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
372 n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
378 req_len = sg_dma_len(&task->smp_task.smp_req);
381 goto err_out_dma_unmap;
383 n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
387 goto err_out_dma_unmap;
389 resp_len = sg_dma_len(&task->smp_task.smp_resp);
390 if (resp_len & 0x3) {
392 goto err_out_dma_unmap;
396 n_elem = task->num_scatter;
398 spin_lock_irqsave(&hisi_hba->lock, flags);
399 if (hisi_hba->hw->slot_index_alloc)
400 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
403 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
404 spin_unlock_irqrestore(&hisi_hba->lock, flags);
406 goto err_out_dma_unmap;
408 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
412 dlvry_queue = dq->id;
413 dlvry_queue_slot = dq->wr_point;
414 slot = &hisi_hba->slot_info[slot_idx];
415 memset(slot, 0, sizeof(struct hisi_sas_slot));
417 slot->idx = slot_idx;
418 slot->n_elem = n_elem;
419 slot->dlvry_queue = dlvry_queue;
420 slot->dlvry_queue_slot = dlvry_queue_slot;
421 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
422 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
426 slot->is_internal = true;
427 task->lldd_task = slot;
428 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
430 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
431 GFP_ATOMIC, &slot->buf_dma);
434 goto err_out_slot_buf;
436 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
437 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
438 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
440 switch (task->task_proto) {
441 case SAS_PROTOCOL_SMP:
442 rc = hisi_sas_task_prep_smp(hisi_hba, slot);
444 case SAS_PROTOCOL_SSP:
445 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
447 case SAS_PROTOCOL_SATA:
448 case SAS_PROTOCOL_STP:
449 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
450 rc = hisi_sas_task_prep_ata(hisi_hba, slot);
453 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
460 dev_err(dev, "task prep: rc = 0x%x\n", rc);
464 spin_lock_irqsave(&hisi_hba->lock, flags);
465 list_add_tail(&slot->entry, &sas_dev->list);
466 spin_unlock_irqrestore(&hisi_hba->lock, flags);
467 spin_lock_irqsave(&task->task_state_lock, flags);
468 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
469 spin_unlock_irqrestore(&task->task_state_lock, flags);
471 dq->slot_prep = slot;
477 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
480 /* Nothing to be done */
482 spin_lock_irqsave(&hisi_hba->lock, flags);
483 hisi_sas_slot_index_free(hisi_hba, slot_idx);
484 spin_unlock_irqrestore(&hisi_hba->lock, flags);
486 if (!sas_protocol_ata(task->task_proto)) {
487 if (task->num_scatter) {
488 dma_unmap_sg(dev, task->scatter, task->num_scatter,
490 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
492 dma_unmap_sg(dev, &task->smp_task.smp_req,
495 dma_unmap_sg(dev, &task->smp_task.smp_resp,
500 dev_err(dev, "task prep: failed[%d]!\n", rc);
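/* Queue a task: reject it while the controller is resetting, otherwise
 * prep it and start delivery under the delivery queue lock.
 */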
504 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
505 int is_tmf, struct hisi_sas_tmf_task *tmf)
510 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
511 struct device *dev = hisi_hba->dev;
512 struct domain_device *device = task->dev;
513 struct hisi_sas_device *sas_dev = device->lldd_dev;
514 struct hisi_sas_dq *dq = sas_dev->dq;
516 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
519 /* protect task_prep and start_delivery sequence */
520 spin_lock_irqsave(&dq->lock, flags);
521 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
523 dev_err(dev, "task exec: failed[%d]!\n", rc);
526 hisi_hba->hw->start_delivery(dq);
527 spin_unlock_irqrestore(&dq->lock, flags);
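/* Report a received identify frame (SAS) or initial FIS (SATA) on an
 * attached phy to libsas via PORTE_BYTES_DMAED.
 */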
532 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
534 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
535 struct asd_sas_phy *sas_phy = &phy->sas_phy;
536 struct sas_ha_struct *sas_ha;
538 if (!phy->phy_attached)
541 sas_ha = &hisi_hba->sha;
542 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
545 struct sas_phy *sphy = sas_phy->phy;
547 sphy->negotiated_linkrate = sas_phy->linkrate;
548 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
549 sphy->maximum_linkrate_hw =
550 hisi_hba->hw->phy_get_max_linkrate();
551 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
552 sphy->minimum_linkrate = phy->minimum_linkrate;
554 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
555 sphy->maximum_linkrate = phy->maximum_linkrate;
558 if (phy->phy_type & PORT_TYPE_SAS) {
559 struct sas_identify_frame *id;
561 id = (struct sas_identify_frame *)phy->frame_rcvd;
562 id->dev_type = phy->identify.device_type;
563 id->initiator_bits = SAS_PROTOCOL_ALL;
564 id->target_bits = phy->identify.target_port_protocols;
565 } else if (phy->phy_type & PORT_TYPE_SATA) {
569 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
570 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
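/* Pick a free entry in hisi_hba->devices[] for a new domain device and
 * assign it a delivery queue, spread across the queues by device index.
 */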
573 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
575 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
576 struct hisi_sas_device *sas_dev = NULL;
580 spin_lock_irqsave(&hisi_hba->lock, flags);
581 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
582 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
583 int queue = i % hisi_hba->queue_count;
584 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
586 hisi_hba->devices[i].device_id = i;
587 sas_dev = &hisi_hba->devices[i];
588 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
589 sas_dev->dev_type = device->dev_type;
590 sas_dev->hisi_hba = hisi_hba;
591 sas_dev->sas_device = device;
593 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
597 spin_unlock_irqrestore(&hisi_hba->lock, flags);
602 static int hisi_sas_dev_found(struct domain_device *device)
604 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
605 struct domain_device *parent_dev = device->parent;
606 struct hisi_sas_device *sas_dev;
607 struct device *dev = hisi_hba->dev;
609 if (hisi_hba->hw->alloc_dev)
610 sas_dev = hisi_hba->hw->alloc_dev(device);
612 sas_dev = hisi_sas_alloc_dev(device);
614 dev_err(dev, "fail alloc dev: max support %d devices\n",
615 HISI_SAS_MAX_DEVICES);
619 device->lldd_dev = sas_dev;
620 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
622 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
624 u8 phy_num = parent_dev->ex_dev.num_phys;
627 for (phy_no = 0; phy_no < phy_num; phy_no++) {
628 phy = &parent_dev->ex_dev.ex_phy[phy_no];
629 if (SAS_ADDR(phy->attached_sas_addr) ==
630 SAS_ADDR(device->sas_addr))
634 if (phy_no == phy_num) {
635 dev_info(dev, "dev found: no attached "
636 "dev:%016llx at ex:%016llx\n",
637 SAS_ADDR(device->sas_addr),
638 SAS_ADDR(parent_dev->sas_addr));
643 dev_info(dev, "dev[%d:%x] found\n",
644 sas_dev->device_id, sas_dev->dev_type);
649 static int hisi_sas_slave_configure(struct scsi_device *sdev)
651 struct domain_device *dev = sdev_to_domain_dev(sdev);
652 int ret = sas_slave_configure(sdev);
656 if (!dev_is_sata(dev))
657 sas_change_queue_depth(sdev, 64);
662 static void hisi_sas_scan_start(struct Scsi_Host *shost)
664 struct hisi_hba *hisi_hba = shost_priv(shost);
666 hisi_hba->hw->phys_init(hisi_hba);
669 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
671 struct hisi_hba *hisi_hba = shost_priv(shost);
672 struct sas_ha_struct *sha = &hisi_hba->sha;
674 /* Wait for PHY up interrupt to occur */
682 static void hisi_sas_phyup_work(struct work_struct *work)
684 struct hisi_sas_phy *phy =
685 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
686 struct hisi_hba *hisi_hba = phy->hisi_hba;
687 struct asd_sas_phy *sas_phy = &phy->sas_phy;
688 int phy_no = sas_phy->id;
690 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
691 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
694 static void hisi_sas_linkreset_work(struct work_struct *work)
696 struct hisi_sas_phy *phy =
697 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
698 struct asd_sas_phy *sas_phy = &phy->sas_phy;
700 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
703 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
704 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
705 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
708 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
709 enum hisi_sas_phy_event event)
711 struct hisi_hba *hisi_hba = phy->hisi_hba;
713 if (WARN_ON(event >= HISI_PHYES_NUM))
716 return queue_work(hisi_hba->wq, &phy->works[event]);
718 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
720 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
722 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
723 struct asd_sas_phy *sas_phy = &phy->sas_phy;
726 phy->hisi_hba = hisi_hba;
728 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
729 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
730 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
731 sas_phy->class = SAS;
732 sas_phy->iproto = SAS_PROTOCOL_ALL;
734 sas_phy->type = PHY_TYPE_PHYSICAL;
735 sas_phy->role = PHY_ROLE_INITIATOR;
736 sas_phy->oob_mode = OOB_NOT_CONNECTED;
737 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
738 sas_phy->id = phy_no;
739 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
740 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
741 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
742 sas_phy->lldd_phy = phy;
744 for (i = 0; i < HISI_PHYES_NUM; i++)
745 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
748 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
750 struct sas_ha_struct *sas_ha = sas_phy->ha;
751 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
752 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
753 struct asd_sas_port *sas_port = sas_phy->port;
754 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
760 spin_lock_irqsave(&hisi_hba->lock, flags);
761 port->port_attached = 1;
762 port->id = phy->port_id;
764 sas_port->lldd_port = port;
765 spin_unlock_irqrestore(&hisi_hba->lock, flags);
768 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
769 struct hisi_sas_slot *slot)
773 struct task_status_struct *ts;
775 ts = &task->task_status;
777 ts->resp = SAS_TASK_COMPLETE;
778 ts->stat = SAS_ABORTED_TASK;
779 spin_lock_irqsave(&task->task_state_lock, flags);
780 task->task_state_flags &=
781 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
782 task->task_state_flags |= SAS_TASK_STATE_DONE;
783 spin_unlock_irqrestore(&task->task_state_lock, flags);
786 hisi_sas_slot_task_free(hisi_hba, task, slot);
789 /* hisi_hba.lock should be locked */
790 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
791 struct domain_device *device)
793 struct hisi_sas_slot *slot, *slot2;
794 struct hisi_sas_device *sas_dev = device->lldd_dev;
796 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
797 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
800 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
802 struct hisi_sas_device *sas_dev;
803 struct domain_device *device;
806 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
807 sas_dev = &hisi_hba->devices[i];
808 device = sas_dev->sas_device;
810 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
814 hisi_sas_release_task(hisi_hba, device);
817 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
819 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
820 struct domain_device *device)
822 if (hisi_hba->hw->dereg_device)
823 hisi_hba->hw->dereg_device(hisi_hba, device);
826 static void hisi_sas_dev_gone(struct domain_device *device)
828 struct hisi_sas_device *sas_dev = device->lldd_dev;
829 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
830 struct device *dev = hisi_hba->dev;
832 dev_info(dev, "dev[%d:%x] is gone\n",
833 sas_dev->device_id, sas_dev->dev_type);
835 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
836 hisi_sas_internal_task_abort(hisi_hba, device,
837 HISI_SAS_INT_ABT_DEV, 0);
839 hisi_sas_dereg_device(hisi_hba, device);
841 hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
842 device->lldd_dev = NULL;
845 if (hisi_hba->hw->free_device)
846 hisi_hba->hw->free_device(sas_dev);
847 sas_dev->dev_type = SAS_PHY_UNUSED;
850 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
852 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
858 struct sas_ha_struct *sas_ha = sas_phy->ha;
859 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
860 int phy_no = sas_phy->id;
863 case PHY_FUNC_HARD_RESET:
864 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
867 case PHY_FUNC_LINK_RESET:
868 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
870 hisi_hba->hw->phy_start(hisi_hba, phy_no);
873 case PHY_FUNC_DISABLE:
874 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
877 case PHY_FUNC_SET_LINK_RATE:
878 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
880 case PHY_FUNC_GET_EVENTS:
881 if (hisi_hba->hw->get_events) {
882 hisi_hba->hw->get_events(hisi_hba, phy_no);
886 case PHY_FUNC_RELEASE_SPINUP_HOLD:
893 static void hisi_sas_task_done(struct sas_task *task)
if (!del_timer(&task->slow_task->timer))
return;
complete(&task->slow_task->completion);
900 static void hisi_sas_tmf_timedout(struct timer_list *t)
902 struct sas_task_slow *slow = from_timer(slow, t, timer);
903 struct sas_task *task = slow->task;
906 spin_lock_irqsave(&task->task_state_lock, flags);
907 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
908 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
909 spin_unlock_irqrestore(&task->task_state_lock, flags);
911 complete(&task->slow_task->completion);
914 #define TASK_TIMEOUT 20
916 #define INTERNAL_ABORT_TIMEOUT 6
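/* Both timeouts are in seconds (multiplied by HZ when arming the
 * slow-task timer below).
 */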
917 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
918 void *parameter, u32 para_len,
919 struct hisi_sas_tmf_task *tmf)
921 struct hisi_sas_device *sas_dev = device->lldd_dev;
922 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
923 struct device *dev = hisi_hba->dev;
924 struct sas_task *task;
927 for (retry = 0; retry < TASK_RETRY; retry++) {
928 task = sas_alloc_slow_task(GFP_KERNEL);
933 task->task_proto = device->tproto;
935 if (dev_is_sata(device)) {
936 task->ata_task.device_control_reg_update = 1;
937 memcpy(&task->ata_task.fis, parameter, para_len);
939 memcpy(&task->ssp_task, parameter, para_len);
941 task->task_done = hisi_sas_task_done;
943 task->slow_task->timer.function = hisi_sas_tmf_timedout;
944 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
945 add_timer(&task->slow_task->timer);
947 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
950 del_timer(&task->slow_task->timer);
951 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
956 wait_for_completion(&task->slow_task->completion);
957 res = TMF_RESP_FUNC_FAILED;
/* Even if the TMF timed out, return directly. */
959 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
960 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
961 struct hisi_sas_slot *slot = task->lldd_task;
963 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
969 dev_err(dev, "abort tmf: TMF task timeout\n");
972 if (task->task_status.resp == SAS_TASK_COMPLETE &&
973 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
974 res = TMF_RESP_FUNC_COMPLETE;
978 if (task->task_status.resp == SAS_TASK_COMPLETE &&
979 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
980 res = TMF_RESP_FUNC_SUCC;
984 if (task->task_status.resp == SAS_TASK_COMPLETE &&
985 task->task_status.stat == SAS_DATA_UNDERRUN) {
/* no error, but return the number of bytes of
 * underrun
 */
989 dev_warn(dev, "abort tmf: task to dev %016llx "
990 "resp: 0x%x sts 0x%x underrun\n",
991 SAS_ADDR(device->sas_addr),
992 task->task_status.resp,
993 task->task_status.stat);
994 res = task->task_status.residual;
998 if (task->task_status.resp == SAS_TASK_COMPLETE &&
999 task->task_status.stat == SAS_DATA_OVERRUN) {
1000 dev_warn(dev, "abort tmf: blocked task error\n");
1005 dev_warn(dev, "abort tmf: task to dev "
1006 "%016llx resp: 0x%x status 0x%x\n",
1007 SAS_ADDR(device->sas_addr), task->task_status.resp,
1008 task->task_status.stat);
1009 sas_free_task(task);
1013 if (retry == TASK_RETRY)
1014 dev_warn(dev, "abort tmf: executing internal task failed!\n");
1015 sas_free_task(task);
1019 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1020 bool reset, int pmp, u8 *fis)
1022 struct ata_taskfile tf;
1024 ata_tf_init(dev, &tf);
if (reset)
tf.ctl |= ATA_SRST;
else
tf.ctl &= ~ATA_SRST;
1029 tf.command = ATA_CMD_DEV_RESET;
1030 ata_tf_to_fis(&tf, pmp, 0, fis);
1033 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1036 struct ata_port *ap = device->sata_dev.ap;
1037 struct ata_link *link;
1038 int rc = TMF_RESP_FUNC_FAILED;
1039 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1040 struct device *dev = hisi_hba->dev;
1041 int s = sizeof(struct host_to_dev_fis);
1042 unsigned long flags;
1044 ata_for_each_link(link, ap, EDGE) {
1045 int pmp = sata_srst_pmp(link);
1047 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1048 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1049 if (rc != TMF_RESP_FUNC_COMPLETE)
1053 if (rc == TMF_RESP_FUNC_COMPLETE) {
1054 ata_for_each_link(link, ap, EDGE) {
1055 int pmp = sata_srst_pmp(link);
1057 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1058 rc = hisi_sas_exec_internal_tmf_task(device, fis,
1060 if (rc != TMF_RESP_FUNC_COMPLETE)
1061 dev_err(dev, "ata disk de-reset failed\n");
1064 dev_err(dev, "ata disk reset failed\n");
1067 if (rc == TMF_RESP_FUNC_COMPLETE) {
1068 spin_lock_irqsave(&hisi_hba->lock, flags);
1069 hisi_sas_release_task(hisi_hba, device);
1070 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1076 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1077 u8 *lun, struct hisi_sas_tmf_task *tmf)
1079 struct sas_ssp_task ssp_task;
1081 if (!(device->tproto & SAS_PROTOCOL_SSP))
1082 return TMF_RESP_FUNC_ESUPP;
1084 memcpy(ssp_task.LUN, lun, 8);
1086 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1087 sizeof(ssp_task), tmf);
1090 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1092 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1095 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1096 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1097 struct domain_device *device = sas_dev->sas_device;
1098 struct asd_sas_port *sas_port;
1099 struct hisi_sas_port *port;
1100 struct hisi_sas_phy *phy = NULL;
1101 struct asd_sas_phy *sas_phy;
1103 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1104 || !device || !device->port)
1107 sas_port = device->port;
1108 port = to_hisi_sas_port(sas_port);
1110 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1111 if (state & BIT(sas_phy->id)) {
1112 phy = sas_phy->lldd_phy;
1117 port->id = phy->port_id;
1119 /* Update linkrate of directly attached device. */
1120 if (!device->parent)
1121 device->linkrate = phy->sas_phy.linkrate;
1123 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
u32 state)
1132 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1133 struct asd_sas_port *_sas_port = NULL;
1136 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1137 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1138 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1139 struct asd_sas_port *sas_port = sas_phy->port;
1140 bool do_port_check = !!(_sas_port != sas_port);
1142 if (!sas_phy->phy->enabled)
1145 /* Report PHY state change to libsas */
1146 if (state & BIT(phy_no)) {
1147 if (do_port_check && sas_port && sas_port->port_dev) {
1148 struct domain_device *dev = sas_port->port_dev;
1150 _sas_port = sas_port;
1152 if (DEV_IS_EXPANDER(dev->dev_type))
1153 sas_ha->notify_port_event(sas_phy,
1154 PORTE_BROADCAST_RCVD);
1156 } else if (old_state & (1 << phy_no))
1157 /* PHY down but was up before */
1158 hisi_sas_phy_down(hisi_hba, phy_no, 0);
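/*
 * Full controller reset: block and reject new commands, run the hw soft
 * reset, complete any outstanding tasks as aborted, then re-init the
 * PHYs, refresh the port ids and rescan the topology.
 */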
1163 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1165 struct device *dev = hisi_hba->dev;
1166 struct Scsi_Host *shost = hisi_hba->shost;
1167 u32 old_state, state;
1168 unsigned long flags;
1171 if (!hisi_hba->hw->soft_reset)
1174 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1177 dev_info(dev, "controller resetting...\n");
1178 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1180 scsi_block_requests(shost);
1181 if (timer_pending(&hisi_hba->timer))
1182 del_timer_sync(&hisi_hba->timer);
1184 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1185 rc = hisi_hba->hw->soft_reset(hisi_hba);
1187 dev_warn(dev, "controller reset failed (%d)\n", rc);
1188 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1189 scsi_unblock_requests(shost);
1192 spin_lock_irqsave(&hisi_hba->lock, flags);
1193 hisi_sas_release_tasks(hisi_hba);
1194 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1196 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
/* Init and wait for PHYs to come up and all libsas events to finish. */
1199 hisi_hba->hw->phys_init(hisi_hba);
1201 hisi_sas_refresh_port_id(hisi_hba);
1202 scsi_unblock_requests(shost);
1204 state = hisi_hba->hw->get_phys_state(hisi_hba);
1205 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1206 dev_info(dev, "controller reset complete\n");
1209 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
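/*
 * eh abort handler: for SSP, send an ABORT TASK TMF and then abort the
 * command inside the controller; for SATA/STP, abort the device's I/O
 * and soft-reset the disk; for SMP, abort the command by its tag.
 */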
1214 static int hisi_sas_abort_task(struct sas_task *task)
1216 struct scsi_lun lun;
1217 struct hisi_sas_tmf_task tmf_task;
1218 struct domain_device *device = task->dev;
1219 struct hisi_sas_device *sas_dev = device->lldd_dev;
1220 struct hisi_hba *hisi_hba;
1222 int rc = TMF_RESP_FUNC_FAILED;
1223 unsigned long flags;
1226 return TMF_RESP_FUNC_FAILED;
1228 hisi_hba = dev_to_hisi_hba(task->dev);
1229 dev = hisi_hba->dev;
1231 spin_lock_irqsave(&task->task_state_lock, flags);
1232 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1233 spin_unlock_irqrestore(&task->task_state_lock, flags);
1234 rc = TMF_RESP_FUNC_COMPLETE;
1237 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1238 spin_unlock_irqrestore(&task->task_state_lock, flags);
1240 sas_dev->dev_status = HISI_SAS_DEV_EH;
1241 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1242 struct scsi_cmnd *cmnd = task->uldd_task;
1243 struct hisi_sas_slot *slot = task->lldd_task;
1244 u32 tag = slot->idx;
1247 int_to_scsilun(cmnd->device->lun, &lun);
1248 tmf_task.tmf = TMF_ABORT_TASK;
1249 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1251 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1254 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1255 HISI_SAS_INT_ABT_CMD, tag);
1257 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1258 return TMF_RESP_FUNC_FAILED;
/*
 * If the TMF finds that the IO is not in the device and also
 * the internal abort does not succeed, then it is safe to
 * free the slot.
 * Note: if the internal abort succeeds then the slot
 * will have already been completed
 */
1268 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1269 if (task->lldd_task) {
1270 spin_lock_irqsave(&hisi_hba->lock, flags);
1271 hisi_sas_do_release_task(hisi_hba, task, slot);
1272 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1275 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1276 task->task_proto & SAS_PROTOCOL_STP) {
1277 if (task->dev->dev_type == SAS_SATA_DEV) {
1278 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1279 HISI_SAS_INT_ABT_DEV, 0);
1281 dev_err(dev, "abort task: internal abort failed\n");
1284 hisi_sas_dereg_device(hisi_hba, device);
1285 rc = hisi_sas_softreset_ata_disk(device);
1287 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1289 struct hisi_sas_slot *slot = task->lldd_task;
1290 u32 tag = slot->idx;
1292 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1293 HISI_SAS_INT_ABT_CMD, tag);
1294 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1296 spin_lock_irqsave(&hisi_hba->lock, flags);
1297 hisi_sas_do_release_task(hisi_hba, task, slot);
1298 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1303 if (rc != TMF_RESP_FUNC_COMPLETE)
1304 dev_notice(dev, "abort task: rc=%d\n", rc);
1308 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1310 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1311 struct device *dev = hisi_hba->dev;
1312 struct hisi_sas_tmf_task tmf_task;
1313 int rc = TMF_RESP_FUNC_FAILED;
1314 unsigned long flags;
1316 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1317 HISI_SAS_INT_ABT_DEV, 0);
1319 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1320 return TMF_RESP_FUNC_FAILED;
1322 hisi_sas_dereg_device(hisi_hba, device);
1324 tmf_task.tmf = TMF_ABORT_TASK_SET;
1325 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1327 if (rc == TMF_RESP_FUNC_COMPLETE) {
1328 spin_lock_irqsave(&hisi_hba->lock, flags);
1329 hisi_sas_release_task(hisi_hba, device);
1330 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1336 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1338 int rc = TMF_RESP_FUNC_FAILED;
1339 struct hisi_sas_tmf_task tmf_task;
1341 tmf_task.tmf = TMF_CLEAR_ACA;
1342 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1347 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1349 struct sas_phy *phy = sas_get_local_phy(device);
1350 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1351 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1352 rc = sas_phy_reset(phy, reset_type);
1353 sas_put_local_phy(phy);
1358 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1360 struct hisi_sas_device *sas_dev = device->lldd_dev;
1361 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1362 struct device *dev = hisi_hba->dev;
1363 int rc = TMF_RESP_FUNC_FAILED;
1364 unsigned long flags;
1366 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1367 return TMF_RESP_FUNC_FAILED;
1368 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1370 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1371 HISI_SAS_INT_ABT_DEV, 0);
1373 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1374 return TMF_RESP_FUNC_FAILED;
1376 hisi_sas_dereg_device(hisi_hba, device);
1378 rc = hisi_sas_debug_I_T_nexus_reset(device);
1380 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
1381 spin_lock_irqsave(&hisi_hba->lock, flags);
1382 hisi_sas_release_task(hisi_hba, device);
1383 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1388 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1390 struct hisi_sas_device *sas_dev = device->lldd_dev;
1391 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1392 struct device *dev = hisi_hba->dev;
1393 unsigned long flags;
1394 int rc = TMF_RESP_FUNC_FAILED;
1396 sas_dev->dev_status = HISI_SAS_DEV_EH;
1397 if (dev_is_sata(device)) {
1398 struct sas_phy *phy;
1400 /* Clear internal IO and then hardreset */
1401 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1402 HISI_SAS_INT_ABT_DEV, 0);
1404 dev_err(dev, "lu_reset: internal abort failed\n");
1407 hisi_sas_dereg_device(hisi_hba, device);
1409 phy = sas_get_local_phy(device);
1411 rc = sas_phy_reset(phy, 1);
1414 spin_lock_irqsave(&hisi_hba->lock, flags);
1415 hisi_sas_release_task(hisi_hba, device);
1416 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1418 sas_put_local_phy(phy);
1420 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1422 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1423 HISI_SAS_INT_ABT_DEV, 0);
1425 dev_err(dev, "lu_reset: internal abort failed\n");
1428 hisi_sas_dereg_device(hisi_hba, device);
1430 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1431 if (rc == TMF_RESP_FUNC_COMPLETE) {
1432 spin_lock_irqsave(&hisi_hba->lock, flags);
1433 hisi_sas_release_task(hisi_hba, device);
1434 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1438 if (rc != TMF_RESP_FUNC_COMPLETE)
1439 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1440 sas_dev->device_id, rc);
1444 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1446 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1447 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1449 queue_work(hisi_hba->wq, &r.work);
1450 wait_for_completion(r.completion);
1452 return TMF_RESP_FUNC_COMPLETE;
1454 return TMF_RESP_FUNC_FAILED;
1457 static int hisi_sas_query_task(struct sas_task *task)
1459 struct scsi_lun lun;
1460 struct hisi_sas_tmf_task tmf_task;
1461 int rc = TMF_RESP_FUNC_FAILED;
1463 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1464 struct scsi_cmnd *cmnd = task->uldd_task;
1465 struct domain_device *device = task->dev;
1466 struct hisi_sas_slot *slot = task->lldd_task;
1467 u32 tag = slot->idx;
1469 int_to_scsilun(cmnd->device->lun, &lun);
1470 tmf_task.tmf = TMF_QUERY_TASK;
1471 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1473 rc = hisi_sas_debug_issue_ssp_tmf(device,
1477 /* The task is still in Lun, release it then */
1478 case TMF_RESP_FUNC_SUCC:
1479 /* The task is not in Lun or failed, reset the phy */
1480 case TMF_RESP_FUNC_FAILED:
1481 case TMF_RESP_FUNC_COMPLETE:
1484 rc = TMF_RESP_FUNC_FAILED;
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct sas_task *task, int abort_flag,
int task_tag)
1496 struct domain_device *device = task->dev;
1497 struct hisi_sas_device *sas_dev = device->lldd_dev;
1498 struct device *dev = hisi_hba->dev;
1499 struct hisi_sas_port *port;
1500 struct hisi_sas_slot *slot;
1501 struct asd_sas_port *sas_port = device->port;
1502 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1503 struct hisi_sas_dq *dq = sas_dev->dq;
1504 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1505 unsigned long flags, flags_dq;
1507 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1513 port = to_hisi_sas_port(sas_port);
1515 /* simply get a slot and send abort command */
1516 spin_lock_irqsave(&hisi_hba->lock, flags);
1517 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1519 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1522 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1524 spin_lock_irqsave(&dq->lock, flags_dq);
1525 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1529 dlvry_queue = dq->id;
1530 dlvry_queue_slot = dq->wr_point;
1532 slot = &hisi_hba->slot_info[slot_idx];
1533 memset(slot, 0, sizeof(struct hisi_sas_slot));
1535 slot->idx = slot_idx;
1536 slot->n_elem = n_elem;
1537 slot->dlvry_queue = dlvry_queue;
1538 slot->dlvry_queue_slot = dlvry_queue_slot;
1539 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1540 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1543 slot->is_internal = true;
1544 task->lldd_task = slot;
1546 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1547 GFP_ATOMIC, &slot->buf_dma);
1553 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1554 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1555 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1557 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1558 abort_flag, task_tag);
1562 spin_lock_irqsave(&hisi_hba->lock, flags);
1563 list_add_tail(&slot->entry, &sas_dev->list);
1564 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1565 spin_lock_irqsave(&task->task_state_lock, flags);
1566 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1567 spin_unlock_irqrestore(&task->task_state_lock, flags);
1569 dq->slot_prep = slot;
1571 /* send abort command to the chip */
1572 hisi_hba->hw->start_delivery(dq);
1573 spin_unlock_irqrestore(&dq->lock, flags_dq);
1578 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1581 spin_lock_irqsave(&hisi_hba->lock, flags);
1582 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1583 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1584 spin_unlock_irqrestore(&dq->lock, flags_dq);
1586 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag)
1605 struct sas_task *task;
1606 struct hisi_sas_device *sas_dev = device->lldd_dev;
1607 struct device *dev = hisi_hba->dev;
/*
 * If the hw does not implement prep_abort, it either does not support
 * internal abort or does not need one. Return TMF_RESP_FUNC_FAILED and
 * let the remaining recovery steps go on; those steps assume the
 * internal abort has been executed and its completion returned on the CQ.
 */
1616 if (!hisi_hba->hw->prep_abort)
1617 return TMF_RESP_FUNC_FAILED;
1619 task = sas_alloc_slow_task(GFP_KERNEL);
1624 task->task_proto = device->tproto;
1625 task->task_done = hisi_sas_task_done;
1626 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1627 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1628 add_timer(&task->slow_task->timer);
1630 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1631 task, abort_flag, tag);
1633 del_timer(&task->slow_task->timer);
1634 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1638 wait_for_completion(&task->slow_task->completion);
1639 res = TMF_RESP_FUNC_FAILED;
1641 /* Internal abort timed out */
1642 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1643 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1644 struct hisi_sas_slot *slot = task->lldd_task;
1648 dev_err(dev, "internal task abort: timeout and not done.\n");
1652 dev_err(dev, "internal task abort: timeout.\n");
1655 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1656 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1657 res = TMF_RESP_FUNC_COMPLETE;
1661 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1662 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1663 res = TMF_RESP_FUNC_SUCC;
1668 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1669 "resp: 0x%x sts 0x%x\n",
1670 SAS_ADDR(device->sas_addr),
1672 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1673 task->task_status.stat);
1674 sas_free_task(task);
1679 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1681 hisi_sas_port_notify_formed(sas_phy);
1684 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1688 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1689 u8 reg_index, u8 reg_count, u8 *write_data)
1691 struct hisi_hba *hisi_hba = sha->lldd_ha;
1693 if (!hisi_hba->hw->write_gpio)
1696 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1697 reg_index, reg_count, write_data);
1700 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1702 phy->phy_attached = 0;
1707 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1709 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1710 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1711 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1714 /* Phy down but ready */
1715 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1716 hisi_sas_port_notify_formed(sas_phy);
1718 struct hisi_sas_port *port = phy->port;
1720 /* Phy down and not ready */
1721 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1722 sas_phy_disconnected(sas_phy);
1725 if (phy->phy_type & PORT_TYPE_SAS) {
1726 int port_id = port->id;
1728 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1730 port->port_attached = 0;
1731 } else if (phy->phy_type & PORT_TYPE_SATA)
1732 port->port_attached = 0;
1734 hisi_sas_phy_disconnected(phy);
1737 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1739 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1743 for (i = 0; i < hisi_hba->queue_count; i++) {
1744 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1746 tasklet_kill(&cq->tasklet);
1749 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1751 struct scsi_transport_template *hisi_sas_stt;
1752 EXPORT_SYMBOL_GPL(hisi_sas_stt);
1754 static struct device_attribute *host_attrs[] = {
1755 &dev_attr_phy_event_threshold,
1759 static struct scsi_host_template _hisi_sas_sht = {
1760 .module = THIS_MODULE,
1762 .queuecommand = sas_queuecommand,
1763 .target_alloc = sas_target_alloc,
1764 .slave_configure = hisi_sas_slave_configure,
1765 .scan_finished = hisi_sas_scan_finished,
1766 .scan_start = hisi_sas_scan_start,
1767 .change_queue_depth = sas_change_queue_depth,
1768 .bios_param = sas_bios_param,
1771 .sg_tablesize = SG_ALL,
1772 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1773 .use_clustering = ENABLE_CLUSTERING,
1774 .eh_device_reset_handler = sas_eh_device_reset_handler,
1775 .eh_target_reset_handler = sas_eh_target_reset_handler,
1776 .target_destroy = sas_target_destroy,
1778 .shost_attrs = host_attrs,
1780 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1781 EXPORT_SYMBOL_GPL(hisi_sas_sht);
1783 static struct sas_domain_function_template hisi_sas_transport_ops = {
1784 .lldd_dev_found = hisi_sas_dev_found,
1785 .lldd_dev_gone = hisi_sas_dev_gone,
1786 .lldd_execute_task = hisi_sas_queue_command,
1787 .lldd_control_phy = hisi_sas_control_phy,
1788 .lldd_abort_task = hisi_sas_abort_task,
1789 .lldd_abort_task_set = hisi_sas_abort_task_set,
1790 .lldd_clear_aca = hisi_sas_clear_aca,
1791 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1792 .lldd_lu_reset = hisi_sas_lu_reset,
1793 .lldd_query_task = hisi_sas_query_task,
1794 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1795 .lldd_port_formed = hisi_sas_port_formed,
1796 .lldd_port_deformed = hisi_sas_port_deformed,
1797 .lldd_write_gpio = hisi_sas_write_gpio,
1800 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1802 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1804 for (i = 0; i < hisi_hba->queue_count; i++) {
1805 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1806 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1808 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1809 memset(hisi_hba->cmd_hdr[i], 0, s);
1812 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1813 memset(hisi_hba->complete_hdr[i], 0, s);
1817 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1818 memset(hisi_hba->initial_fis, 0, s);
1820 s = max_command_entries * sizeof(struct hisi_sas_iost);
1821 memset(hisi_hba->iost, 0, s);
1823 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1824 memset(hisi_hba->breakpoint, 0, s);
1826 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1827 memset(hisi_hba->sata_breakpoint, 0, s);
1829 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
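/*
 * Allocate everything the host needs: per-queue command and completion
 * headers, the ITCT, IOST and breakpoint tables, the slot buffer DMA
 * pool, the tag bitmap and the driver workqueue.
 */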
1831 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1833 struct device *dev = hisi_hba->dev;
1834 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1836 spin_lock_init(&hisi_hba->lock);
1837 for (i = 0; i < hisi_hba->n_phy; i++) {
1838 hisi_sas_phy_init(hisi_hba, i);
1839 hisi_hba->port[i].port_attached = 0;
1840 hisi_hba->port[i].id = -1;
1843 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1844 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1845 hisi_hba->devices[i].device_id = i;
1846 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1849 for (i = 0; i < hisi_hba->queue_count; i++) {
1850 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1851 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1853 /* Completion queue structure */
1855 cq->hisi_hba = hisi_hba;
1857 /* Delivery queue structure */
1858 spin_lock_init(&dq->lock);
1860 dq->hisi_hba = hisi_hba;
1862 /* Delivery queue */
1863 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1864 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1865 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1866 if (!hisi_hba->cmd_hdr[i])
1869 /* Completion queue */
1870 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1871 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1872 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1873 if (!hisi_hba->complete_hdr[i])
1877 s = sizeof(struct hisi_sas_slot_buf_table);
1878 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1879 if (!hisi_hba->buffer_pool)
1882 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1883 hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
1885 if (!hisi_hba->itct)
1888 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1889 sizeof(struct hisi_sas_slot),
1891 if (!hisi_hba->slot_info)
1894 s = max_command_entries * sizeof(struct hisi_sas_iost);
1895 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1897 if (!hisi_hba->iost)
1900 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1901 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1902 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1903 if (!hisi_hba->breakpoint)
1906 hisi_hba->slot_index_count = max_command_entries;
1907 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1908 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1909 if (!hisi_hba->slot_index_tags)
1912 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1913 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1914 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1915 if (!hisi_hba->initial_fis)
1918 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1919 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1920 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1921 if (!hisi_hba->sata_breakpoint)
1923 hisi_sas_init_mem(hisi_hba);
1925 hisi_sas_slot_index_init(hisi_hba);
1927 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1928 if (!hisi_hba->wq) {
1929 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1937 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1939 void hisi_sas_free(struct hisi_hba *hisi_hba)
1941 struct device *dev = hisi_hba->dev;
1942 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1944 for (i = 0; i < hisi_hba->queue_count; i++) {
1945 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1946 if (hisi_hba->cmd_hdr[i])
1947 dma_free_coherent(dev, s,
1948 hisi_hba->cmd_hdr[i],
1949 hisi_hba->cmd_hdr_dma[i]);
1951 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1952 if (hisi_hba->complete_hdr[i])
1953 dma_free_coherent(dev, s,
1954 hisi_hba->complete_hdr[i],
1955 hisi_hba->complete_hdr_dma[i]);
1958 dma_pool_destroy(hisi_hba->buffer_pool);
1960 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1962 dma_free_coherent(dev, s,
1963 hisi_hba->itct, hisi_hba->itct_dma);
1965 s = max_command_entries * sizeof(struct hisi_sas_iost);
1967 dma_free_coherent(dev, s,
1968 hisi_hba->iost, hisi_hba->iost_dma);
1970 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1971 if (hisi_hba->breakpoint)
1972 dma_free_coherent(dev, s,
1973 hisi_hba->breakpoint,
1974 hisi_hba->breakpoint_dma);
1977 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1978 if (hisi_hba->initial_fis)
1979 dma_free_coherent(dev, s,
1980 hisi_hba->initial_fis,
1981 hisi_hba->initial_fis_dma);
1983 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1984 if (hisi_hba->sata_breakpoint)
1985 dma_free_coherent(dev, s,
1986 hisi_hba->sata_breakpoint,
1987 hisi_hba->sata_breakpoint_dma);
1990 destroy_workqueue(hisi_hba->wq);
1992 EXPORT_SYMBOL_GPL(hisi_sas_free);
1994 void hisi_sas_rst_work_handler(struct work_struct *work)
1996 struct hisi_hba *hisi_hba =
1997 container_of(work, struct hisi_hba, rst_work);
1999 hisi_sas_controller_reset(hisi_hba);
2001 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2003 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2005 struct hisi_sas_rst *rst =
2006 container_of(work, struct hisi_sas_rst, work);
2008 if (!hisi_sas_controller_reset(rst->hisi_hba))
2010 complete(rst->completion);
2012 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
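/* Read controller properties (SAS address, phy and queue counts and, for
 * DT-based platform devices, the syscon reset registers) from firmware.
 */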
2014 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2016 struct device *dev = hisi_hba->dev;
2017 struct platform_device *pdev = hisi_hba->platform_dev;
2018 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2021 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2023 dev_err(dev, "could not get property sas-addr\n");
/*
 * These properties are only required for platform device-based
 * controller with DT firmware.
 */
2032 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2033 "hisilicon,sas-syscon");
2034 if (IS_ERR(hisi_hba->ctrl)) {
2035 dev_err(dev, "could not get syscon\n");
2039 if (device_property_read_u32(dev, "ctrl-reset-reg",
2040 &hisi_hba->ctrl_reset_reg)) {
2042 "could not get property ctrl-reset-reg\n");
2046 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2047 &hisi_hba->ctrl_reset_sts_reg)) {
2049 "could not get property ctrl-reset-sts-reg\n");
2053 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2054 &hisi_hba->ctrl_clock_ena_reg)) {
2056 "could not get property ctrl-clock-ena-reg\n");
2061 refclk = devm_clk_get(dev, NULL);
2063 dev_dbg(dev, "no ref clk property\n");
2065 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2067 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2068 dev_err(dev, "could not get property phy-count\n");
2072 if (device_property_read_u32(dev, "queue-count",
2073 &hisi_hba->queue_count)) {
2074 dev_err(dev, "could not get property queue-count\n");
2080 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2082 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2083 const struct hisi_sas_hw *hw)
2085 struct resource *res;
2086 struct Scsi_Host *shost;
2087 struct hisi_hba *hisi_hba;
2088 struct device *dev = &pdev->dev;
2090 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
2092 dev_err(dev, "scsi host alloc failed\n");
2095 hisi_hba = shost_priv(shost);
2097 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2099 hisi_hba->dev = dev;
2100 hisi_hba->platform_dev = pdev;
2101 hisi_hba->shost = shost;
2102 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2104 timer_setup(&hisi_hba->timer, NULL, 0);
2106 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2109 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2110 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2111 dev_err(dev, "No usable DMA addressing method\n");
2115 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2116 hisi_hba->regs = devm_ioremap_resource(dev, res);
2117 if (IS_ERR(hisi_hba->regs))
2120 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2122 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2123 if (IS_ERR(hisi_hba->sgpio_regs))
2127 if (hisi_sas_alloc(hisi_hba, shost)) {
2128 hisi_sas_free(hisi_hba);
2134 scsi_host_put(shost);
2135 dev_err(dev, "shost alloc failed\n");
2139 int hisi_sas_probe(struct platform_device *pdev,
2140 const struct hisi_sas_hw *hw)
2142 struct Scsi_Host *shost;
2143 struct hisi_hba *hisi_hba;
2144 struct device *dev = &pdev->dev;
2145 struct asd_sas_phy **arr_phy;
2146 struct asd_sas_port **arr_port;
2147 struct sas_ha_struct *sha;
2148 int rc, phy_nr, port_nr, i;
2150 shost = hisi_sas_shost_alloc(pdev, hw);
2154 sha = SHOST_TO_SAS_HA(shost);
2155 hisi_hba = shost_priv(shost);
2156 platform_set_drvdata(pdev, sha);
2158 phy_nr = port_nr = hisi_hba->n_phy;
2160 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2161 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2162 if (!arr_phy || !arr_port) {
2167 sha->sas_phy = arr_phy;
2168 sha->sas_port = arr_port;
2169 sha->lldd_ha = hisi_hba;
2171 shost->transportt = hisi_sas_stt;
2172 shost->max_id = HISI_SAS_MAX_DEVICES;
2173 shost->max_lun = ~0;
2174 shost->max_channel = 1;
2175 shost->max_cmd_len = 16;
2176 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2177 shost->can_queue = hisi_hba->hw->max_command_entries;
2178 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2180 sha->sas_ha_name = DRV_NAME;
2181 sha->dev = hisi_hba->dev;
2182 sha->lldd_module = THIS_MODULE;
2183 sha->sas_addr = &hisi_hba->sas_addr[0];
2184 sha->num_phys = hisi_hba->n_phy;
2185 sha->core.shost = hisi_hba->shost;
2187 for (i = 0; i < hisi_hba->n_phy; i++) {
2188 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2189 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2192 rc = scsi_add_host(shost, &pdev->dev);
2196 rc = sas_register_ha(sha);
2198 goto err_out_register_ha;
2200 rc = hisi_hba->hw->hw_init(hisi_hba);
2202 goto err_out_register_ha;
2204 scsi_scan_host(shost);
2208 err_out_register_ha:
2209 scsi_remove_host(shost);
2211 hisi_sas_free(hisi_hba);
2212 scsi_host_put(shost);
2215 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2217 int hisi_sas_remove(struct platform_device *pdev)
2219 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2220 struct hisi_hba *hisi_hba = sha->lldd_ha;
2221 struct Scsi_Host *shost = sha->core.shost;
2223 if (timer_pending(&hisi_hba->timer))
2224 del_timer(&hisi_hba->timer);
2226 sas_unregister_ha(sha);
2227 sas_remove_host(sha->core.shost);
2229 hisi_sas_free(hisi_hba);
2230 scsi_host_put(shost);
2233 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2235 static __init int hisi_sas_init(void)
2237 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2244 static __exit void hisi_sas_exit(void)
2246 sas_release_transport(hisi_sas_stt);
2249 module_init(hisi_sas_init);
2250 module_exit(hisi_sas_exit);
2252 MODULE_LICENSE("GPL");
2253 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2254 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2255 MODULE_ALIAS("platform:" DRV_NAME);