/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
15 #define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
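
/*
 * Map the ATA command in a host-to-device FIS to the SATA protocol type
 * (FPDMA, PIO, DMA or non-data) that the command header must advertise.
 * The DMA transfer direction is used as a fallback for commands that are
 * not listed explicitly.
 */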
28 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
30 switch (fis->command) {
31 case ATA_CMD_FPDMA_WRITE:
32 case ATA_CMD_FPDMA_READ:
33 case ATA_CMD_FPDMA_RECV:
34 case ATA_CMD_FPDMA_SEND:
35 case ATA_CMD_NCQ_NON_DATA:
36 return HISI_SAS_SATA_PROTOCOL_FPDMA;
38 case ATA_CMD_DOWNLOAD_MICRO:
40 case ATA_CMD_PMP_READ:
41 case ATA_CMD_READ_LOG_EXT:
42 case ATA_CMD_PIO_READ:
43 case ATA_CMD_PIO_READ_EXT:
44 case ATA_CMD_PMP_WRITE:
45 case ATA_CMD_WRITE_LOG_EXT:
46 case ATA_CMD_PIO_WRITE:
47 case ATA_CMD_PIO_WRITE_EXT:
48 return HISI_SAS_SATA_PROTOCOL_PIO;
51 case ATA_CMD_DOWNLOAD_MICRO_DMA:
52 case ATA_CMD_PMP_READ_DMA:
53 case ATA_CMD_PMP_WRITE_DMA:
55 case ATA_CMD_READ_EXT:
56 case ATA_CMD_READ_LOG_DMA_EXT:
57 case ATA_CMD_READ_STREAM_DMA_EXT:
58 case ATA_CMD_TRUSTED_RCV_DMA:
59 case ATA_CMD_TRUSTED_SND_DMA:
61 case ATA_CMD_WRITE_EXT:
62 case ATA_CMD_WRITE_FUA_EXT:
63 case ATA_CMD_WRITE_QUEUED:
64 case ATA_CMD_WRITE_LOG_DMA_EXT:
65 case ATA_CMD_WRITE_STREAM_DMA_EXT:
66 case ATA_CMD_ZAC_MGMT_IN:
67 return HISI_SAS_SATA_PROTOCOL_DMA;
69 case ATA_CMD_CHK_POWER:
70 case ATA_CMD_DEV_RESET:
73 case ATA_CMD_FLUSH_EXT:
75 case ATA_CMD_VERIFY_EXT:
76 case ATA_CMD_SET_FEATURES:
78 case ATA_CMD_STANDBYNOW1:
79 case ATA_CMD_ZAC_MGMT_OUT:
80 return HISI_SAS_SATA_PROTOCOL_NONDATA;
83 if (fis->command == ATA_CMD_SET_MAX) {
84 switch (fis->features) {
85 case ATA_SET_MAX_PASSWD:
86 case ATA_SET_MAX_LOCK:
87 return HISI_SAS_SATA_PROTOCOL_PIO;
89 case ATA_SET_MAX_PASSWD_DMA:
90 case ATA_SET_MAX_UNLOCK_DMA:
91 return HISI_SAS_SATA_PROTOCOL_DMA;
94 return HISI_SAS_SATA_PROTOCOL_NONDATA;
97 if (direction == DMA_NONE)
98 return HISI_SAS_SATA_PROTOCOL_NONDATA;
99 return HISI_SAS_SATA_PROTOCOL_PIO;
103 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
105 void hisi_sas_sata_done(struct sas_task *task,
106 struct hisi_sas_slot *slot)
108 struct task_status_struct *ts = &task->task_status;
109 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
110 struct hisi_sas_status_buffer *status_buf =
111 hisi_sas_status_buf_addr_mem(slot);
112 u8 *iu = &status_buf->iu[0];
113 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
115 resp->frame_len = sizeof(struct dev_to_host_fis);
116 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
118 ts->buf_valid_size = sizeof(*resp);
120 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
122 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
124 struct ata_queued_cmd *qc = task->uldd_task;
127 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
128 qc->tf.command == ATA_CMD_FPDMA_READ) {
135 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
137 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
139 return device->port->ha->lldd_ha;
142 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
144 return container_of(sas_port, struct hisi_sas_port, sas_port);
146 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
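
/* Disable every PHY on the controller. */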
148 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
152 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
153 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
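
/*
 * Slot index management: each outstanding command occupies one bit in the
 * slot_index_tags bitmap; allocation takes the first free bit and freeing
 * clears it again.
 */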
157 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
159 void *bitmap = hisi_hba->slot_index_tags;
161 clear_bit(slot_idx, bitmap);
164 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
166 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
169 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
171 void *bitmap = hisi_hba->slot_index_tags;
173 set_bit(slot_idx, bitmap);
176 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
179 void *bitmap = hisi_hba->slot_index_tags;
181 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
182 if (index >= hisi_hba->slot_index_count)
183 return -SAS_QUEUE_FULL;
184 hisi_sas_slot_index_set(hisi_hba, index);
189 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
193 for (i = 0; i < hisi_hba->slot_index_count; ++i)
194 hisi_sas_slot_index_clear(hisi_hba, i);
197 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
198 struct hisi_sas_slot *slot)
202 struct device *dev = hisi_hba->dev;
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;
209 if (!sas_protocol_ata(task->task_proto))
211 dma_unmap_sg(dev, task->scatter,
217 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
219 list_del_init(&slot->entry);
223 hisi_sas_slot_index_free(hisi_hba, slot->idx);
225 /* slot memory is fully zeroed when it is reused */
227 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
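
/*
 * The prep_* helpers below are thin wrappers that delegate command header
 * construction to the hardware-specific implementation.
 */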
229 static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
230 struct hisi_sas_slot *slot)
232 return hisi_hba->hw->prep_smp(hisi_hba, slot);
235 static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
236 struct hisi_sas_slot *slot, int is_tmf,
237 struct hisi_sas_tmf_task *tmf)
239 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
242 static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
243 struct hisi_sas_slot *slot)
245 return hisi_hba->hw->prep_stp(hisi_hba, slot);
248 static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
249 struct hisi_sas_slot *slot,
250 int device_id, int abort_flag, int tag_to_abort)
252 return hisi_hba->hw->prep_abort(hisi_hba, slot,
253 device_id, abort_flag, tag_to_abort);
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
261 static void hisi_sas_slot_abort(struct work_struct *work)
263 struct hisi_sas_slot *abort_slot =
264 container_of(work, struct hisi_sas_slot, abort_slot);
265 struct sas_task *task = abort_slot->task;
266 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
267 struct scsi_cmnd *cmnd = task->uldd_task;
268 struct hisi_sas_tmf_task tmf_task;
270 struct device *dev = hisi_hba->dev;
271 int tag = abort_slot->idx;
274 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
275 dev_err(dev, "cannot abort slot for non-ssp task\n");
279 int_to_scsilun(cmnd->device->lun, &lun);
280 tmf_task.tmf = TMF_ABORT_TASK;
281 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
283 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
285 /* Do cleanup for this task */
286 spin_lock_irqsave(&hisi_hba->lock, flags);
287 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
288 spin_unlock_irqrestore(&hisi_hba->lock, flags);
290 task->task_done(task);
293 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
294 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
297 struct hisi_hba *hisi_hba = dq->hisi_hba;
298 struct domain_device *device = task->dev;
299 struct hisi_sas_device *sas_dev = device->lldd_dev;
300 struct hisi_sas_port *port;
301 struct hisi_sas_slot *slot;
302 struct hisi_sas_cmd_hdr *cmd_hdr_base;
303 struct asd_sas_port *sas_port = device->port;
304 struct device *dev = hisi_hba->dev;
305 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
309 struct task_status_struct *ts = &task->task_status;
311 ts->resp = SAS_TASK_UNDELIVERED;
312 ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, so do not call
		 * task_done() for SATA devices.
		 */
317 if (device->dev_type != SAS_SATA_DEV)
318 task->task_done(task);
322 if (DEV_IS_GONE(sas_dev)) {
324 dev_info(dev, "task prep: device %d not ready\n",
327 dev_info(dev, "task prep: device %016llx not ready\n",
328 SAS_ADDR(device->sas_addr));
333 port = to_hisi_sas_port(sas_port);
334 if (port && !port->port_attached) {
335 dev_info(dev, "task prep: %s port%d not attach device\n",
336 (dev_is_sata(device)) ?
343 if (!sas_protocol_ata(task->task_proto)) {
344 if (task->num_scatter) {
345 n_elem = dma_map_sg(dev, task->scatter,
346 task->num_scatter, task->data_dir);
353 n_elem = task->num_scatter;
355 spin_lock_irqsave(&hisi_hba->lock, flags);
356 if (hisi_hba->hw->slot_index_alloc)
357 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
360 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
362 spin_unlock_irqrestore(&hisi_hba->lock, flags);
365 spin_unlock_irqrestore(&hisi_hba->lock, flags);
367 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
371 dlvry_queue = dq->id;
372 dlvry_queue_slot = dq->wr_point;
373 slot = &hisi_hba->slot_info[slot_idx];
374 memset(slot, 0, sizeof(struct hisi_sas_slot));
376 slot->idx = slot_idx;
377 slot->n_elem = n_elem;
378 slot->dlvry_queue = dlvry_queue;
379 slot->dlvry_queue_slot = dlvry_queue_slot;
380 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
381 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
384 task->lldd_task = slot;
385 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
387 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
388 GFP_ATOMIC, &slot->buf_dma);
391 goto err_out_slot_buf;
393 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
394 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
395 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
397 switch (task->task_proto) {
398 case SAS_PROTOCOL_SMP:
399 rc = hisi_sas_task_prep_smp(hisi_hba, slot);
401 case SAS_PROTOCOL_SSP:
402 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
404 case SAS_PROTOCOL_SATA:
405 case SAS_PROTOCOL_STP:
406 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
407 rc = hisi_sas_task_prep_ata(hisi_hba, slot);
410 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
417 dev_err(dev, "task prep: rc = 0x%x\n", rc);
421 spin_lock_irqsave(&hisi_hba->lock, flags);
422 list_add_tail(&slot->entry, &sas_dev->list);
423 spin_unlock_irqrestore(&hisi_hba->lock, flags);
424 spin_lock_irqsave(&task->task_state_lock, flags);
425 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
426 spin_unlock_irqrestore(&task->task_state_lock, flags);
428 dq->slot_prep = slot;
434 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
437 /* Nothing to be done */
439 spin_lock_irqsave(&hisi_hba->lock, flags);
440 hisi_sas_slot_index_free(hisi_hba, slot_idx);
441 spin_unlock_irqrestore(&hisi_hba->lock, flags);
443 dev_err(dev, "task prep: failed[%d]!\n", rc);
444 if (!sas_protocol_ata(task->task_proto))
446 dma_unmap_sg(dev, task->scatter,
453 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
454 int is_tmf, struct hisi_sas_tmf_task *tmf)
459 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
460 struct device *dev = hisi_hba->dev;
461 struct domain_device *device = task->dev;
462 struct hisi_sas_device *sas_dev = device->lldd_dev;
463 struct hisi_sas_dq *dq = sas_dev->dq;
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;
468 /* protect task_prep and start_delivery sequence */
469 spin_lock_irqsave(&dq->lock, flags);
470 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
472 dev_err(dev, "task exec: failed[%d]!\n", rc);
475 hisi_hba->hw->start_delivery(dq);
476 spin_unlock_irqrestore(&dq->lock, flags);
481 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
483 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
484 struct asd_sas_phy *sas_phy = &phy->sas_phy;
485 struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
491 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
494 struct sas_phy *sphy = sas_phy->phy;
496 sphy->negotiated_linkrate = sas_phy->linkrate;
497 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
498 sphy->maximum_linkrate_hw =
499 hisi_hba->hw->phy_get_max_linkrate();
500 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
501 sphy->minimum_linkrate = phy->minimum_linkrate;
503 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
504 sphy->maximum_linkrate = phy->maximum_linkrate;
507 if (phy->phy_type & PORT_TYPE_SAS) {
508 struct sas_identify_frame *id;
510 id = (struct sas_identify_frame *)phy->frame_rcvd;
511 id->dev_type = phy->identify.device_type;
512 id->initiator_bits = SAS_PROTOCOL_ALL;
513 id->target_bits = phy->identify.target_port_protocols;
514 } else if (phy->phy_type & PORT_TYPE_SATA) {
518 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
519 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
522 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
524 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
525 struct hisi_sas_device *sas_dev = NULL;
529 spin_lock_irqsave(&hisi_hba->lock, flags);
530 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
531 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
532 int queue = i % hisi_hba->queue_count;
533 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
535 hisi_hba->devices[i].device_id = i;
536 sas_dev = &hisi_hba->devices[i];
537 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
538 sas_dev->dev_type = device->dev_type;
539 sas_dev->hisi_hba = hisi_hba;
540 sas_dev->sas_device = device;
542 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
546 spin_unlock_irqrestore(&hisi_hba->lock, flags);
551 static int hisi_sas_dev_found(struct domain_device *device)
553 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
554 struct domain_device *parent_dev = device->parent;
555 struct hisi_sas_device *sas_dev;
556 struct device *dev = hisi_hba->dev;
558 if (hisi_hba->hw->alloc_dev)
559 sas_dev = hisi_hba->hw->alloc_dev(device);
561 sas_dev = hisi_sas_alloc_dev(device);
563 dev_err(dev, "fail alloc dev: max support %d devices\n",
564 HISI_SAS_MAX_DEVICES);
568 device->lldd_dev = sas_dev;
569 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
571 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
573 u8 phy_num = parent_dev->ex_dev.num_phys;
576 for (phy_no = 0; phy_no < phy_num; phy_no++) {
577 phy = &parent_dev->ex_dev.ex_phy[phy_no];
578 if (SAS_ADDR(phy->attached_sas_addr) ==
579 SAS_ADDR(device->sas_addr)) {
580 sas_dev->attached_phy = phy_no;
585 if (phy_no == phy_num) {
586 dev_info(dev, "dev found: no attached "
587 "dev:%016llx at ex:%016llx\n",
588 SAS_ADDR(device->sas_addr),
589 SAS_ADDR(parent_dev->sas_addr));
594 dev_info(dev, "dev[%d:%x] found\n",
595 sas_dev->device_id, sas_dev->dev_type);
600 static int hisi_sas_slave_configure(struct scsi_device *sdev)
602 struct domain_device *dev = sdev_to_domain_dev(sdev);
603 int ret = sas_slave_configure(sdev);
607 if (!dev_is_sata(dev))
608 sas_change_queue_depth(sdev, 64);
613 static void hisi_sas_scan_start(struct Scsi_Host *shost)
615 struct hisi_hba *hisi_hba = shost_priv(shost);
617 hisi_hba->hw->phys_init(hisi_hba);
620 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
622 struct hisi_hba *hisi_hba = shost_priv(shost);
623 struct sas_ha_struct *sha = &hisi_hba->sha;
625 /* Wait for PHY up interrupt to occur */
633 static void hisi_sas_phyup_work(struct work_struct *work)
635 struct hisi_sas_phy *phy =
636 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
637 struct hisi_hba *hisi_hba = phy->hisi_hba;
638 struct asd_sas_phy *sas_phy = &phy->sas_phy;
639 int phy_no = sas_phy->id;
641 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
642 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
645 static void hisi_sas_linkreset_work(struct work_struct *work)
647 struct hisi_sas_phy *phy =
648 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
649 struct asd_sas_phy *sas_phy = &phy->sas_phy;
651 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
654 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
655 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
656 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
659 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
660 enum hisi_sas_phy_event event)
662 struct hisi_hba *hisi_hba = phy->hisi_hba;
	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
669 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
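
/*
 * Initialise the driver and libsas view of a single PHY: link rate limits,
 * identify data, and the work items for PHY-up and link-reset handling.
 */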
671 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
673 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
674 struct asd_sas_phy *sas_phy = &phy->sas_phy;
677 phy->hisi_hba = hisi_hba;
679 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
680 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
681 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
682 sas_phy->class = SAS;
683 sas_phy->iproto = SAS_PROTOCOL_ALL;
685 sas_phy->type = PHY_TYPE_PHYSICAL;
686 sas_phy->role = PHY_ROLE_INITIATOR;
687 sas_phy->oob_mode = OOB_NOT_CONNECTED;
688 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
689 sas_phy->id = phy_no;
690 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
691 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
692 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
693 sas_phy->lldd_phy = phy;
695 for (i = 0; i < HISI_PHYES_NUM; i++)
696 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
699 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
701 struct sas_ha_struct *sas_ha = sas_phy->ha;
702 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
703 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
704 struct asd_sas_port *sas_port = sas_phy->port;
705 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
711 spin_lock_irqsave(&hisi_hba->lock, flags);
712 port->port_attached = 1;
713 port->id = phy->port_id;
715 sas_port->lldd_port = port;
716 spin_unlock_irqrestore(&hisi_hba->lock, flags);
719 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
720 struct hisi_sas_slot *slot)
724 struct task_status_struct *ts;
726 ts = &task->task_status;
728 ts->resp = SAS_TASK_COMPLETE;
729 ts->stat = SAS_ABORTED_TASK;
730 spin_lock_irqsave(&task->task_state_lock, flags);
731 task->task_state_flags &=
732 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
733 task->task_state_flags |= SAS_TASK_STATE_DONE;
734 spin_unlock_irqrestore(&task->task_state_lock, flags);
737 hisi_sas_slot_task_free(hisi_hba, task, slot);
740 /* hisi_hba.lock should be locked */
741 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
742 struct domain_device *device)
744 struct hisi_sas_slot *slot, *slot2;
745 struct hisi_sas_device *sas_dev = device->lldd_dev;
747 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
748 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
751 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
753 struct hisi_sas_device *sas_dev;
754 struct domain_device *device;
757 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
758 sas_dev = &hisi_hba->devices[i];
759 device = sas_dev->sas_device;
761 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
765 hisi_sas_release_task(hisi_hba, device);
768 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
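
/* Let the hardware layer undo any device registration state it holds, if implemented. */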
770 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
771 struct domain_device *device)
773 if (hisi_hba->hw->dereg_device)
774 hisi_hba->hw->dereg_device(hisi_hba, device);
777 static void hisi_sas_dev_gone(struct domain_device *device)
779 struct hisi_sas_device *sas_dev = device->lldd_dev;
780 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
781 struct device *dev = hisi_hba->dev;
783 dev_info(dev, "dev[%d:%x] is gone\n",
784 sas_dev->device_id, sas_dev->dev_type);
786 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
787 hisi_sas_internal_task_abort(hisi_hba, device,
788 HISI_SAS_INT_ABT_DEV, 0);
790 hisi_sas_dereg_device(hisi_hba, device);
792 hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
793 device->lldd_dev = NULL;
796 if (hisi_hba->hw->free_device)
797 hisi_hba->hw->free_device(sas_dev);
798 sas_dev->dev_type = SAS_PHY_UNUSED;
801 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
803 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
806 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
809 struct sas_ha_struct *sas_ha = sas_phy->ha;
810 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
811 int phy_no = sas_phy->id;
814 case PHY_FUNC_HARD_RESET:
815 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
818 case PHY_FUNC_LINK_RESET:
819 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
821 hisi_hba->hw->phy_start(hisi_hba, phy_no);
824 case PHY_FUNC_DISABLE:
825 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
828 case PHY_FUNC_SET_LINK_RATE:
829 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
831 case PHY_FUNC_GET_EVENTS:
832 if (hisi_hba->hw->get_events) {
833 hisi_hba->hw->get_events(hisi_hba, phy_no);
837 case PHY_FUNC_RELEASE_SPINUP_HOLD:
844 static void hisi_sas_task_done(struct sas_task *task)
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
851 static void hisi_sas_tmf_timedout(struct timer_list *t)
853 struct sas_task_slow *slow = from_timer(slow, t, timer);
854 struct sas_task *task = slow->task;
857 spin_lock_irqsave(&task->task_state_lock, flags);
858 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
859 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
860 spin_unlock_irqrestore(&task->task_state_lock, flags);
862 complete(&task->slow_task->completion);
865 #define TASK_TIMEOUT 20
867 #define INTERNAL_ABORT_TIMEOUT 6
868 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
869 void *parameter, u32 para_len,
870 struct hisi_sas_tmf_task *tmf)
872 struct hisi_sas_device *sas_dev = device->lldd_dev;
873 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
874 struct device *dev = hisi_hba->dev;
875 struct sas_task *task;
878 for (retry = 0; retry < TASK_RETRY; retry++) {
879 task = sas_alloc_slow_task(GFP_KERNEL);
884 task->task_proto = device->tproto;
886 if (dev_is_sata(device)) {
887 task->ata_task.device_control_reg_update = 1;
888 memcpy(&task->ata_task.fis, parameter, para_len);
890 memcpy(&task->ssp_task, parameter, para_len);
892 task->task_done = hisi_sas_task_done;
894 task->slow_task->timer.function = hisi_sas_tmf_timedout;
895 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
896 add_timer(&task->slow_task->timer);
898 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
901 del_timer(&task->slow_task->timer);
902 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
907 wait_for_completion(&task->slow_task->completion);
908 res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
910 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
911 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
912 struct hisi_sas_slot *slot = task->lldd_task;
914 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
920 dev_err(dev, "abort tmf: TMF task timeout\n");
923 if (task->task_status.resp == SAS_TASK_COMPLETE &&
924 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
925 res = TMF_RESP_FUNC_COMPLETE;
929 if (task->task_status.resp == SAS_TASK_COMPLETE &&
930 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
931 res = TMF_RESP_FUNC_SUCC;
935 if (task->task_status.resp == SAS_TASK_COMPLETE &&
936 task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
940 dev_warn(dev, "abort tmf: task to dev %016llx "
941 "resp: 0x%x sts 0x%x underrun\n",
942 SAS_ADDR(device->sas_addr),
943 task->task_status.resp,
944 task->task_status.stat);
945 res = task->task_status.residual;
949 if (task->task_status.resp == SAS_TASK_COMPLETE &&
950 task->task_status.stat == SAS_DATA_OVERRUN) {
951 dev_warn(dev, "abort tmf: blocked task error\n");
956 dev_warn(dev, "abort tmf: task to dev "
957 "%016llx resp: 0x%x status 0x%x\n",
958 SAS_ADDR(device->sas_addr), task->task_status.resp,
959 task->task_status.stat);
964 if (retry == TASK_RETRY)
965 dev_warn(dev, "abort tmf: executing internal task failed!\n");
970 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
971 bool reset, int pmp, u8 *fis)
973 struct ata_taskfile tf;
	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
981 ata_tf_to_fis(&tf, pmp, 0, fis);
984 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
987 struct ata_port *ap = device->sata_dev.ap;
988 struct ata_link *link;
989 int rc = TMF_RESP_FUNC_FAILED;
990 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
991 struct device *dev = hisi_hba->dev;
992 int s = sizeof(struct host_to_dev_fis);
995 ata_for_each_link(link, ap, EDGE) {
996 int pmp = sata_srst_pmp(link);
998 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
999 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1000 if (rc != TMF_RESP_FUNC_COMPLETE)
1004 if (rc == TMF_RESP_FUNC_COMPLETE) {
1005 ata_for_each_link(link, ap, EDGE) {
1006 int pmp = sata_srst_pmp(link);
1008 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1009 rc = hisi_sas_exec_internal_tmf_task(device, fis,
1011 if (rc != TMF_RESP_FUNC_COMPLETE)
1012 dev_err(dev, "ata disk de-reset failed\n");
1015 dev_err(dev, "ata disk reset failed\n");
1018 if (rc == TMF_RESP_FUNC_COMPLETE) {
1019 spin_lock_irqsave(&hisi_hba->lock, flags);
1020 hisi_sas_release_task(hisi_hba, device);
1021 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1027 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1028 u8 *lun, struct hisi_sas_tmf_task *tmf)
1030 struct sas_ssp_task ssp_task;
1032 if (!(device->tproto & SAS_PROTOCOL_SSP))
1033 return TMF_RESP_FUNC_ESUPP;
1035 memcpy(ssp_task.LUN, lun, 8);
1037 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1038 sizeof(ssp_task), tmf);
1041 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1043 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1046 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1047 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1048 struct domain_device *device = sas_dev->sas_device;
1049 struct asd_sas_port *sas_port;
1050 struct hisi_sas_port *port;
1051 struct hisi_sas_phy *phy = NULL;
1052 struct asd_sas_phy *sas_phy;
1054 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1055 || !device || !device->port)
1058 sas_port = device->port;
1059 port = to_hisi_sas_port(sas_port);
1061 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1062 if (state & BIT(sas_phy->id)) {
1063 phy = sas_phy->lldd_phy;
1068 port->id = phy->port_id;
1070 /* Update linkrate of directly attached device. */
1071 if (!device->parent)
1072 device->linkrate = phy->sas_phy.linkrate;
1074 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1080 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1083 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1084 struct asd_sas_port *_sas_port = NULL;
1087 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1088 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1089 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1090 struct asd_sas_port *sas_port = sas_phy->port;
1091 bool do_port_check = !!(_sas_port != sas_port);
		if (!sas_phy->phy->enabled)
			continue;
1096 /* Report PHY state change to libsas */
1097 if (state & BIT(phy_no)) {
1098 if (do_port_check && sas_port && sas_port->port_dev) {
1099 struct domain_device *dev = sas_port->port_dev;
1101 _sas_port = sas_port;
1103 if (DEV_IS_EXPANDER(dev->dev_type))
1104 sas_ha->notify_port_event(sas_phy,
1105 PORTE_BROADCAST_RCVD);
1107 } else if (old_state & (1 << phy_no))
1108 /* PHY down but was up before */
1109 hisi_sas_phy_down(hisi_hba, phy_no, 0);
1114 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1116 struct device *dev = hisi_hba->dev;
1117 struct Scsi_Host *shost = hisi_hba->shost;
1118 u32 old_state, state;
1119 unsigned long flags;
	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;
1128 dev_info(dev, "controller resetting...\n");
1129 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1131 scsi_block_requests(shost);
1132 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1133 rc = hisi_hba->hw->soft_reset(hisi_hba);
1135 dev_warn(dev, "controller reset failed (%d)\n", rc);
1136 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1137 scsi_unblock_requests(shost);
1140 spin_lock_irqsave(&hisi_hba->lock, flags);
1141 hisi_sas_release_tasks(hisi_hba);
1142 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1144 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	/* Init and wait for PHYs to come up and all libsas events to finish. */
1147 hisi_hba->hw->phys_init(hisi_hba);
1149 hisi_sas_refresh_port_id(hisi_hba);
1150 scsi_unblock_requests(shost);
1152 state = hisi_hba->hw->get_phys_state(hisi_hba);
1153 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1154 dev_info(dev, "controller reset complete\n");
1157 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1162 static int hisi_sas_abort_task(struct sas_task *task)
1164 struct scsi_lun lun;
1165 struct hisi_sas_tmf_task tmf_task;
1166 struct domain_device *device = task->dev;
1167 struct hisi_sas_device *sas_dev = device->lldd_dev;
1168 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
1169 struct device *dev = hisi_hba->dev;
1170 int rc = TMF_RESP_FUNC_FAILED;
1171 unsigned long flags;
1174 dev_warn(dev, "Device has been removed\n");
1175 return TMF_RESP_FUNC_FAILED;
1178 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1179 rc = TMF_RESP_FUNC_COMPLETE;
1183 sas_dev->dev_status = HISI_SAS_DEV_EH;
1184 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1185 struct scsi_cmnd *cmnd = task->uldd_task;
1186 struct hisi_sas_slot *slot = task->lldd_task;
1187 u32 tag = slot->idx;
1190 int_to_scsilun(cmnd->device->lun, &lun);
1191 tmf_task.tmf = TMF_ABORT_TASK;
1192 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1194 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1197 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1198 HISI_SAS_INT_ABT_CMD, tag);
1200 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1201 return TMF_RESP_FUNC_FAILED;
		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed.
		 */
1211 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1212 if (task->lldd_task) {
1213 spin_lock_irqsave(&hisi_hba->lock, flags);
1214 hisi_sas_do_release_task(hisi_hba, task, slot);
1215 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1218 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1219 task->task_proto & SAS_PROTOCOL_STP) {
1220 if (task->dev->dev_type == SAS_SATA_DEV) {
1221 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1222 HISI_SAS_INT_ABT_DEV, 0);
1224 dev_err(dev, "abort task: internal abort failed\n");
1227 hisi_sas_dereg_device(hisi_hba, device);
1228 rc = hisi_sas_softreset_ata_disk(device);
1230 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1232 struct hisi_sas_slot *slot = task->lldd_task;
1233 u32 tag = slot->idx;
1235 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1236 HISI_SAS_INT_ABT_CMD, tag);
1237 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1239 spin_lock_irqsave(&hisi_hba->lock, flags);
1240 hisi_sas_do_release_task(hisi_hba, task, slot);
1241 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1246 if (rc != TMF_RESP_FUNC_COMPLETE)
1247 dev_notice(dev, "abort task: rc=%d\n", rc);
1251 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1253 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1254 struct device *dev = hisi_hba->dev;
1255 struct hisi_sas_tmf_task tmf_task;
1256 int rc = TMF_RESP_FUNC_FAILED;
1257 unsigned long flags;
1259 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1260 HISI_SAS_INT_ABT_DEV, 0);
1262 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1263 return TMF_RESP_FUNC_FAILED;
1265 hisi_sas_dereg_device(hisi_hba, device);
1267 tmf_task.tmf = TMF_ABORT_TASK_SET;
1268 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1270 if (rc == TMF_RESP_FUNC_COMPLETE) {
1271 spin_lock_irqsave(&hisi_hba->lock, flags);
1272 hisi_sas_release_task(hisi_hba, device);
1273 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1279 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1281 int rc = TMF_RESP_FUNC_FAILED;
1282 struct hisi_sas_tmf_task tmf_task;
1284 tmf_task.tmf = TMF_CLEAR_ACA;
1285 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1290 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1292 struct sas_phy *phy = sas_get_local_phy(device);
1293 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1294 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1295 rc = sas_phy_reset(phy, reset_type);
1296 sas_put_local_phy(phy);
1301 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1303 struct hisi_sas_device *sas_dev = device->lldd_dev;
1304 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1305 struct device *dev = hisi_hba->dev;
1306 int rc = TMF_RESP_FUNC_FAILED;
1307 unsigned long flags;
1309 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1310 return TMF_RESP_FUNC_FAILED;
1311 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1313 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1314 HISI_SAS_INT_ABT_DEV, 0);
1316 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1317 return TMF_RESP_FUNC_FAILED;
1319 hisi_sas_dereg_device(hisi_hba, device);
1321 rc = hisi_sas_debug_I_T_nexus_reset(device);
1323 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
1324 spin_lock_irqsave(&hisi_hba->lock, flags);
1325 hisi_sas_release_task(hisi_hba, device);
1326 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1331 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1333 struct hisi_sas_device *sas_dev = device->lldd_dev;
1334 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1335 struct device *dev = hisi_hba->dev;
1336 unsigned long flags;
1337 int rc = TMF_RESP_FUNC_FAILED;
1339 sas_dev->dev_status = HISI_SAS_DEV_EH;
1340 if (dev_is_sata(device)) {
1341 struct sas_phy *phy;
1343 /* Clear internal IO and then hardreset */
1344 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1345 HISI_SAS_INT_ABT_DEV, 0);
1347 dev_err(dev, "lu_reset: internal abort failed\n");
1350 hisi_sas_dereg_device(hisi_hba, device);
1352 phy = sas_get_local_phy(device);
1354 rc = sas_phy_reset(phy, 1);
1357 spin_lock_irqsave(&hisi_hba->lock, flags);
1358 hisi_sas_release_task(hisi_hba, device);
1359 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1361 sas_put_local_phy(phy);
1363 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1365 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1366 HISI_SAS_INT_ABT_DEV, 0);
1368 dev_err(dev, "lu_reset: internal abort failed\n");
1371 hisi_sas_dereg_device(hisi_hba, device);
1373 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1374 if (rc == TMF_RESP_FUNC_COMPLETE) {
1375 spin_lock_irqsave(&hisi_hba->lock, flags);
1376 hisi_sas_release_task(hisi_hba, device);
1377 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1381 if (rc != TMF_RESP_FUNC_COMPLETE)
1382 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1383 sas_dev->device_id, rc);
1387 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1389 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1390 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1392 queue_work(hisi_hba->wq, &r.work);
1393 wait_for_completion(r.completion);
1395 return TMF_RESP_FUNC_COMPLETE;
1397 return TMF_RESP_FUNC_FAILED;
1400 static int hisi_sas_query_task(struct sas_task *task)
1402 struct scsi_lun lun;
1403 struct hisi_sas_tmf_task tmf_task;
1404 int rc = TMF_RESP_FUNC_FAILED;
1406 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1407 struct scsi_cmnd *cmnd = task->uldd_task;
1408 struct domain_device *device = task->dev;
1409 struct hisi_sas_slot *slot = task->lldd_task;
1410 u32 tag = slot->idx;
1412 int_to_scsilun(cmnd->device->lun, &lun);
1413 tmf_task.tmf = TMF_QUERY_TASK;
1414 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
1435 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1436 struct sas_task *task, int abort_flag,
1439 struct domain_device *device = task->dev;
1440 struct hisi_sas_device *sas_dev = device->lldd_dev;
1441 struct device *dev = hisi_hba->dev;
1442 struct hisi_sas_port *port;
1443 struct hisi_sas_slot *slot;
1444 struct asd_sas_port *sas_port = device->port;
1445 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1446 struct hisi_sas_dq *dq = sas_dev->dq;
1447 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1448 unsigned long flags, flags_dq;
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;
1456 port = to_hisi_sas_port(sas_port);
1458 /* simply get a slot and send abort command */
1459 spin_lock_irqsave(&hisi_hba->lock, flags);
1460 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1462 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1465 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1467 spin_lock_irqsave(&dq->lock, flags_dq);
1468 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1472 dlvry_queue = dq->id;
1473 dlvry_queue_slot = dq->wr_point;
1475 slot = &hisi_hba->slot_info[slot_idx];
1476 memset(slot, 0, sizeof(struct hisi_sas_slot));
1478 slot->idx = slot_idx;
1479 slot->n_elem = n_elem;
1480 slot->dlvry_queue = dlvry_queue;
1481 slot->dlvry_queue_slot = dlvry_queue_slot;
1482 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1483 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1486 task->lldd_task = slot;
1488 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1489 GFP_ATOMIC, &slot->buf_dma);
1495 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1496 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1497 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1499 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1500 abort_flag, task_tag);
1504 spin_lock_irqsave(&hisi_hba->lock, flags);
1505 list_add_tail(&slot->entry, &sas_dev->list);
1506 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1507 spin_lock_irqsave(&task->task_state_lock, flags);
1508 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1509 spin_unlock_irqrestore(&task->task_state_lock, flags);
1511 dq->slot_prep = slot;
1513 /* send abort command to the chip */
1514 hisi_hba->hw->start_delivery(dq);
1515 spin_unlock_irqrestore(&dq->lock, flags_dq);
1520 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1523 spin_lock_irqsave(&hisi_hba->lock, flags);
1524 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1525 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1526 spin_unlock_irqrestore(&dq->lock, flags_dq);
1528 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO command)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
1547 struct sas_task *task;
1548 struct hisi_sas_device *sas_dev = device->lldd_dev;
1549 struct device *dev = hisi_hba->dev;
	/*
	 * If prep_abort() is not implemented, this hardware either does not
	 * support internal abort or does not need one. In that case return
	 * TMF_RESP_FUNC_FAILED and let the remaining steps proceed as if the
	 * internal abort had been executed and had returned on the CQ.
	 */
1558 if (!hisi_hba->hw->prep_abort)
1559 return TMF_RESP_FUNC_FAILED;
1561 task = sas_alloc_slow_task(GFP_KERNEL);
1566 task->task_proto = device->tproto;
1567 task->task_done = hisi_sas_task_done;
1568 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1569 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1570 add_timer(&task->slow_task->timer);
1572 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1573 task, abort_flag, tag);
1575 del_timer(&task->slow_task->timer);
1576 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1580 wait_for_completion(&task->slow_task->completion);
1581 res = TMF_RESP_FUNC_FAILED;
1583 /* Internal abort timed out */
1584 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1585 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1586 struct hisi_sas_slot *slot = task->lldd_task;
1590 dev_err(dev, "internal task abort: timeout and not done.\n");
1594 dev_err(dev, "internal task abort: timeout.\n");
1597 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1598 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1599 res = TMF_RESP_FUNC_COMPLETE;
1603 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1604 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1605 res = TMF_RESP_FUNC_SUCC;
1610 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1611 "resp: 0x%x sts 0x%x\n",
1612 SAS_ADDR(device->sas_addr),
1614 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1615 task->task_status.stat);
1616 sas_free_task(task);
1621 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1623 hisi_sas_port_notify_formed(sas_phy);
1626 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1630 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1631 u8 reg_index, u8 reg_count, u8 *write_data)
1633 struct hisi_hba *hisi_hba = sha->lldd_ha;
1635 if (!hisi_hba->hw->write_gpio)
1638 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1639 reg_index, reg_count, write_data);
1642 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1644 phy->phy_attached = 0;
1649 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1651 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1652 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1653 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1656 /* Phy down but ready */
1657 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1658 hisi_sas_port_notify_formed(sas_phy);
1660 struct hisi_sas_port *port = phy->port;
1662 /* Phy down and not ready */
1663 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1664 sas_phy_disconnected(sas_phy);
1667 if (phy->phy_type & PORT_TYPE_SAS) {
1668 int port_id = port->id;
			if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
							       port_id))
				port->port_attached = 0;
1673 } else if (phy->phy_type & PORT_TYPE_SATA)
1674 port->port_attached = 0;
1676 hisi_sas_phy_disconnected(phy);
1679 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
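
/* Kill the completion-queue tasklet of every hardware queue. */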
1681 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1685 for (i = 0; i < hisi_hba->queue_count; i++) {
1686 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1688 tasklet_kill(&cq->tasklet);
1691 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1693 struct scsi_transport_template *hisi_sas_stt;
1694 EXPORT_SYMBOL_GPL(hisi_sas_stt);
1696 static struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
1701 static struct scsi_host_template _hisi_sas_sht = {
1702 .module = THIS_MODULE,
1704 .queuecommand = sas_queuecommand,
1705 .target_alloc = sas_target_alloc,
1706 .slave_configure = hisi_sas_slave_configure,
1707 .scan_finished = hisi_sas_scan_finished,
1708 .scan_start = hisi_sas_scan_start,
1709 .change_queue_depth = sas_change_queue_depth,
1710 .bios_param = sas_bios_param,
1713 .sg_tablesize = SG_ALL,
1714 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1715 .use_clustering = ENABLE_CLUSTERING,
1716 .eh_device_reset_handler = sas_eh_device_reset_handler,
1717 .eh_target_reset_handler = sas_eh_target_reset_handler,
1718 .target_destroy = sas_target_destroy,
1720 .shost_attrs = host_attrs,
1722 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1723 EXPORT_SYMBOL_GPL(hisi_sas_sht);
1725 static struct sas_domain_function_template hisi_sas_transport_ops = {
1726 .lldd_dev_found = hisi_sas_dev_found,
1727 .lldd_dev_gone = hisi_sas_dev_gone,
1728 .lldd_execute_task = hisi_sas_queue_command,
1729 .lldd_control_phy = hisi_sas_control_phy,
1730 .lldd_abort_task = hisi_sas_abort_task,
1731 .lldd_abort_task_set = hisi_sas_abort_task_set,
1732 .lldd_clear_aca = hisi_sas_clear_aca,
1733 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1734 .lldd_lu_reset = hisi_sas_lu_reset,
1735 .lldd_query_task = hisi_sas_query_task,
1736 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1737 .lldd_port_formed = hisi_sas_port_formed,
1738 .lldd_port_deformed = hisi_sas_port_deformed,
1739 .lldd_write_gpio = hisi_sas_write_gpio,
1742 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1744 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1746 for (i = 0; i < hisi_hba->queue_count; i++) {
1747 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1748 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1750 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1751 memset(hisi_hba->cmd_hdr[i], 0, s);
1754 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1755 memset(hisi_hba->complete_hdr[i], 0, s);
1759 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1760 memset(hisi_hba->initial_fis, 0, s);
1762 s = max_command_entries * sizeof(struct hisi_sas_iost);
1763 memset(hisi_hba->iost, 0, s);
1765 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1766 memset(hisi_hba->breakpoint, 0, s);
1768 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1769 memset(hisi_hba->sata_breakpoint, 0, s);
1771 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
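
/*
 * Allocate the per-controller data structures: PHY and device tables,
 * delivery/completion queues, ITCT, IOST, breakpoint and initial-FIS DMA
 * memory, the slot buffer pool, the slot index bitmap and the workqueue.
 */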
1773 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1775 struct device *dev = hisi_hba->dev;
1776 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1778 spin_lock_init(&hisi_hba->lock);
1779 for (i = 0; i < hisi_hba->n_phy; i++) {
1780 hisi_sas_phy_init(hisi_hba, i);
1781 hisi_hba->port[i].port_attached = 0;
1782 hisi_hba->port[i].id = -1;
1785 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1786 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1787 hisi_hba->devices[i].device_id = i;
1788 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1791 for (i = 0; i < hisi_hba->queue_count; i++) {
1792 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1793 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1795 /* Completion queue structure */
1797 cq->hisi_hba = hisi_hba;
1799 /* Delivery queue structure */
1800 spin_lock_init(&dq->lock);
1802 dq->hisi_hba = hisi_hba;
1804 /* Delivery queue */
1805 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1806 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1807 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1808 if (!hisi_hba->cmd_hdr[i])
1811 /* Completion queue */
1812 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1813 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1814 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1815 if (!hisi_hba->complete_hdr[i])
1819 s = sizeof(struct hisi_sas_slot_buf_table);
1820 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1821 if (!hisi_hba->buffer_pool)
1824 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1825 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1827 if (!hisi_hba->itct)
1830 memset(hisi_hba->itct, 0, s);
1832 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1833 sizeof(struct hisi_sas_slot),
1835 if (!hisi_hba->slot_info)
1838 s = max_command_entries * sizeof(struct hisi_sas_iost);
1839 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1841 if (!hisi_hba->iost)
1844 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1845 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1846 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1847 if (!hisi_hba->breakpoint)
1850 hisi_hba->slot_index_count = max_command_entries;
1851 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1852 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1853 if (!hisi_hba->slot_index_tags)
1856 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1857 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1858 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1859 if (!hisi_hba->initial_fis)
1862 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1863 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1864 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1865 if (!hisi_hba->sata_breakpoint)
1867 hisi_sas_init_mem(hisi_hba);
1869 hisi_sas_slot_index_init(hisi_hba);
1871 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1872 if (!hisi_hba->wq) {
1873 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1881 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
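
/* Release everything allocated by hisi_sas_alloc(). */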
1883 void hisi_sas_free(struct hisi_hba *hisi_hba)
1885 struct device *dev = hisi_hba->dev;
1886 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1888 for (i = 0; i < hisi_hba->queue_count; i++) {
1889 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1890 if (hisi_hba->cmd_hdr[i])
1891 dma_free_coherent(dev, s,
1892 hisi_hba->cmd_hdr[i],
1893 hisi_hba->cmd_hdr_dma[i]);
1895 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1896 if (hisi_hba->complete_hdr[i])
1897 dma_free_coherent(dev, s,
1898 hisi_hba->complete_hdr[i],
1899 hisi_hba->complete_hdr_dma[i]);
1902 dma_pool_destroy(hisi_hba->buffer_pool);
1904 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1906 dma_free_coherent(dev, s,
1907 hisi_hba->itct, hisi_hba->itct_dma);
1909 s = max_command_entries * sizeof(struct hisi_sas_iost);
1911 dma_free_coherent(dev, s,
1912 hisi_hba->iost, hisi_hba->iost_dma);
1914 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1915 if (hisi_hba->breakpoint)
1916 dma_free_coherent(dev, s,
1917 hisi_hba->breakpoint,
1918 hisi_hba->breakpoint_dma);
1921 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1922 if (hisi_hba->initial_fis)
1923 dma_free_coherent(dev, s,
1924 hisi_hba->initial_fis,
1925 hisi_hba->initial_fis_dma);
1927 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1928 if (hisi_hba->sata_breakpoint)
1929 dma_free_coherent(dev, s,
1930 hisi_hba->sata_breakpoint,
1931 hisi_hba->sata_breakpoint_dma);
1934 destroy_workqueue(hisi_hba->wq);
1936 EXPORT_SYMBOL_GPL(hisi_sas_free);
1938 void hisi_sas_rst_work_handler(struct work_struct *work)
1940 struct hisi_hba *hisi_hba =
1941 container_of(work, struct hisi_hba, rst_work);
1943 hisi_sas_controller_reset(hisi_hba);
1945 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1947 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
1949 struct hisi_sas_rst *rst =
1950 container_of(work, struct hisi_sas_rst, work);
1952 if (!hisi_sas_controller_reset(rst->hisi_hba))
1954 complete(rst->completion);
1956 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
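
/*
 * Read the controller description (SAS address, syscon/reset registers,
 * reference clock, phy and queue counts) from DT or ACPI properties.
 */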
1958 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
1960 struct device *dev = hisi_hba->dev;
1961 struct platform_device *pdev = hisi_hba->platform_dev;
1962 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
1965 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
1967 dev_err(dev, "could not get property sas-addr\n");
	/*
	 * These properties are only required for platform device-based
	 * controllers with DT firmware.
	 */
1976 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1977 "hisilicon,sas-syscon");
1978 if (IS_ERR(hisi_hba->ctrl)) {
1979 dev_err(dev, "could not get syscon\n");
1983 if (device_property_read_u32(dev, "ctrl-reset-reg",
1984 &hisi_hba->ctrl_reset_reg)) {
1986 "could not get property ctrl-reset-reg\n");
1990 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
1991 &hisi_hba->ctrl_reset_sts_reg)) {
1993 "could not get property ctrl-reset-sts-reg\n");
1997 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
1998 &hisi_hba->ctrl_clock_ena_reg)) {
2000 "could not get property ctrl-clock-ena-reg\n");
2005 refclk = devm_clk_get(dev, NULL);
2007 dev_dbg(dev, "no ref clk property\n");
2009 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2011 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2012 dev_err(dev, "could not get property phy-count\n");
2016 if (device_property_read_u32(dev, "queue-count",
2017 &hisi_hba->queue_count)) {
2018 dev_err(dev, "could not get property queue-count\n");
2024 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
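
/*
 * Allocate the Scsi_Host, read the firmware-provided configuration, set up
 * DMA masks, map the register space and allocate controller memory.
 */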
2026 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2027 const struct hisi_sas_hw *hw)
2029 struct resource *res;
2030 struct Scsi_Host *shost;
2031 struct hisi_hba *hisi_hba;
2032 struct device *dev = &pdev->dev;
2034 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
2036 dev_err(dev, "scsi host alloc failed\n");
2039 hisi_hba = shost_priv(shost);
2041 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2043 hisi_hba->dev = dev;
2044 hisi_hba->platform_dev = pdev;
2045 hisi_hba->shost = shost;
2046 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2048 timer_setup(&hisi_hba->timer, NULL, 0);
2050 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2053 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2054 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2055 dev_err(dev, "No usable DMA addressing method\n");
2059 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2060 hisi_hba->regs = devm_ioremap_resource(dev, res);
2061 if (IS_ERR(hisi_hba->regs))
2064 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2066 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2067 if (IS_ERR(hisi_hba->sgpio_regs))
2071 if (hisi_sas_alloc(hisi_hba, shost)) {
2072 hisi_sas_free(hisi_hba);
2078 scsi_host_put(shost);
2079 dev_err(dev, "shost alloc failed\n");
2083 void hisi_sas_init_add(struct hisi_hba *hisi_hba)
2087 for (i = 0; i < hisi_hba->n_phy; i++)
2088 memcpy(&hisi_hba->phy[i].dev_sas_addr,
2092 EXPORT_SYMBOL_GPL(hisi_sas_init_add);
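
/*
 * Common probe path for platform-device based controllers: allocate the
 * host, wire up the libsas ha structure, register with SCSI and libsas,
 * initialise the hardware and kick off the bus scan.
 */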
2094 int hisi_sas_probe(struct platform_device *pdev,
2095 const struct hisi_sas_hw *hw)
2097 struct Scsi_Host *shost;
2098 struct hisi_hba *hisi_hba;
2099 struct device *dev = &pdev->dev;
2100 struct asd_sas_phy **arr_phy;
2101 struct asd_sas_port **arr_port;
2102 struct sas_ha_struct *sha;
2103 int rc, phy_nr, port_nr, i;
2105 shost = hisi_sas_shost_alloc(pdev, hw);
2109 sha = SHOST_TO_SAS_HA(shost);
2110 hisi_hba = shost_priv(shost);
2111 platform_set_drvdata(pdev, sha);
2113 phy_nr = port_nr = hisi_hba->n_phy;
2115 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2116 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2117 if (!arr_phy || !arr_port) {
2122 sha->sas_phy = arr_phy;
2123 sha->sas_port = arr_port;
2124 sha->lldd_ha = hisi_hba;
2126 shost->transportt = hisi_sas_stt;
2127 shost->max_id = HISI_SAS_MAX_DEVICES;
2128 shost->max_lun = ~0;
2129 shost->max_channel = 1;
2130 shost->max_cmd_len = 16;
2131 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2132 shost->can_queue = hisi_hba->hw->max_command_entries;
2133 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2135 sha->sas_ha_name = DRV_NAME;
2136 sha->dev = hisi_hba->dev;
2137 sha->lldd_module = THIS_MODULE;
2138 sha->sas_addr = &hisi_hba->sas_addr[0];
2139 sha->num_phys = hisi_hba->n_phy;
2140 sha->core.shost = hisi_hba->shost;
2142 for (i = 0; i < hisi_hba->n_phy; i++) {
2143 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2144 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2147 hisi_sas_init_add(hisi_hba);
2149 rc = scsi_add_host(shost, &pdev->dev);
2153 rc = sas_register_ha(sha);
2155 goto err_out_register_ha;
2157 rc = hisi_hba->hw->hw_init(hisi_hba);
2159 goto err_out_register_ha;
2161 scsi_scan_host(shost);
2165 err_out_register_ha:
2166 scsi_remove_host(shost);
2168 hisi_sas_free(hisi_hba);
2169 scsi_host_put(shost);
2172 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2174 int hisi_sas_remove(struct platform_device *pdev)
2176 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2177 struct hisi_hba *hisi_hba = sha->lldd_ha;
2178 struct Scsi_Host *shost = sha->core.shost;
2180 sas_unregister_ha(sha);
2181 sas_remove_host(sha->core.shost);
2183 hisi_sas_free(hisi_hba);
2184 scsi_host_put(shost);
2187 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2189 static __init int hisi_sas_init(void)
2191 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2198 static __exit void hisi_sas_exit(void)
2200 sas_release_transport(hisi_sas_stt);
2203 module_init(hisi_sas_init);
2204 module_exit(hisi_sas_exit);
2206 MODULE_LICENSE("GPL");
2207 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2208 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2209 MODULE_ALIAS("platform:" DRV_NAME);