scsi: hisi_sas: Add missing PHY spinlock init
[linux-2.6-microblaze.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
15 #define DEV_IS_GONE(dev) \
16         ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22                              struct domain_device *device,
23                              int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26                                 void *funcdata);
27 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
28                                   struct domain_device *device);
29 static void hisi_sas_dev_gone(struct domain_device *device);
30
/*
 * Classify an ATA command (taken from the host-to-device FIS) into the
 * protocol type the HW command header needs: FPDMA (NCQ), PIO, DMA or
 * non-data.  @direction is only consulted for commands not listed below.
 */
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	/* NCQ (first-party DMA) commands */
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	/* PIO data-in/data-out commands */
	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	/* legacy DMA commands */
	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	/* commands that transfer no data */
	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	/* SET MAX: protocol depends on the features sub-command */
	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		/*
		 * Unrecognised command: treat as non-data if there is no
		 * transfer, otherwise assume PIO as the safe fallback.
		 */
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
108
109 void hisi_sas_sata_done(struct sas_task *task,
110                             struct hisi_sas_slot *slot)
111 {
112         struct task_status_struct *ts = &task->task_status;
113         struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
114         struct hisi_sas_status_buffer *status_buf =
115                         hisi_sas_status_buf_addr_mem(slot);
116         u8 *iu = &status_buf->iu[0];
117         struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
118
119         resp->frame_len = sizeof(struct dev_to_host_fis);
120         memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
121
122         ts->buf_valid_size = sizeof(*resp);
123 }
124 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
125
126 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
127 {
128         struct ata_queued_cmd *qc = task->uldd_task;
129
130         if (qc) {
131                 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
132                         qc->tf.command == ATA_CMD_FPDMA_READ) {
133                         *tag = qc->tag;
134                         return 1;
135                 }
136         }
137         return 0;
138 }
139 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
140
141 /*
142  * This function assumes linkrate mask fits in 8 bits, which it
143  * does for all HW versions supported.
144  */
145 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
146 {
147         u16 rate = 0;
148         int i;
149
150         max -= SAS_LINK_RATE_1_5_GBPS;
151         for (i = 0; i <= max; i++)
152                 rate |= 1 << (i * 2);
153         return rate;
154 }
155 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
156
/* Resolve a libsas domain device back to its owning hisi_hba (LLDD data). */
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}
161
/* Convert an embedded asd_sas_port to its containing hisi_sas_port. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
167
168 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169 {
170         int phy_no;
171
172         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
173                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
174 }
175 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
176
177 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
178 {
179         void *bitmap = hisi_hba->slot_index_tags;
180
181         clear_bit(slot_idx, bitmap);
182 }
183
/* Free a slot tag; currently just clears its bit in the bitmap. */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
188
189 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
190 {
191         void *bitmap = hisi_hba->slot_index_tags;
192
193         set_bit(slot_idx, bitmap);
194 }
195
196 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
197 {
198         unsigned int index;
199         void *bitmap = hisi_hba->slot_index_tags;
200
201         index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
202                         hisi_hba->last_slot_index + 1);
203         if (index >= hisi_hba->slot_index_count) {
204                 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
205                                            0);
206                 if (index >= hisi_hba->slot_index_count)
207                         return -SAS_QUEUE_FULL;
208         }
209         hisi_sas_slot_index_set(hisi_hba, index);
210         *slot_idx = index;
211         hisi_hba->last_slot_index = index;
212
213         return 0;
214 }
215
216 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
217 {
218         int i;
219
220         for (i = 0; i < hisi_hba->slot_index_count; ++i)
221                 hisi_sas_slot_index_clear(hisi_hba, i);
222 }
223
/*
 * Tear down a completed/aborted slot: detach it from the task, undo the
 * DMA mapping, unlink it from its lists and return its tag.  @task may
 * be NULL when the slot was never bound to a task.
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		/* Already freed (or never attached) - nothing to do. */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		/* ATA mappings are owned by libata; only unmap non-ATA. */
		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}


	/* Unlink from the delivery queue under that queue's lock. */
	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);

	/*
	 * Wipe the per-command bookkeeping that precedes the ->buf member;
	 * the DMA buffer fields from ->buf onward are preserved for reuse.
	 * NOTE(review): slot->idx is read below, so it presumably lies at
	 * or after the ->buf offset - confirm against struct hisi_sas_slot.
	 */
	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
257
/* Dispatch SMP command-header preparation to the HW-specific hook. */
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}
263
/* Dispatch SSP command-header preparation to the HW-specific hook. */
static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}
269
/* Dispatch SATA/STP command-header preparation to the HW-specific hook. */
static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}
275
/* Dispatch internal-abort command preparation to the HW-specific hook. */
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
283
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;

	/* ABORT TASK is an SSP TMF; other protocols only get the cleanup. */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	/* The TMF identifies the victim command by its slot tag (IPTT). */
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	if (task->task_done)
		task->task_done(task);
}
317
/*
 * Prepare one sas_task for delivery: validate the port/device, DMA-map
 * the payload, allocate a slot tag and a delivery-queue entry, fill in
 * the slot and command header, then mark the slot ready.
 *
 * On success *dq_pointer is set to the device's delivery queue and
 * *pass is incremented so the caller knows to ring the doorbell.
 * Returns 0 or a negative errno; all resources acquired here are
 * released on the error paths below.
 */
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
	unsigned long flags, flags_dq;
	struct hisi_sas_dq *dq;
	int wr_q_index;

	/* No port: the PHY is down; fail the task without delivering it. */
	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	*dq_pointer = dq = sas_dev->dq;

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	/* DMA-map the payload (ATA scatterlists are mapped by libata). */
	if (!sas_protocol_ata(task->task_proto)) {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			/* SMP uses single-entry request/response buffers. */
			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						1, DMA_TO_DEVICE);
			if (!n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			/* SMP frame lengths must be dword-aligned. */
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						 1, DMA_FROM_DEVICE);
			if (!n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	} else
		n_elem = task->num_scatter;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}

	/* Allocate a slot tag (IPTT) under the HBA lock. */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (rc)
		goto err_out_dma_unmap;

	slot = &hisi_hba->slot_info[slot_idx];

	/* Reserve a write position in the delivery queue. */
	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	/* Bind slot, task and command header together. */
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	/* Start from clean header/command-table/status-buffer memory. */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	/* Protocol-specific command-header preparation. */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	slot->ready = 1;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
			     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
512
513 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
514                               bool is_tmf, struct hisi_sas_tmf_task *tmf)
515 {
516         u32 rc;
517         u32 pass = 0;
518         unsigned long flags;
519         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
520         struct device *dev = hisi_hba->dev;
521         struct hisi_sas_dq *dq = NULL;
522
523         if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
524                 if (in_softirq())
525                         return -EINVAL;
526
527                 down(&hisi_hba->sem);
528                 up(&hisi_hba->sem);
529         }
530
531         /* protect task_prep and start_delivery sequence */
532         rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
533         if (rc)
534                 dev_err(dev, "task exec: failed[%d]!\n", rc);
535
536         if (likely(pass)) {
537                 spin_lock_irqsave(&dq->lock, flags);
538                 hisi_hba->hw->start_delivery(dq);
539                 spin_unlock_irqrestore(&dq->lock, flags);
540         }
541
542         return rc;
543 }
544
/*
 * Report a completed OOB/identify sequence on @phy_no to libsas:
 * publish the negotiated link rates, fill the received identify frame
 * (SAS PHYs only) and raise the PHYE_OOB_DONE / PORTE_BYTES_DMAED
 * events that trigger discovery.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	/* Nothing attached: the event would be meaningless. */
	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		/* Export the HW link-rate capabilities via sysfs transport. */
		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* SATA: frame_rcvd already holds the D2H FIS; nothing to do. */
		/*Nothing*/
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
585
586 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
587 {
588         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
589         struct hisi_sas_device *sas_dev = NULL;
590         unsigned long flags;
591         int last = hisi_hba->last_dev_id;
592         int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
593         int i;
594
595         spin_lock_irqsave(&hisi_hba->lock, flags);
596         for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
597                 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
598                         int queue = i % hisi_hba->queue_count;
599                         struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
600
601                         hisi_hba->devices[i].device_id = i;
602                         sas_dev = &hisi_hba->devices[i];
603                         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
604                         sas_dev->dev_type = device->dev_type;
605                         sas_dev->hisi_hba = hisi_hba;
606                         sas_dev->sas_device = device;
607                         sas_dev->dq = dq;
608                         INIT_LIST_HEAD(&hisi_hba->devices[i].list);
609                         break;
610                 }
611                 i++;
612         }
613         hisi_hba->last_dev_id = i;
614         spin_unlock_irqrestore(&hisi_hba->lock, flags);
615
616         return sas_dev;
617 }
618
619 #define HISI_SAS_SRST_ATA_DISK_CNT 3
620 static int hisi_sas_init_device(struct domain_device *device)
621 {
622         int rc = TMF_RESP_FUNC_COMPLETE;
623         struct scsi_lun lun;
624         struct hisi_sas_tmf_task tmf_task;
625         int retry = HISI_SAS_SRST_ATA_DISK_CNT;
626         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
627
628         switch (device->dev_type) {
629         case SAS_END_DEVICE:
630                 int_to_scsilun(0, &lun);
631
632                 tmf_task.tmf = TMF_CLEAR_TASK_SET;
633                 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
634                                                   &tmf_task);
635                 if (rc == TMF_RESP_FUNC_COMPLETE)
636                         hisi_sas_release_task(hisi_hba, device);
637                 break;
638         case SAS_SATA_DEV:
639         case SAS_SATA_PM:
640         case SAS_SATA_PM_PORT:
641         case SAS_SATA_PENDING:
642                 while (retry-- > 0) {
643                         rc = hisi_sas_softreset_ata_disk(device);
644                         if (!rc)
645                                 break;
646                 }
647                 break;
648         default:
649                 break;
650         }
651
652         return rc;
653 }
654
/*
 * libsas dev_found callback: allocate LLDD device state, program the
 * ITCT, verify expander attachment (when behind an expander) and run
 * the per-device init.  On failure the device is torn down again.
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	/* Prefer the HW-specific allocator when the HW provides one. */
	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	/* Behind an expander: make sure some expander PHY leads to us. */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}
710
/*
 * scsi_host slave_configure hook: run the libsas default configuration
 * and cap the queue depth at 64 for non-SATA devices.
 */
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	int rc = sas_slave_configure(sdev);

	if (rc)
		return rc;

	if (!dev_is_sata(ddev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
724
/* scsi_host scan_start hook: kick off PHY init; discovery follows PHY-up. */
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
732
733 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
734 {
735         struct hisi_hba *hisi_hba = shost_priv(shost);
736         struct sas_ha_struct *sha = &hisi_hba->sha;
737
738         /* Wait for PHY up interrupt to occur */
739         if (time < HZ)
740                 return 0;
741
742         sas_drain_work(sha);
743         return 1;
744 }
745 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
746
/*
 * Deferred PHY-up handler (runs from the driver workqueue because the
 * SL notify step needs to sleep): notify the HW, then report the new
 * link to libsas.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
758
759 static void hisi_sas_linkreset_work(struct work_struct *work)
760 {
761         struct hisi_sas_phy *phy =
762                 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
763         struct asd_sas_phy *sas_phy = &phy->sas_phy;
764
765         hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
766 }
767
/* Work handlers for each hisi_sas_phy_event, indexed by event. */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
772
773 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
774                                 enum hisi_sas_phy_event event)
775 {
776         struct hisi_hba *hisi_hba = phy->hisi_hba;
777
778         if (WARN_ON(event >= HISI_PHYES_NUM))
779                 return false;
780
781         return queue_work(hisi_hba->wq, &phy->works[event]);
782 }
783 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
784
/*
 * Initialise one hisi_sas_phy and its embedded asd_sas_phy with the
 * driver defaults, set up the per-event work items and initialise the
 * PHY spinlock (the lock init was previously missing).
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	/* Only PHYs the controller actually has are enabled. */
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);
}
814
/*
 * libsas port-formed notification: bind the LLDD port to the PHY's
 * port id and cross-link port <-> phy under the HBA lock.
 */
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
834
835 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
836                                      struct hisi_sas_slot *slot)
837 {
838         if (task) {
839                 unsigned long flags;
840                 struct task_status_struct *ts;
841
842                 ts = &task->task_status;
843
844                 ts->resp = SAS_TASK_COMPLETE;
845                 ts->stat = SAS_ABORTED_TASK;
846                 spin_lock_irqsave(&task->task_state_lock, flags);
847                 task->task_state_flags &=
848                         ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
849                 task->task_state_flags |= SAS_TASK_STATE_DONE;
850                 spin_unlock_irqrestore(&task->task_state_lock, flags);
851         }
852
853         hisi_sas_slot_task_free(hisi_hba, task, slot);
854 }
855
856 /* hisi_hba.lock should be locked */
857 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
858                         struct domain_device *device)
859 {
860         struct hisi_sas_slot *slot, *slot2;
861         struct hisi_sas_device *sas_dev = device->lldd_dev;
862
863         list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
864                 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
865 }
866
867 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
868 {
869         struct hisi_sas_device *sas_dev;
870         struct domain_device *device;
871         int i;
872
873         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
874                 sas_dev = &hisi_hba->devices[i];
875                 device = sas_dev->sas_device;
876
877                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
878                     !device)
879                         continue;
880
881                 hisi_sas_release_task(hisi_hba, device);
882         }
883 }
884 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
885
/* Deregister @device from the HW; dereg_device is an optional hw hook */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
892
/*
 * libsas callback: @device has been removed from the domain.  Abort its
 * in-flight IO, deregister it from the HW and free the LLDD-side device
 * slot by marking it SAS_PHY_UNUSED.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	/*
	 * Skip the HW teardown while a controller reset is in flight;
	 * the reset path re-writes the ITCT itself (see
	 * hisi_sas_refresh_port_id()).
	 */
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		/* hisi_hba->sem serialises clear_itct against soft reset */
		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	/* Marks the devices[] entry as reusable */
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
918
/* Normal (non-TMF) task submission entry point; is_tmf=0, no tmf descriptor */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
923
924 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
925                         struct sas_phy_linkrates *r)
926 {
927         struct sas_phy_linkrates _r;
928
929         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
930         struct asd_sas_phy *sas_phy = &phy->sas_phy;
931         enum sas_linkrate min, max;
932
933         if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
934                 max = sas_phy->phy->maximum_linkrate;
935                 min = r->minimum_linkrate;
936         } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
937                 max = r->maximum_linkrate;
938                 min = sas_phy->phy->minimum_linkrate;
939         } else
940                 return;
941
942         _r.maximum_linkrate = max;
943         _r.minimum_linkrate = min;
944
945         hisi_hba->hw->phy_disable(hisi_hba, phy_no);
946         msleep(100);
947         hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
948         hisi_hba->hw->phy_start(hisi_hba, phy_no);
949 }
950
951 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
952                                 void *funcdata)
953 {
954         struct sas_ha_struct *sas_ha = sas_phy->ha;
955         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
956         int phy_no = sas_phy->id;
957
958         switch (func) {
959         case PHY_FUNC_HARD_RESET:
960                 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
961                 break;
962
963         case PHY_FUNC_LINK_RESET:
964                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
965                 msleep(100);
966                 hisi_hba->hw->phy_start(hisi_hba, phy_no);
967                 break;
968
969         case PHY_FUNC_DISABLE:
970                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
971                 break;
972
973         case PHY_FUNC_SET_LINK_RATE:
974                 hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
975                 break;
976         case PHY_FUNC_GET_EVENTS:
977                 if (hisi_hba->hw->get_events) {
978                         hisi_hba->hw->get_events(hisi_hba, phy_no);
979                         break;
980                 }
981                 /* fallthru */
982         case PHY_FUNC_RELEASE_SPINUP_HOLD:
983         default:
984                 return -EOPNOTSUPP;
985         }
986         return 0;
987 }
988
989 static void hisi_sas_task_done(struct sas_task *task)
990 {
991         if (!del_timer(&task->slow_task->timer))
992                 return;
993         complete(&task->slow_task->completion);
994 }
995
/*
 * Timer callback for an internal TMF task: flag the task as aborted
 * (unless it already completed) and wake the waiter in
 * hisi_sas_exec_internal_tmf_task().
 */
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	/* Don't mark aborted if completion won the race under the lock */
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
1009
#define TASK_TIMEOUT 20		/* per-attempt TMF timeout, seconds */
#define TASK_RETRY 3		/* max TMF attempts */
#define INTERNAL_ABORT_TIMEOUT 6
/*
 * Build and execute an internal TMF task (SSP TMF frame or SATA FIS in
 * @parameter) against @device, retrying up to TASK_RETRY times.
 *
 * Returns TMF_RESP_FUNC_COMPLETE/SUCC on success, a residual byte count
 * on underrun, -EMSGSIZE on overrun, negative errno on submit failure,
 * or TMF_RESP_FUNC_FAILED.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		/* SATA carries a FIS; SSP carries a TMF IU */
		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Timer and task_done race; whoever del_timer's first wins */
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				/*
				 * Orphan the slot from the timed-out task so a
				 * late HW completion won't touch freed state.
				 */
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Unrecognised status: free this attempt's task and retry */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
1114
1115 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1116                 bool reset, int pmp, u8 *fis)
1117 {
1118         struct ata_taskfile tf;
1119
1120         ata_tf_init(dev, &tf);
1121         if (reset)
1122                 tf.ctl |= ATA_SRST;
1123         else
1124                 tf.ctl &= ~ATA_SRST;
1125         tf.command = ATA_CMD_DEV_RESET;
1126         ata_tf_to_fis(&tf, pmp, 0, fis);
1127 }
1128
1129 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1130 {
1131         u8 fis[20] = {0};
1132         struct ata_port *ap = device->sata_dev.ap;
1133         struct ata_link *link;
1134         int rc = TMF_RESP_FUNC_FAILED;
1135         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1136         struct device *dev = hisi_hba->dev;
1137         int s = sizeof(struct host_to_dev_fis);
1138
1139         ata_for_each_link(link, ap, EDGE) {
1140                 int pmp = sata_srst_pmp(link);
1141
1142                 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1143                 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1144                 if (rc != TMF_RESP_FUNC_COMPLETE)
1145                         break;
1146         }
1147
1148         if (rc == TMF_RESP_FUNC_COMPLETE) {
1149                 ata_for_each_link(link, ap, EDGE) {
1150                         int pmp = sata_srst_pmp(link);
1151
1152                         hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1153                         rc = hisi_sas_exec_internal_tmf_task(device, fis,
1154                                                              s, NULL);
1155                         if (rc != TMF_RESP_FUNC_COMPLETE)
1156                                 dev_err(dev, "ata disk de-reset failed\n");
1157                 }
1158         } else {
1159                 dev_err(dev, "ata disk reset failed\n");
1160         }
1161
1162         if (rc == TMF_RESP_FUNC_COMPLETE)
1163                 hisi_sas_release_task(hisi_hba, device);
1164
1165         return rc;
1166 }
1167
/*
 * Issue an SSP TMF described by @tmf to @device/@lun.
 *
 * NOTE(review): only ssp_task.LUN is initialised here; the remaining
 * fields of the on-stack sas_ssp_task are left as-is - presumably the
 * TMF prep path only consumes the LUN; confirm against the slot-prep
 * code before relying on other fields.
 */
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	/* TMFs are only defined for SSP targets */
	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}
1181
/*
 * After a controller reset the HW may assign different port ids: walk
 * every registered device, find a phy of its port that came back up and
 * adopt that phy's new port id, then re-write the device's ITCT.
 * Devices whose port has no live phy get the invalid id 0xff.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Pick the first phy of the port that is up again */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
1220
/*
 * Compare phy state before (@old_state) and after (@state) a controller
 * reset and tell libsas what changed: a broadcast event for ports that
 * are still up (to trigger revalidation behind expanders), a phy-down
 * for phys that did not come back.
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/*
		 * _sas_port remembers the last port already notified, so a
		 * wide port is only checked once even though it spans
		 * several phys.
		 */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}
1254
1255 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1256 {
1257         struct hisi_sas_device *sas_dev;
1258         struct domain_device *device;
1259         int i;
1260
1261         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1262                 sas_dev = &hisi_hba->devices[i];
1263                 device = sas_dev->sas_device;
1264
1265                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1266                         continue;
1267
1268                 hisi_sas_init_device(device);
1269         }
1270 }
1271
1272 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
1273                                              struct asd_sas_port *sas_port,
1274                                              struct domain_device *device)
1275 {
1276         struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
1277         struct ata_port *ap = device->sata_dev.ap;
1278         struct device *dev = hisi_hba->dev;
1279         int s = sizeof(struct host_to_dev_fis);
1280         int rc = TMF_RESP_FUNC_FAILED;
1281         struct asd_sas_phy *sas_phy;
1282         struct ata_link *link;
1283         u8 fis[20] = {0};
1284         u32 state;
1285
1286         state = hisi_hba->hw->get_phys_state(hisi_hba);
1287         list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
1288                 if (!(state & BIT(sas_phy->id)))
1289                         continue;
1290
1291                 ata_for_each_link(link, ap, EDGE) {
1292                         int pmp = sata_srst_pmp(link);
1293
1294                         tmf_task.phy_id = sas_phy->id;
1295                         hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1296                         rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
1297                                                              &tmf_task);
1298                         if (rc != TMF_RESP_FUNC_COMPLETE) {
1299                                 dev_err(dev, "phy%d ata reset failed rc=%d\n",
1300                                         sas_phy->id, rc);
1301                                 break;
1302                         }
1303                 }
1304         }
1305 }
1306
1307 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
1308 {
1309         struct device *dev = hisi_hba->dev;
1310         int port_no, rc, i;
1311
1312         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1313                 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1314                 struct domain_device *device = sas_dev->sas_device;
1315
1316                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1317                         continue;
1318
1319                 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1320                                                   HISI_SAS_INT_ABT_DEV, 0);
1321                 if (rc < 0)
1322                         dev_err(dev, "STP reject: abort dev failed %d\n", rc);
1323         }
1324
1325         for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
1326                 struct hisi_sas_port *port = &hisi_hba->port[port_no];
1327                 struct asd_sas_port *sas_port = &port->sas_port;
1328                 struct domain_device *port_dev = sas_port->port_dev;
1329                 struct domain_device *device;
1330
1331                 if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
1332                         continue;
1333
1334                 /* Try to find a SATA device */
1335                 list_for_each_entry(device, &sas_port->dev_list,
1336                                     dev_list_node) {
1337                         if (dev_is_sata(device)) {
1338                                 hisi_sas_send_ata_reset_each_phy(hisi_hba,
1339                                                                  sas_port,
1340                                                                  device);
1341                                 break;
1342                         }
1343                 }
1344         }
1345 }
1346
/*
 * Full controller soft reset: quiesce the host, reset the HW, bring the
 * phys back, refresh port ids/ITCTs and report topology changes to
 * libsas.  Serialised by HISI_SAS_RESET_BIT and hisi_hba->sem.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one reset at a time; bail if one is already running */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	down(&hisi_hba->sem);
	dev_info(dev, "controller resetting...\n");
	/* Snapshot phy state to diff against after the reset */
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	/* Reject new commands while the HW is down */
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	/* Tell libsas which phys changed across the reset */
	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
1400
/*
 * eh_abort_task handler: abort a single task.  The strategy depends on
 * the protocol: SSP uses an ABORT TASK TMF plus an internal HW abort,
 * SATA/STP soft-resets the disk, SMP only does the internal HW abort.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	/* Task already done: nothing to abort */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		/* TMF to the target, then HW-side abort of the command */
		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			/* SATA: abort all device IO, then soft-reset disk */
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
			     HISI_SAS_INT_ABT_CMD, tag);
		/* On failed/errored abort, free the slot ourselves */
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task)
			hisi_sas_do_release_task(hisi_hba, task, slot);
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
1488
1489 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1490 {
1491         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1492         struct device *dev = hisi_hba->dev;
1493         struct hisi_sas_tmf_task tmf_task;
1494         int rc = TMF_RESP_FUNC_FAILED;
1495
1496         rc = hisi_sas_internal_task_abort(hisi_hba, device,
1497                                         HISI_SAS_INT_ABT_DEV, 0);
1498         if (rc < 0) {
1499                 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1500                 return TMF_RESP_FUNC_FAILED;
1501         }
1502         hisi_sas_dereg_device(hisi_hba, device);
1503
1504         tmf_task.tmf = TMF_ABORT_TASK_SET;
1505         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1506
1507         if (rc == TMF_RESP_FUNC_COMPLETE)
1508                 hisi_sas_release_task(hisi_hba, device);
1509
1510         return rc;
1511 }
1512
1513 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1514 {
1515         int rc = TMF_RESP_FUNC_FAILED;
1516         struct hisi_sas_tmf_task tmf_task;
1517
1518         tmf_task.tmf = TMF_CLEAR_ACA;
1519         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1520
1521         return rc;
1522 }
1523
1524 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1525 {
1526         struct sas_phy *local_phy = sas_get_local_phy(device);
1527         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1528                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1529         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1530         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1531         struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1532         struct hisi_sas_phy *phy = container_of(sas_phy,
1533                         struct hisi_sas_phy, sas_phy);
1534         DECLARE_COMPLETION_ONSTACK(phyreset);
1535
1536         if (scsi_is_sas_phy_local(local_phy)) {
1537                 phy->in_reset = 1;
1538                 phy->reset_completion = &phyreset;
1539         }
1540
1541         rc = sas_phy_reset(local_phy, reset_type);
1542         sas_put_local_phy(local_phy);
1543
1544         if (scsi_is_sas_phy_local(local_phy)) {
1545                 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
1546                 unsigned long flags;
1547
1548                 spin_lock_irqsave(&phy->lock, flags);
1549                 phy->reset_completion = NULL;
1550                 phy->in_reset = 0;
1551                 spin_unlock_irqrestore(&phy->lock, flags);
1552
1553                 /* report PHY down if timed out */
1554                 if (!ret)
1555                         hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
1556         } else
1557                 msleep(2000);
1558
1559         return rc;
1560 }
1561
1562 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1563 {
1564         struct hisi_sas_device *sas_dev = device->lldd_dev;
1565         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1566         struct device *dev = hisi_hba->dev;
1567         int rc = TMF_RESP_FUNC_FAILED;
1568
1569         if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1570                 return TMF_RESP_FUNC_FAILED;
1571         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1572
1573         rc = hisi_sas_internal_task_abort(hisi_hba, device,
1574                                         HISI_SAS_INT_ABT_DEV, 0);
1575         if (rc < 0) {
1576                 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1577                 return TMF_RESP_FUNC_FAILED;
1578         }
1579         hisi_sas_dereg_device(hisi_hba, device);
1580
1581         rc = hisi_sas_debug_I_T_nexus_reset(device);
1582
1583         if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1584                 hisi_sas_release_task(hisi_hba, device);
1585
1586         return rc;
1587 }
1588
/*
 * libsas ->lldd_lu_reset handler.
 *
 * For SATA devices a LU reset is emulated by aborting internal IO and
 * hard-resetting the local PHY; for SSP devices a TMF_LU_RESET task
 * management function is issued after the internal abort.
 *
 * Return: a TMF_RESP_* code, or a negative errno from the abort path.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		/* hard_reset = 1: SATA needs a link hardreset, not a TMF */
		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}
1637
/*
 * libsas ->lldd_clear_nexus_ha handler: reset the whole controller, then
 * reset the nexus of every directly-attached (non-expander) end device
 * and complete all outstanding tasks.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success, TMF_RESP_FUNC_FAILED if the
 * synchronous controller reset did not complete.
 */
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	/* Run the controller reset on the driver workqueue and wait for it */
	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		/* Skip free slots and expander-attached devices */
		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		/* Per-device reset failure is logged but not fatal here */
		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
1668
/*
 * libsas ->lldd_query_task handler: issue a TMF QUERY TASK for an SSP
 * command to learn whether it is still present in the LU.
 *
 * Return: TMF_RESP_FUNC_SUCC if the task is still in the LU,
 * TMF_RESP_FUNC_COMPLETE/TMF_RESP_FUNC_FAILED as reported by the target,
 * TMF_RESP_FUNC_FAILED for non-SSP tasks or unexpected TMF results.
 */
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Only SSP tasks that still own a slot can be queried */
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			/* Any other TMF result is treated as failure */
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
1702
/*
 * Build and deliver an internal abort command for @device_id.
 *
 * @abort_flag selects the mode (single IO vs whole device) and @task_tag
 * is the tag of the IO to abort (single-IO mode only). @task is the
 * slow-path sas_task that will be completed when the abort's CQ entry
 * arrives.
 *
 * Return: 0 once the abort has been queued to the HW delivery queue;
 * -EINVAL if the controller is rejecting commands, -1 if the device has
 * no port, -EAGAIN if no delivery-queue entry was free, or a slot
 * allocation error.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	/* Controller is rejecting commands (e.g. mid-reset): bail out */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];

	/* Reserve a write-pointer entry in the delivery queue */
	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	/* Internal aborts carry no data payload, so n_elem stays 0 */
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	/* Clear the command header and the slot's command/status buffers */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	/* Fill in the abort command header (HW-generation specific) */
	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	slot->ready = 1;
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	/* Return the slot index allocated above */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1791
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 *
 * Allocates a slow sas_task, arms a timeout timer, queues the abort to
 * HW and waits for its completion (or the timeout).
 *
 * Return: TMF_RESP_FUNC_COMPLETE/TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED when unsupported or on a plain timeout, -EIO when
 * the abort timed out undelivered, -ENOMEM/exec errors otherwise.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	/* Arm the abort-timeout timer before queueing to HW */
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			/*
			 * Detach the slot from the task we are about to
			 * free, so a late CQ entry cannot touch it.
			 */
			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
1879
/* libsas ->lldd_port_formed callback: forward to the driver's notifier. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
1884
/*
 * libsas ->lldd_port_deformed callback. Deliberately empty here;
 * port/PHY teardown appears to be handled from the PHY-down path
 * (hisi_sas_phy_down()) instead -- NOTE(review): confirm.
 */
static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
1888
1889 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1890                         u8 reg_index, u8 reg_count, u8 *write_data)
1891 {
1892         struct hisi_hba *hisi_hba = sha->lldd_ha;
1893
1894         if (!hisi_hba->hw->write_gpio)
1895                 return -EOPNOTSUPP;
1896
1897         return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1898                                 reg_index, reg_count, write_data);
1899 }
1900
1901 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1902 {
1903         phy->phy_attached = 0;
1904         phy->phy_type = 0;
1905         phy->port = NULL;
1906 }
1907
/*
 * Handle a PHY-down event from the HW interrupt path.
 *
 * @rdy != 0 means the link dropped but came straight back ("down but
 * ready"): re-report the attached device and re-form the port. @rdy == 0
 * is a real loss of signal: notify libsas and tear down port/PHY state,
 * unless the controller is resetting (the "flutter" case), in which case
 * the event is ignored.
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		/* Suppress transient downs seen during controller/PHY reset */
		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/*
				 * A wide port stays attached while any
				 * member PHY is still up.
				 */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1945
1946 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1947 {
1948         int i;
1949
1950         for (i = 0; i < hisi_hba->queue_count; i++) {
1951                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1952
1953                 tasklet_kill(&cq->tasklet);
1954         }
1955 }
1956 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1957
/* SAS transport template shared by the HW-generation drivers. */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* Common sysfs host attributes exported for the HW-generation drivers. */
struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);
1966
1967 static struct sas_domain_function_template hisi_sas_transport_ops = {
1968         .lldd_dev_found         = hisi_sas_dev_found,
1969         .lldd_dev_gone          = hisi_sas_dev_gone,
1970         .lldd_execute_task      = hisi_sas_queue_command,
1971         .lldd_control_phy       = hisi_sas_control_phy,
1972         .lldd_abort_task        = hisi_sas_abort_task,
1973         .lldd_abort_task_set    = hisi_sas_abort_task_set,
1974         .lldd_clear_aca         = hisi_sas_clear_aca,
1975         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
1976         .lldd_lu_reset          = hisi_sas_lu_reset,
1977         .lldd_query_task        = hisi_sas_query_task,
1978         .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1979         .lldd_port_formed       = hisi_sas_port_formed,
1980         .lldd_port_deformed = hisi_sas_port_deformed,
1981         .lldd_write_gpio = hisi_sas_write_gpio,
1982 };
1983
1984 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1985 {
1986         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1987
1988         for (i = 0; i < hisi_hba->queue_count; i++) {
1989                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1990                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1991
1992                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1993                 memset(hisi_hba->cmd_hdr[i], 0, s);
1994                 dq->wr_point = 0;
1995
1996                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1997                 memset(hisi_hba->complete_hdr[i], 0, s);
1998                 cq->rd_point = 0;
1999         }
2000
2001         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2002         memset(hisi_hba->initial_fis, 0, s);
2003
2004         s = max_command_entries * sizeof(struct hisi_sas_iost);
2005         memset(hisi_hba->iost, 0, s);
2006
2007         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2008         memset(hisi_hba->breakpoint, 0, s);
2009
2010         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2011         memset(hisi_hba->sata_breakpoint, 0, s);
2012 }
2013 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2014
/*
 * Allocate and initialize all per-controller software and DMA resources:
 * per-PHY/port/device state, delivery and completion queues, ITCT, slot
 * buffers, IOST, breakpoint tables, slot-index bitmap and the driver
 * workqueue. DMA memory is device-managed (dmam_*), so no explicit
 * error-path frees are needed here.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure.
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		/*
		 * hisi_sas_phy_init() (defined elsewhere) is expected to set
		 * up all per-PHY state, including phy->lock -- TODO confirm.
		 */
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	/* ITCT: per-device context table read by the HW */
	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;
	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;
	/* Carve each coherent block into per-slot buf tables */
	for (i = 0; i < blk_cnt; i++) {
		struct hisi_sas_slot_buf_table *buf;
		dma_addr_t buf_dma;
		int slot_index = i * slots_per_blk;

		buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
		if (!buf)
			goto err_out;
		memset(buf, 0, s);

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf++;
			buf_dma += sizeof(*buf);
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/* Bitmap of free/used command slot indices */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
					&hisi_hba->sata_breakpoint_dma,
					GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2156
2157 void hisi_sas_free(struct hisi_hba *hisi_hba)
2158 {
2159         if (hisi_hba->wq)
2160                 destroy_workqueue(hisi_hba->wq);
2161 }
2162 EXPORT_SYMBOL_GPL(hisi_sas_free);
2163
2164 void hisi_sas_rst_work_handler(struct work_struct *work)
2165 {
2166         struct hisi_hba *hisi_hba =
2167                 container_of(work, struct hisi_hba, rst_work);
2168
2169         hisi_sas_controller_reset(hisi_hba);
2170 }
2171 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2172
2173 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2174 {
2175         struct hisi_sas_rst *rst =
2176                 container_of(work, struct hisi_sas_rst, work);
2177
2178         if (!hisi_sas_controller_reset(rst->hisi_hba))
2179                 rst->done = true;
2180         complete(rst->completion);
2181 }
2182 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2183
/*
 * Read controller configuration from firmware (DT or ACPI) device
 * properties: SAS address, reset/clock syscon registers (DT-only),
 * optional reference clock, PHY count and queue count.
 *
 * Return: 0 on success, -ENOENT when a required property is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	/* The reference clock is optional; its absence is only debug-logged */
	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2251
/*
 * Allocate a Scsi_Host with embedded hisi_hba private data, read firmware
 * properties, map the register resources and allocate all controller
 * resources.
 *
 * Return: the shost on success; NULL on failure (the shost reference is
 * dropped on the error path).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	/* Timer function is set later by whoever arms it */
	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA, fall back to 32-bit */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	/* Second MEM resource (SGPIO registers) is optional */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
2308
2309 int hisi_sas_probe(struct platform_device *pdev,
2310                    const struct hisi_sas_hw *hw)
2311 {
2312         struct Scsi_Host *shost;
2313         struct hisi_hba *hisi_hba;
2314         struct device *dev = &pdev->dev;
2315         struct asd_sas_phy **arr_phy;
2316         struct asd_sas_port **arr_port;
2317         struct sas_ha_struct *sha;
2318         int rc, phy_nr, port_nr, i;
2319
2320         shost = hisi_sas_shost_alloc(pdev, hw);
2321         if (!shost)
2322                 return -ENOMEM;
2323
2324         sha = SHOST_TO_SAS_HA(shost);
2325         hisi_hba = shost_priv(shost);
2326         platform_set_drvdata(pdev, sha);
2327
2328         phy_nr = port_nr = hisi_hba->n_phy;
2329
2330         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2331         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2332         if (!arr_phy || !arr_port) {
2333                 rc = -ENOMEM;
2334                 goto err_out_ha;
2335         }
2336
2337         sha->sas_phy = arr_phy;
2338         sha->sas_port = arr_port;
2339         sha->lldd_ha = hisi_hba;
2340
2341         shost->transportt = hisi_sas_stt;
2342         shost->max_id = HISI_SAS_MAX_DEVICES;
2343         shost->max_lun = ~0;
2344         shost->max_channel = 1;
2345         shost->max_cmd_len = 16;
2346         shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2347         shost->can_queue = hisi_hba->hw->max_command_entries;
2348         shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2349
2350         sha->sas_ha_name = DRV_NAME;
2351         sha->dev = hisi_hba->dev;
2352         sha->lldd_module = THIS_MODULE;
2353         sha->sas_addr = &hisi_hba->sas_addr[0];
2354         sha->num_phys = hisi_hba->n_phy;
2355         sha->core.shost = hisi_hba->shost;
2356
2357         for (i = 0; i < hisi_hba->n_phy; i++) {
2358                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2359                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2360         }
2361
2362         rc = scsi_add_host(shost, &pdev->dev);
2363         if (rc)
2364                 goto err_out_ha;
2365
2366         rc = sas_register_ha(sha);
2367         if (rc)
2368                 goto err_out_register_ha;
2369
2370         rc = hisi_hba->hw->hw_init(hisi_hba);
2371         if (rc)
2372                 goto err_out_register_ha;
2373
2374         scsi_scan_host(shost);
2375
2376         return 0;
2377
2378 err_out_register_ha:
2379         scsi_remove_host(shost);
2380 err_out_ha:
2381         hisi_sas_free(hisi_hba);
2382         scsi_host_put(shost);
2383         return rc;
2384 }
2385 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2386
2387 int hisi_sas_remove(struct platform_device *pdev)
2388 {
2389         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2390         struct hisi_hba *hisi_hba = sha->lldd_ha;
2391         struct Scsi_Host *shost = sha->core.shost;
2392
2393         if (timer_pending(&hisi_hba->timer))
2394                 del_timer(&hisi_hba->timer);
2395
2396         sas_unregister_ha(sha);
2397         sas_remove_host(sha->core.shost);
2398
2399         hisi_sas_free(hisi_hba);
2400         scsi_host_put(shost);
2401         return 0;
2402 }
2403 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2404
2405 static __init int hisi_sas_init(void)
2406 {
2407         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2408         if (!hisi_sas_stt)
2409                 return -ENOMEM;
2410
2411         return 0;
2412 }
2413
/* Module exit: release the shared SAS transport template. */
static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}
2418
/* Module entry/exit points and metadata */
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);