scsi: hisi_sas: Release all remaining resources in clear nexus ha
[linux-2.6-microblaze.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
15 #define DEV_IS_GONE(dev) \
16         ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22                              struct domain_device *device,
23                              int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26                                 void *funcdata);
27 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
28                                   struct domain_device *device);
29 static void hisi_sas_dev_gone(struct domain_device *device);
30
/*
 * Map an ATA command opcode (taken from the host-to-device FIS) to the
 * SATA protocol class that the HW command header must be programmed with.
 *
 * @fis:       H2D FIS carrying the command opcode (and features field)
 * @direction: DMA data direction; consulted only for opcodes not in the
 *             explicit lists below
 */
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
        switch (fis->command) {
        /* NCQ (queued) commands */
        case ATA_CMD_FPDMA_WRITE:
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_RECV:
        case ATA_CMD_FPDMA_SEND:
        case ATA_CMD_NCQ_NON_DATA:
                return HISI_SAS_SATA_PROTOCOL_FPDMA;

        /* PIO data-in/data-out commands */
        case ATA_CMD_DOWNLOAD_MICRO:
        case ATA_CMD_ID_ATA:
        case ATA_CMD_PMP_READ:
        case ATA_CMD_READ_LOG_EXT:
        case ATA_CMD_PIO_READ:
        case ATA_CMD_PIO_READ_EXT:
        case ATA_CMD_PMP_WRITE:
        case ATA_CMD_WRITE_LOG_EXT:
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                return HISI_SAS_SATA_PROTOCOL_PIO;

        /* (non-queued) DMA commands */
        case ATA_CMD_DSM:
        case ATA_CMD_DOWNLOAD_MICRO_DMA:
        case ATA_CMD_PMP_READ_DMA:
        case ATA_CMD_PMP_WRITE_DMA:
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_READ_LOG_DMA_EXT:
        case ATA_CMD_READ_STREAM_DMA_EXT:
        case ATA_CMD_TRUSTED_RCV_DMA:
        case ATA_CMD_TRUSTED_SND_DMA:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
        case ATA_CMD_WRITE_QUEUED:
        case ATA_CMD_WRITE_LOG_DMA_EXT:
        case ATA_CMD_WRITE_STREAM_DMA_EXT:
        case ATA_CMD_ZAC_MGMT_IN:
                return HISI_SAS_SATA_PROTOCOL_DMA;

        /* commands that move no data */
        case ATA_CMD_CHK_POWER:
        case ATA_CMD_DEV_RESET:
        case ATA_CMD_EDD:
        case ATA_CMD_FLUSH:
        case ATA_CMD_FLUSH_EXT:
        case ATA_CMD_VERIFY:
        case ATA_CMD_VERIFY_EXT:
        case ATA_CMD_SET_FEATURES:
        case ATA_CMD_STANDBY:
        case ATA_CMD_STANDBYNOW1:
        case ATA_CMD_ZAC_MGMT_OUT:
                return HISI_SAS_SATA_PROTOCOL_NONDATA;

        case ATA_CMD_SET_MAX:
                /* SET MAX: the protocol depends on the features sub-code */
                switch (fis->features) {
                case ATA_SET_MAX_PASSWD:
                case ATA_SET_MAX_LOCK:
                        return HISI_SAS_SATA_PROTOCOL_PIO;

                case ATA_SET_MAX_PASSWD_DMA:
                case ATA_SET_MAX_UNLOCK_DMA:
                        return HISI_SAS_SATA_PROTOCOL_DMA;

                default:
                        return HISI_SAS_SATA_PROTOCOL_NONDATA;
                }

        default:
        {
                /* Unknown opcode: fall back on the DMA direction hint */
                if (direction == DMA_NONE)
                        return HISI_SAS_SATA_PROTOCOL_NONDATA;
                return HISI_SAS_SATA_PROTOCOL_PIO;
        }
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
108
109 void hisi_sas_sata_done(struct sas_task *task,
110                             struct hisi_sas_slot *slot)
111 {
112         struct task_status_struct *ts = &task->task_status;
113         struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
114         struct hisi_sas_status_buffer *status_buf =
115                         hisi_sas_status_buf_addr_mem(slot);
116         u8 *iu = &status_buf->iu[0];
117         struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
118
119         resp->frame_len = sizeof(struct dev_to_host_fis);
120         memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
121
122         ts->buf_valid_size = sizeof(*resp);
123 }
124 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
125
126 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
127 {
128         struct ata_queued_cmd *qc = task->uldd_task;
129
130         if (qc) {
131                 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
132                         qc->tf.command == ATA_CMD_FPDMA_READ) {
133                         *tag = qc->tag;
134                         return 1;
135                 }
136         }
137         return 0;
138 }
139 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
140
141 /*
142  * This function assumes linkrate mask fits in 8 bits, which it
143  * does for all HW versions supported.
144  */
145 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
146 {
147         u16 rate = 0;
148         int i;
149
150         max -= SAS_LINK_RATE_1_5_GBPS;
151         for (i = 0; i <= max; i++)
152                 rate |= 1 << (i * 2);
153         return rate;
154 }
155 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
156
157 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
158 {
159         return device->port->ha->lldd_ha;
160 }
161
/* Convert a libsas asd_sas_port to the embedding hisi_sas_port. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
        return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
167
168 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169 {
170         int phy_no;
171
172         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
173                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
174 }
175 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
176
177 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
178 {
179         void *bitmap = hisi_hba->slot_index_tags;
180
181         clear_bit(slot_idx, bitmap);
182 }
183
/* Release a slot tag; thin wrapper kept for symmetry with _alloc. */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
        hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
188
189 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
190 {
191         void *bitmap = hisi_hba->slot_index_tags;
192
193         set_bit(slot_idx, bitmap);
194 }
195
/*
 * Allocate a free slot tag, round-robin starting just after the last
 * tag handed out (wrapping to 0 when the top of the bitmap is reached).
 *
 * NOTE(review): callers take hisi_hba->lock around this; the bitmap ops
 * here are not otherwise serialised — confirm all call sites hold it.
 *
 * Returns 0 and stores the tag in *slot_idx, or -SAS_QUEUE_FULL when
 * every tag is in use.
 */
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
        unsigned int index;
        void *bitmap = hisi_hba->slot_index_tags;

        /* first try: search upward from the previous allocation */
        index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
                        hisi_hba->last_slot_index + 1);
        if (index >= hisi_hba->slot_index_count) {
                /* wrap around and search from the bottom */
                index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
                                           0);
                if (index >= hisi_hba->slot_index_count)
                        return -SAS_QUEUE_FULL;
        }
        hisi_sas_slot_index_set(hisi_hba, index);
        *slot_idx = index;
        hisi_hba->last_slot_index = index;

        return 0;
}
215
216 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
217 {
218         int i;
219
220         for (i = 0; i < hisi_hba->slot_index_count; ++i)
221                 hisi_sas_slot_index_clear(hisi_hba, i);
222 }
223
/*
 * Tear down a slot after its task has completed or been aborted:
 * unmap DMA, return the command buffer to the pool, unlink the slot
 * from its device list and free its tag.
 *
 * @task may be NULL (e.g. internal-abort slots with no sas_task).
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                             struct hisi_sas_slot *slot)
{
        struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
        unsigned long flags;

        if (task) {
                struct device *dev = hisi_hba->dev;

                /* already freed (or never attached) — nothing to do */
                if (!task->lldd_task)
                        return;

                task->lldd_task = NULL;

                /* ATA tasks are mapped/unmapped by libata, not by us */
                if (!sas_protocol_ata(task->task_proto))
                        if (slot->n_elem)
                                dma_unmap_sg(dev, task->scatter,
                                             task->num_scatter,
                                             task->data_dir);
        }

        if (slot->buf)
                dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

        /* dq->lock protects the slot's membership of sas_dev->list */
        spin_lock_irqsave(&dq->lock, flags);
        list_del_init(&slot->entry);
        spin_unlock_irqrestore(&dq->lock, flags);
        slot->buf = NULL;
        slot->task = NULL;
        slot->port = NULL;
        /* hisi_hba->lock protects the tag bitmap */
        spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_index_free(hisi_hba, slot->idx);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);

        /* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
261
/* Dispatch SMP command-header preparation to the HW-specific hook. */
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
                                  struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_smp(hisi_hba, slot);
}
267
/* Dispatch SSP command-header preparation to the HW-specific hook. */
static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
                                  struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_ssp(hisi_hba, slot);
}
273
/* Dispatch SATA/STP command-header preparation to the HW-specific hook. */
static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
                                  struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_stp(hisi_hba, slot);
}
279
/* Dispatch internal-abort command preparation to the HW-specific hook. */
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
                struct hisi_sas_slot *slot,
                int device_id, int abort_flag, int tag_to_abort)
{
        hisi_hba->hw->prep_abort(hisi_hba, slot,
                        device_id, abort_flag, tag_to_abort);
}
287
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
        struct hisi_sas_slot *abort_slot =
                container_of(work, struct hisi_sas_slot, abort_slot);
        struct sas_task *task = abort_slot->task;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
        struct scsi_cmnd *cmnd = task->uldd_task;
        struct hisi_sas_tmf_task tmf_task;
        struct scsi_lun lun;
        struct device *dev = hisi_hba->dev;
        int tag = abort_slot->idx;

        /* only SSP tasks can be aborted via ABORT TASK TMF */
        if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
                dev_err(dev, "cannot abort slot for non-ssp task\n");
                goto out;
        }

        int_to_scsilun(cmnd->device->lun, &lun);
        tmf_task.tmf = TMF_ABORT_TASK;
        /* the TMF targets the tag of the task being aborted */
        tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

        hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
        /* Do cleanup for this task */
        hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
        if (task->task_done)
                task->task_done(task);
}
321
/*
 * Prepare a sas_task for delivery: map its DMA buffers, allocate a slot
 * tag and command buffer, reserve a delivery-queue entry and build the
 * protocol-specific command header.
 *
 * On success the chosen delivery queue is returned through @dq_pointer
 * and *@pass is incremented so the caller knows to kick delivery.
 * On failure all acquired resources are released in reverse order via
 * the goto-cleanup chain and a negative errno is returned.
 */
static int hisi_sas_task_prep(struct sas_task *task,
                              struct hisi_sas_dq **dq_pointer,
                              bool is_tmf, struct hisi_sas_tmf_task *tmf,
                              int *pass)
{
        struct domain_device *device = task->dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_sas_port *port;
        struct hisi_sas_slot *slot;
        struct hisi_sas_cmd_hdr *cmd_hdr_base;
        struct asd_sas_port *sas_port = device->port;
        struct device *dev = hisi_hba->dev;
        int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
        int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
        unsigned long flags, flags_dq;
        struct hisi_sas_dq *dq;
        int wr_q_index;

        if (!sas_port) {
                struct task_status_struct *ts = &task->task_status;

                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_PHY_DOWN;
                /*
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
                if (device->dev_type != SAS_SATA_DEV)
                        task->task_done(task);
                return -ECOMM;
        }

        if (DEV_IS_GONE(sas_dev)) {
                /* sas_dev may itself be NULL — see DEV_IS_GONE() */
                if (sas_dev)
                        dev_info(dev, "task prep: device %d not ready\n",
                                 sas_dev->device_id);
                else
                        dev_info(dev, "task prep: device %016llx not ready\n",
                                 SAS_ADDR(device->sas_addr));

                return -ECOMM;
        }

        /* each device is bound to one delivery queue at alloc time */
        *dq_pointer = dq = sas_dev->dq;

        port = to_hisi_sas_port(sas_port);
        if (port && !port->port_attached) {
                dev_info(dev, "task prep: %s port%d not attach device\n",
                         (dev_is_sata(device)) ?
                         "SATA/STP" : "SAS",
                         device->port->id);

                return -ECOMM;
        }

        if (!sas_protocol_ata(task->task_proto)) {
                unsigned int req_len, resp_len;

                if (task->num_scatter) {
                        n_elem = dma_map_sg(dev, task->scatter,
                                            task->num_scatter, task->data_dir);
                        if (!n_elem) {
                                rc = -ENOMEM;
                                goto prep_out;
                        }
                } else if (task->task_proto & SAS_PROTOCOL_SMP) {
                        n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
                                                1, DMA_TO_DEVICE);
                        if (!n_elem_req) {
                                rc = -ENOMEM;
                                goto prep_out;
                        }
                        /* SMP frames must be 4-byte aligned in length */
                        req_len = sg_dma_len(&task->smp_task.smp_req);
                        if (req_len & 0x3) {
                                rc = -EINVAL;
                                goto err_out_dma_unmap;
                        }
                        n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
                                                 1, DMA_FROM_DEVICE);
                        if (!n_elem_resp) {
                                rc = -ENOMEM;
                                goto err_out_dma_unmap;
                        }
                        resp_len = sg_dma_len(&task->smp_task.smp_resp);
                        if (resp_len & 0x3) {
                                rc = -EINVAL;
                                goto err_out_dma_unmap;
                        }
                }
        } else
                /* ATA: libata owns the mapping; just record the count */
                n_elem = task->num_scatter;

        if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
                dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
                        n_elem);
                rc = -EINVAL;
                goto err_out_dma_unmap;
        }

        /* hisi_hba->lock guards the slot tag bitmap */
        spin_lock_irqsave(&hisi_hba->lock, flags);
        if (hisi_hba->hw->slot_index_alloc)
                rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
                                                    device);
        else
                rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
        if (rc)
                goto err_out_dma_unmap;

        slot = &hisi_hba->slot_info[slot_idx];
        memset(slot, 0, sizeof(struct hisi_sas_slot));

        slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
                                   GFP_ATOMIC, &slot->buf_dma);
        if (!slot->buf) {
                rc = -ENOMEM;
                goto err_out_tag;
        }

        /* dq->lock guards the delivery-queue write pointer and list */
        spin_lock_irqsave(&dq->lock, flags_dq);
        wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
        if (wr_q_index < 0) {
                spin_unlock_irqrestore(&dq->lock, flags_dq);
                rc = -EAGAIN;
                goto err_out_buf;
        }

        list_add_tail(&slot->delivery, &dq->list);
        spin_unlock_irqrestore(&dq->lock, flags_dq);

        dlvry_queue = dq->id;
        dlvry_queue_slot = wr_q_index;

        slot->idx = slot_idx;
        slot->n_elem = n_elem;
        slot->dlvry_queue = dlvry_queue;
        slot->dlvry_queue_slot = dlvry_queue_slot;
        cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
        slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
        slot->task = task;
        slot->port = port;
        slot->tmf = tmf;
        slot->is_internal = is_tmf;
        task->lldd_task = slot;
        INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

        /* start from clean header/command/status memory */
        memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
        memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
        memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

        switch (task->task_proto) {
        case SAS_PROTOCOL_SMP:
                hisi_sas_task_prep_smp(hisi_hba, slot);
                break;
        case SAS_PROTOCOL_SSP:
                hisi_sas_task_prep_ssp(hisi_hba, slot);
                break;
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
                hisi_sas_task_prep_ata(hisi_hba, slot);
                break;
        default:
                dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
                        task->task_proto);
                break;
        }

        spin_lock_irqsave(&dq->lock, flags);
        list_add_tail(&slot->entry, &sas_dev->list);
        spin_unlock_irqrestore(&dq->lock, flags);
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags |= SAS_TASK_AT_INITIATOR;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        ++(*pass);
        slot->ready = 1;

        return 0;

        /* unwind in strict reverse order of acquisition */
err_out_buf:
        dma_pool_free(hisi_hba->buffer_pool, slot->buf,
                      slot->buf_dma);
err_out_tag:
        spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_index_free(hisi_hba, slot_idx);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
        if (!sas_protocol_ata(task->task_proto)) {
                if (task->num_scatter) {
                        dma_unmap_sg(dev, task->scatter, task->num_scatter,
                             task->data_dir);
                } else if (task->task_proto & SAS_PROTOCOL_SMP) {
                        if (n_elem_req)
                                dma_unmap_sg(dev, &task->smp_task.smp_req,
                                             1, DMA_TO_DEVICE);
                        if (n_elem_resp)
                                dma_unmap_sg(dev, &task->smp_task.smp_resp,
                                             1, DMA_FROM_DEVICE);
                }
        }
prep_out:
        dev_err(dev, "task prep: failed[%d]!\n", rc);
        return rc;
}
528
529 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
530                               bool is_tmf, struct hisi_sas_tmf_task *tmf)
531 {
532         u32 rc;
533         u32 pass = 0;
534         unsigned long flags;
535         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
536         struct device *dev = hisi_hba->dev;
537         struct hisi_sas_dq *dq = NULL;
538
539         if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
540                 if (in_softirq())
541                         return -EINVAL;
542
543                 down(&hisi_hba->sem);
544                 up(&hisi_hba->sem);
545         }
546
547         /* protect task_prep and start_delivery sequence */
548         rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
549         if (rc)
550                 dev_err(dev, "task exec: failed[%d]!\n", rc);
551
552         if (likely(pass)) {
553                 spin_lock_irqsave(&dq->lock, flags);
554                 hisi_hba->hw->start_delivery(dq);
555                 spin_unlock_irqrestore(&dq->lock, flags);
556         }
557
558         return rc;
559 }
560
/*
 * Report an attached PHY to libsas: publish negotiated/min/max link
 * rates on the transport-class phy, fill the identify frame for SAS
 * PHYs, and raise OOB_DONE followed by BYTES_DMAED port events.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_ha_struct *sas_ha;

        /* nothing to report if no device is attached */
        if (!phy->phy_attached)
                return;

        sas_ha = &hisi_hba->sha;
        sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

        if (sas_phy->phy) {
                struct sas_phy *sphy = sas_phy->phy;

                sphy->negotiated_linkrate = sas_phy->linkrate;
                sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
                sphy->maximum_linkrate_hw =
                        hisi_hba->hw->phy_get_max_linkrate();
                /* only seed user limits when still unset */
                if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
                        sphy->minimum_linkrate = phy->minimum_linkrate;

                if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
                        sphy->maximum_linkrate = phy->maximum_linkrate;
        }

        if (phy->phy_type & PORT_TYPE_SAS) {
                struct sas_identify_frame *id;

                id = (struct sas_identify_frame *)phy->frame_rcvd;
                id->dev_type = phy->identify.device_type;
                id->initiator_bits = SAS_PROTOCOL_ALL;
                id->target_bits = phy->identify.target_port_protocols;
        } else if (phy->phy_type & PORT_TYPE_SATA) {
                /*Nothing*/
        }

        sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
        sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
601
/*
 * Allocate a hisi_sas_device entry for @device, searching the device
 * table round-robin starting just after the last allocated id, and bind
 * it to a delivery queue (id modulo queue count).
 *
 * Returns the new entry, or NULL if no free entry was found.
 *
 * NOTE(review): the loop condition is "i != last" with i++ inside the
 * body, so the entry at index "last" itself is never examined in a full
 * pass — looks intentional for round-robin, but confirm.
 */
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct hisi_sas_device *sas_dev = NULL;
        unsigned long flags;
        int last = hisi_hba->last_dev_id;
        int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
        int i;

        spin_lock_irqsave(&hisi_hba->lock, flags);
        for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
                if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
                        /* spread devices across the delivery queues */
                        int queue = i % hisi_hba->queue_count;
                        struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

                        hisi_hba->devices[i].device_id = i;
                        sas_dev = &hisi_hba->devices[i];
                        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
                        sas_dev->dev_type = device->dev_type;
                        sas_dev->hisi_hba = hisi_hba;
                        sas_dev->sas_device = device;
                        sas_dev->dq = dq;
                        INIT_LIST_HEAD(&hisi_hba->devices[i].list);
                        break;
                }
                i++;
        }
        hisi_hba->last_dev_id = i;
        spin_unlock_irqrestore(&hisi_hba->lock, flags);

        return sas_dev;
}
634
/* how many times to retry a SATA disk softreset before giving up */
#define HISI_SAS_SRST_ATA_DISK_CNT 3
/*
 * Put a newly found device into a known state: CLEAR TASK SET (and
 * release any stale slots) for SSP end devices, softreset with retries
 * for SATA devices. Other device types need no initialisation.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success or the failing TMF/reset rc.
 */
static int hisi_sas_init_device(struct domain_device *device)
{
        int rc = TMF_RESP_FUNC_COMPLETE;
        struct scsi_lun lun;
        struct hisi_sas_tmf_task tmf_task;
        int retry = HISI_SAS_SRST_ATA_DISK_CNT;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

        switch (device->dev_type) {
        case SAS_END_DEVICE:
                int_to_scsilun(0, &lun);

                tmf_task.tmf = TMF_CLEAR_TASK_SET;
                rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
                                                  &tmf_task);
                /* drop any slots the TMF just cleared on the target */
                if (rc == TMF_RESP_FUNC_COMPLETE)
                        hisi_sas_release_task(hisi_hba, device);
                break;
        case SAS_SATA_DEV:
        case SAS_SATA_PM:
        case SAS_SATA_PM_PORT:
        case SAS_SATA_PENDING:
                while (retry-- > 0) {
                        rc = hisi_sas_softreset_ata_disk(device);
                        if (!rc)
                                break;
                }
                break;
        default:
                break;
        }

        return rc;
}
670
/*
 * libsas dev_found hook: allocate an LLDD device entry, program the
 * ITCT, sanity-check expander attachment and run first-time device
 * initialisation. On any failure after allocation, tear back down via
 * hisi_sas_dev_gone().
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct domain_device *parent_dev = device->parent;
        struct hisi_sas_device *sas_dev;
        struct device *dev = hisi_hba->dev;
        int rc;

        /* HW generations may supply their own allocator */
        if (hisi_hba->hw->alloc_dev)
                sas_dev = hisi_hba->hw->alloc_dev(device);
        else
                sas_dev = hisi_sas_alloc_dev(device);
        if (!sas_dev) {
                dev_err(dev, "fail alloc dev: max support %d devices\n",
                        HISI_SAS_MAX_DEVICES);
                return -EINVAL;
        }

        device->lldd_dev = sas_dev;
        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
                int phy_no;
                u8 phy_num = parent_dev->ex_dev.num_phys;
                struct ex_phy *phy;

                /* find the expander phy attached to this device */
                for (phy_no = 0; phy_no < phy_num; phy_no++) {
                        phy = &parent_dev->ex_dev.ex_phy[phy_no];
                        if (SAS_ADDR(phy->attached_sas_addr) ==
                                SAS_ADDR(device->sas_addr))
                                break;
                }

                if (phy_no == phy_num) {
                        dev_info(dev, "dev found: no attached "
                                 "dev:%016llx at ex:%016llx\n",
                                 SAS_ADDR(device->sas_addr),
                                 SAS_ADDR(parent_dev->sas_addr));
                        rc = -EINVAL;
                        goto err_out;
                }
        }

        dev_info(dev, "dev[%d:%x] found\n",
                sas_dev->device_id, sas_dev->dev_type);

        rc = hisi_sas_init_device(device);
        if (rc)
                goto err_out;
        return 0;

err_out:
        hisi_sas_dev_gone(device);
        return rc;
}
726
/*
 * Per-sdev configuration hook: apply the libsas defaults, then cap the
 * queue depth at 64 for non-SATA devices.
 */
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
        struct domain_device *dev = sdev_to_domain_dev(sdev);
        int rc;

        rc = sas_slave_configure(sdev);
        if (rc)
                return rc;

        if (!dev_is_sata(dev))
                sas_change_queue_depth(sdev, 64);

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
740
/* Begin host scan by bringing up the PHYs; discovery follows from IRQs. */
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);

        hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
748
749 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
750 {
751         struct hisi_hba *hisi_hba = shost_priv(shost);
752         struct sas_ha_struct *sha = &hisi_hba->sha;
753
754         /* Wait for PHY up interrupt to occur */
755         if (time < HZ)
756                 return 0;
757
758         sas_drain_work(sha);
759         return 1;
760 }
761 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
762
/*
 * Deferred PHY-up handler: notify serial link layer, then report the
 * attached device to libsas. Runs from the driver workqueue because
 * sl_notify() may sleep.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
        struct hisi_sas_phy *phy =
                container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
        struct hisi_hba *hisi_hba = phy->hisi_hba;
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        int phy_no = sas_phy->id;

        hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
        hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
774
/* Deferred link-reset handler: issue PHY_FUNC_LINK_RESET on the phy. */
static void hisi_sas_linkreset_work(struct work_struct *work)
{
        struct hisi_sas_phy *phy =
                container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
783
/* Work handlers for each hisi_sas_phy_event, indexed by event id. */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
        [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
        [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
788
789 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
790                                 enum hisi_sas_phy_event event)
791 {
792         struct hisi_hba *hisi_hba = phy->hisi_hba;
793
794         if (WARN_ON(event >= HISI_PHYES_NUM))
795                 return false;
796
797         return queue_work(hisi_hba->wq, &phy->works[event]);
798 }
799 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
800
/*
 * One-time initialisation of the per-phy state: default link-rate
 * limits, libsas asd_sas_phy identity fields, and the deferred-event
 * work items.
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        int i;

        phy->hisi_hba = hisi_hba;
        phy->port = NULL;
        phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
        phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
        /* only phys that actually exist on this HW start enabled */
        sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
        sas_phy->tproto = 0;
        sas_phy->type = PHY_TYPE_PHYSICAL;
        sas_phy->role = PHY_ROLE_INITIATOR;
        sas_phy->oob_mode = OOB_NOT_CONNECTED;
        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
        sas_phy->id = phy_no;
        sas_phy->sas_addr = &hisi_hba->sas_addr[0];
        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
        sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
        sas_phy->lldd_phy = phy;

        for (i = 0; i < HISI_PHYES_NUM; i++)
                INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
828
/*
 * libsas port-formed notification: attach the LLDD port object to the
 * phy and the asd_sas_port, recording the HW port id, under
 * hisi_hba->lock.
 */
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
        struct sas_ha_struct *sas_ha = sas_phy->ha;
        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
        struct hisi_sas_phy *phy = sas_phy->lldd_phy;
        struct asd_sas_port *sas_port = sas_phy->port;
        struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
        unsigned long flags;

        if (!sas_port)
                return;

        spin_lock_irqsave(&hisi_hba->lock, flags);
        port->port_attached = 1;
        port->id = phy->port_id;
        phy->port = port;
        sas_port->lldd_port = port;
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
848
849 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
850                                      struct hisi_sas_slot *slot)
851 {
852         if (task) {
853                 unsigned long flags;
854                 struct task_status_struct *ts;
855
856                 ts = &task->task_status;
857
858                 ts->resp = SAS_TASK_COMPLETE;
859                 ts->stat = SAS_ABORTED_TASK;
860                 spin_lock_irqsave(&task->task_state_lock, flags);
861                 task->task_state_flags &=
862                         ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
863                 task->task_state_flags |= SAS_TASK_STATE_DONE;
864                 spin_unlock_irqrestore(&task->task_state_lock, flags);
865         }
866
867         hisi_sas_slot_task_free(hisi_hba, task, slot);
868 }
869
870 /* hisi_hba.lock should be locked */
871 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
872                         struct domain_device *device)
873 {
874         struct hisi_sas_slot *slot, *slot2;
875         struct hisi_sas_device *sas_dev = device->lldd_dev;
876
877         list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
878                 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
879 }
880
881 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
882 {
883         struct hisi_sas_device *sas_dev;
884         struct domain_device *device;
885         int i;
886
887         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
888                 sas_dev = &hisi_hba->devices[i];
889                 device = sas_dev->sas_device;
890
891                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
892                     !device)
893                         continue;
894
895                 hisi_sas_release_task(hisi_hba, device);
896         }
897 }
898 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
899
/*
 * Ask the hw layer to deregister @device.  Optional hook: not all hw
 * versions implement it.
 */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
906
/*
 * hisi_sas_dev_gone - libsas lldd_dev_gone handler
 *
 * Abort any commands still active for the device, deregister it and
 * clear its ITCT entry, then mark the LLDD device slot free for reuse.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	/*
	 * Skip the hardware teardown while a controller reset is in
	 * flight; the reset path re-initialises device state itself.
	 */
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		/* Serialise ITCT clearing against a controller reset */
		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
932
/* libsas lldd_execute_task handler: queue a normal (non-TMF) task. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
937
938 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
939                         struct sas_phy_linkrates *r)
940 {
941         struct sas_phy_linkrates _r;
942
943         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
944         struct asd_sas_phy *sas_phy = &phy->sas_phy;
945         enum sas_linkrate min, max;
946
947         if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
948                 max = sas_phy->phy->maximum_linkrate;
949                 min = r->minimum_linkrate;
950         } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
951                 max = r->maximum_linkrate;
952                 min = sas_phy->phy->minimum_linkrate;
953         } else
954                 return;
955
956         _r.maximum_linkrate = max;
957         _r.minimum_linkrate = min;
958
959         hisi_hba->hw->phy_disable(hisi_hba, phy_no);
960         msleep(100);
961         hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
962         hisi_hba->hw->phy_start(hisi_hba, phy_no);
963 }
964
/*
 * hisi_sas_control_phy - libsas lldd_control_phy handler
 * @sas_phy: phy to operate on
 * @func: requested phy operation
 * @funcdata: operation-specific argument (struct sas_phy_linkrates for
 *            PHY_FUNC_SET_LINK_RATE)
 *
 * Returns 0 on success or -EOPNOTSUPP for unimplemented operations.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		/* Bounce the phy: disable, let it settle, restart OOB */
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		/* Only supported when the hw layer provides get_events */
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
1002
1003 static void hisi_sas_task_done(struct sas_task *task)
1004 {
1005         if (!del_timer(&task->slow_task->timer))
1006                 return;
1007         complete(&task->slow_task->completion);
1008 }
1009
/*
 * Timer callback for internal TMF tasks: mark the task aborted (unless
 * it already completed) and wake the waiter in
 * hisi_sas_exec_internal_tmf_task().
 */
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
1023
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
/*
 * hisi_sas_exec_internal_tmf_task - issue an internal TMF task and wait
 * @device: target device
 * @parameter: SSP task (SSP devices) or host-to-dev FIS (SATA devices)
 * @para_len: length of @parameter in bytes
 * @tmf: TMF descriptor; NULL when sending a raw SATA reset FIS
 *
 * Allocates a slow task, queues it via hisi_sas_task_exec() and waits
 * for completion, retrying up to TASK_RETRY times on unrecognised
 * status.  Returns a TMF_RESP_* code, a negative errno, or (on data
 * underrun) the residual byte count.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Arm the TMF timeout before queueing the task */
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				/*
				 * Orphan the slot: the task is about to be
				 * freed, but the slot may still complete
				 * later in hardware.
				 */
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Unrecognised status: log, free the task and retry */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
1128
1129 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1130                 bool reset, int pmp, u8 *fis)
1131 {
1132         struct ata_taskfile tf;
1133
1134         ata_tf_init(dev, &tf);
1135         if (reset)
1136                 tf.ctl |= ATA_SRST;
1137         else
1138                 tf.ctl &= ~ATA_SRST;
1139         tf.command = ATA_CMD_DEV_RESET;
1140         ata_tf_to_fis(&tf, pmp, 0, fis);
1141 }
1142
/*
 * Soft reset an (S)ATA disk: send an SRST-assert FIS to every link on
 * the ata_port, then (only if all asserts succeeded) an SRST-deassert
 * FIS to each link.  On overall success, all outstanding slots for the
 * device are released.  Returns a TMF_RESP_* code.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	/* Phase 1: assert SRST on each link */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: deassert SRST on each link */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1181
1182 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1183                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
1184 {
1185         struct sas_ssp_task ssp_task;
1186
1187         if (!(device->tproto & SAS_PROTOCOL_SSP))
1188                 return TMF_RESP_FUNC_ESUPP;
1189
1190         memcpy(ssp_task.LUN, lun, 8);
1191
1192         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1193                                 sizeof(ssp_task), tmf);
1194 }
1195
/*
 * After a controller reset the hardware may assign different port ids.
 * For each registered device, find a phy of its port which came back
 * up (per the current phys state), adopt that phy's new port id and
 * re-program the ITCT; if no phy is up, mark the port id invalid
 * (0xff).
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Find any phy of this port which is up again */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
1234
/*
 * Compare the pre- and post-reset phy states and notify libsas of the
 * differences: a phy still up on an expander port triggers a broadcast
 * (revalidation) event, raised at most once per port; a phy that was
 * up before the reset but is down now is reported via
 * hisi_sas_phy_down().
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/* _sas_port caches the last-notified port */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}
1268
1269 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1270 {
1271         struct hisi_sas_device *sas_dev;
1272         struct domain_device *device;
1273         int i;
1274
1275         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1276                 sas_dev = &hisi_hba->devices[i];
1277                 device = sas_dev->sas_device;
1278
1279                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1280                         continue;
1281
1282                 hisi_sas_init_device(device);
1283         }
1284 }
1285
/*
 * Send an SRST-assert FIS to @device once through every currently-up
 * phy of @sas_port (tmf_task.force_phy/phy_id steer each attempt down
 * a specific phy).  Used to clear STP-reject state after a controller
 * reset.
 */
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		/* Skip phys that are down */
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}
1320
/*
 * Recovery step for hw with reject_stp_links_msk set: first internally
 * abort outstanding commands on every registered device, then, for
 * each expander port, pick one SATA device and issue per-phy ATA
 * resets so the expander stops rejecting STP connections.
 */
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
1360
/*
 * hisi_sas_controller_reset - soft reset the controller and recover
 *
 * Blocks new requests, quiesces outstanding commands, soft resets the
 * hardware, then brings the phys back up, refreshes port ids and
 * re-initialises devices before rescanning the topology.  The
 * HISI_SAS_RESET_BIT flag and hisi_hba->sem serialise against other
 * teardown paths (e.g. hisi_sas_dev_gone()).  Returns 0 on success,
 * -1 if unsupported or a reset is already running, or the hw soft
 * reset's error code.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one reset may be in flight at a time */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	down(&hisi_hba->sem);
	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		/* Reset failed: unwind flags and unblock the host */
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	/* Report phys whose state changed across the reset to libsas */
	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
1414
/*
 * hisi_sas_abort_task - libsas lldd_abort_task handler
 *
 * For SSP: issue an ABORT TASK TMF plus an internal (controller-level)
 * abort of the slot.  For SATA/STP: internally abort all device
 * commands and soft reset the disk.  For SMP: internally abort the
 * slot only.  Returns a TMF_RESP_* code.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		/* Task already completed: nothing left to abort */
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			/* Abort everything on the device, then soft reset */
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
			     HISI_SAS_INT_ABT_CMD, tag);
		/* Release the slot ourselves if the internal abort did not */
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task)
			hisi_sas_do_release_task(hisi_hba, task, slot);
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
1502
1503 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1504 {
1505         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1506         struct device *dev = hisi_hba->dev;
1507         struct hisi_sas_tmf_task tmf_task;
1508         int rc = TMF_RESP_FUNC_FAILED;
1509
1510         rc = hisi_sas_internal_task_abort(hisi_hba, device,
1511                                         HISI_SAS_INT_ABT_DEV, 0);
1512         if (rc < 0) {
1513                 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1514                 return TMF_RESP_FUNC_FAILED;
1515         }
1516         hisi_sas_dereg_device(hisi_hba, device);
1517
1518         tmf_task.tmf = TMF_ABORT_TASK_SET;
1519         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1520
1521         if (rc == TMF_RESP_FUNC_COMPLETE)
1522                 hisi_sas_release_task(hisi_hba, device);
1523
1524         return rc;
1525 }
1526
1527 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1528 {
1529         int rc = TMF_RESP_FUNC_FAILED;
1530         struct hisi_sas_tmf_task tmf_task;
1531
1532         tmf_task.tmf = TMF_CLEAR_ACA;
1533         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1534
1535         return rc;
1536 }
1537
1538 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1539 {
1540         struct sas_phy *local_phy = sas_get_local_phy(device);
1541         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1542                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1543         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1544         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1545         struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1546         struct hisi_sas_phy *phy = container_of(sas_phy,
1547                         struct hisi_sas_phy, sas_phy);
1548         DECLARE_COMPLETION_ONSTACK(phyreset);
1549
1550         if (scsi_is_sas_phy_local(local_phy)) {
1551                 phy->in_reset = 1;
1552                 phy->reset_completion = &phyreset;
1553         }
1554
1555         rc = sas_phy_reset(local_phy, reset_type);
1556         sas_put_local_phy(local_phy);
1557
1558         if (scsi_is_sas_phy_local(local_phy)) {
1559                 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
1560                 unsigned long flags;
1561
1562                 spin_lock_irqsave(&phy->lock, flags);
1563                 phy->reset_completion = NULL;
1564                 phy->in_reset = 0;
1565                 spin_unlock_irqrestore(&phy->lock, flags);
1566
1567                 /* report PHY down if timed out */
1568                 if (!ret)
1569                         hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
1570         } else
1571                 msleep(2000);
1572
1573         return rc;
1574 }
1575
/*
 * libsas I_T nexus reset handler: internally abort the device's
 * outstanding commands, deregister it from the hardware, then reset
 * the phy and, on success, release any remaining slots.  Only valid
 * while the device is in error handling (HISI_SAS_DEV_EH).
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	/* -ENODEV means the device is gone: its slots can be freed too */
	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1602
/*
 * TMF LUN reset handler.  SATA devices have no LU_RESET TMF, so they are
 * recovered with a phy hard reset instead; SSP devices get a real LU_RESET.
 * Both paths first abort the device's commands in the HW so the reset
 * starts from a quiesced state.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		/* sas_phy_reset() returns 0 on success */
		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}
1651
/*
 * libsas "clear nexus HA" handler: synchronously reset the controller,
 * hard-reset every directly-attached end device, then release all tasks
 * still held by the LLDD so no command is left orphaned.
 */
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	/* Run the controller reset on the driver workqueue and wait for it */
	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		/* Expander-attached devices are not reset directly */
		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	/* Release all remaining resources regardless of per-device results */
	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
1682
1683 static int hisi_sas_query_task(struct sas_task *task)
1684 {
1685         struct scsi_lun lun;
1686         struct hisi_sas_tmf_task tmf_task;
1687         int rc = TMF_RESP_FUNC_FAILED;
1688
1689         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1690                 struct scsi_cmnd *cmnd = task->uldd_task;
1691                 struct domain_device *device = task->dev;
1692                 struct hisi_sas_slot *slot = task->lldd_task;
1693                 u32 tag = slot->idx;
1694
1695                 int_to_scsilun(cmnd->device->lun, &lun);
1696                 tmf_task.tmf = TMF_QUERY_TASK;
1697                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1698
1699                 rc = hisi_sas_debug_issue_ssp_tmf(device,
1700                                                   lun.scsi_lun,
1701                                                   &tmf_task);
1702                 switch (rc) {
1703                 /* The task is still in Lun, release it then */
1704                 case TMF_RESP_FUNC_SUCC:
1705                 /* The task is not in Lun or failed, reset the phy */
1706                 case TMF_RESP_FUNC_FAILED:
1707                 case TMF_RESP_FUNC_COMPLETE:
1708                         break;
1709                 default:
1710                         rc = TMF_RESP_FUNC_FAILED;
1711                         break;
1712                 }
1713         }
1714         return rc;
1715 }
1716
/*
 * Build an internal abort command in a free slot and deliver it to the HW.
 *
 * @device_id: HW device id the abort applies to
 * @task: slow task carrying timeout/completion state for this abort
 * @abort_flag: HISI_SAS_INT_ABT_CMD (single IO) or HISI_SAS_INT_ABT_DEV
 * @task_tag: tag of the IO to abort (single-IO mode only)
 *
 * Returns 0 once the command has been queued; the outcome is reported
 * asynchronously through the completion queue.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	/* Controller is currently rejecting commands (e.g. mid-reset) */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
			GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	/* Reserve a write position on the delivery queue */
	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	/* Start from clean command/status memory before prepping the abort */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	slot->ready = 1;
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1817
1818 /**
1819  * hisi_sas_internal_task_abort -- execute an internal
1820  * abort command for single IO command or a device
1821  * @hisi_hba: host controller struct
1822  * @device: domain device
1823  * @abort_flag: mode of operation, device or single IO
1824  * @tag: tag of IO to be aborted (only relevant to single
1825  *       IO mode)
1826  */
1827 static int
1828 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1829                              struct domain_device *device,
1830                              int abort_flag, int tag)
1831 {
1832         struct sas_task *task;
1833         struct hisi_sas_device *sas_dev = device->lldd_dev;
1834         struct device *dev = hisi_hba->dev;
1835         int res;
1836
1837         /*
1838          * The interface is not realized means this HW don't support internal
1839          * abort, or don't need to do internal abort. Then here, we return
1840          * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
1841          * the internal abort has been executed and returned CQ.
1842          */
1843         if (!hisi_hba->hw->prep_abort)
1844                 return TMF_RESP_FUNC_FAILED;
1845
1846         task = sas_alloc_slow_task(GFP_KERNEL);
1847         if (!task)
1848                 return -ENOMEM;
1849
1850         task->dev = device;
1851         task->task_proto = device->tproto;
1852         task->task_done = hisi_sas_task_done;
1853         task->slow_task->timer.function = hisi_sas_tmf_timedout;
1854         task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1855         add_timer(&task->slow_task->timer);
1856
1857         res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1858                                                 task, abort_flag, tag);
1859         if (res) {
1860                 del_timer(&task->slow_task->timer);
1861                 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1862                         res);
1863                 goto exit;
1864         }
1865         wait_for_completion(&task->slow_task->completion);
1866         res = TMF_RESP_FUNC_FAILED;
1867
1868         /* Internal abort timed out */
1869         if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1870                 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1871                         struct hisi_sas_slot *slot = task->lldd_task;
1872
1873                         if (slot)
1874                                 slot->task = NULL;
1875                         dev_err(dev, "internal task abort: timeout and not done.\n");
1876                         res = -EIO;
1877                         goto exit;
1878                 } else
1879                         dev_err(dev, "internal task abort: timeout.\n");
1880         }
1881
1882         if (task->task_status.resp == SAS_TASK_COMPLETE &&
1883                 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1884                 res = TMF_RESP_FUNC_COMPLETE;
1885                 goto exit;
1886         }
1887
1888         if (task->task_status.resp == SAS_TASK_COMPLETE &&
1889                 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1890                 res = TMF_RESP_FUNC_SUCC;
1891                 goto exit;
1892         }
1893
1894 exit:
1895         dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1896                 "resp: 0x%x sts 0x%x\n",
1897                 SAS_ADDR(device->sas_addr),
1898                 task,
1899                 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1900                 task->task_status.stat);
1901         sas_free_task(task);
1902
1903         return res;
1904 }
1905
/* libsas hook: a port has been formed on this phy */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
1910
/* libsas hook: port deformation needs no HW-specific handling here */
static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
1914
1915 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1916                         u8 reg_index, u8 reg_count, u8 *write_data)
1917 {
1918         struct hisi_hba *hisi_hba = sha->lldd_ha;
1919
1920         if (!hisi_hba->hw->write_gpio)
1921                 return -EOPNOTSUPP;
1922
1923         return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1924                                 reg_index, reg_count, write_data);
1925 }
1926
1927 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1928 {
1929         phy->phy_attached = 0;
1930         phy->phy_type = 0;
1931         phy->port = NULL;
1932 }
1933
/**
 * hisi_sas_phy_down - handle a phy-down event reported by the HW
 * @hisi_hba: host controller struct
 * @phy_no: phy index
 * @rdy: non-zero if the phy is down but still ready (link bounce)
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		/* Transient downs during controller/phy reset are expected */
		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/* Last phy of a wide port gone: detach port */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1971
1972 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1973 {
1974         int i;
1975
1976         for (i = 0; i < hisi_hba->queue_count; i++) {
1977                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1978
1979                 tasklet_kill(&cq->tasklet);
1980         }
1981 }
1982 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1983
/* SAS transport template shared by all hisi_sas HW generations */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* Common sysfs host attributes exposed by every HW generation */
struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);
1992
1993 static struct sas_domain_function_template hisi_sas_transport_ops = {
1994         .lldd_dev_found         = hisi_sas_dev_found,
1995         .lldd_dev_gone          = hisi_sas_dev_gone,
1996         .lldd_execute_task      = hisi_sas_queue_command,
1997         .lldd_control_phy       = hisi_sas_control_phy,
1998         .lldd_abort_task        = hisi_sas_abort_task,
1999         .lldd_abort_task_set    = hisi_sas_abort_task_set,
2000         .lldd_clear_aca         = hisi_sas_clear_aca,
2001         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
2002         .lldd_lu_reset          = hisi_sas_lu_reset,
2003         .lldd_query_task        = hisi_sas_query_task,
2004         .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
2005         .lldd_port_formed       = hisi_sas_port_formed,
2006         .lldd_port_deformed = hisi_sas_port_deformed,
2007         .lldd_write_gpio = hisi_sas_write_gpio,
2008 };
2009
2010 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2011 {
2012         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
2013
2014         for (i = 0; i < hisi_hba->queue_count; i++) {
2015                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2016                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2017
2018                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2019                 memset(hisi_hba->cmd_hdr[i], 0, s);
2020                 dq->wr_point = 0;
2021
2022                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2023                 memset(hisi_hba->complete_hdr[i], 0, s);
2024                 cq->rd_point = 0;
2025         }
2026
2027         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2028         memset(hisi_hba->initial_fis, 0, s);
2029
2030         s = max_command_entries * sizeof(struct hisi_sas_iost);
2031         memset(hisi_hba->iost, 0, s);
2032
2033         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2034         memset(hisi_hba->breakpoint, 0, s);
2035
2036         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2037         memset(hisi_hba->sata_breakpoint, 0, s);
2038 }
2039 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2040
2041 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
2042 {
2043         struct device *dev = hisi_hba->dev;
2044         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
2045
2046         sema_init(&hisi_hba->sem, 1);
2047         spin_lock_init(&hisi_hba->lock);
2048         for (i = 0; i < hisi_hba->n_phy; i++) {
2049                 hisi_sas_phy_init(hisi_hba, i);
2050                 hisi_hba->port[i].port_attached = 0;
2051                 hisi_hba->port[i].id = -1;
2052         }
2053
2054         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2055                 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2056                 hisi_hba->devices[i].device_id = i;
2057                 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
2058         }
2059
2060         for (i = 0; i < hisi_hba->queue_count; i++) {
2061                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2062                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2063
2064                 /* Completion queue structure */
2065                 cq->id = i;
2066                 cq->hisi_hba = hisi_hba;
2067
2068                 /* Delivery queue structure */
2069                 spin_lock_init(&dq->lock);
2070                 INIT_LIST_HEAD(&dq->list);
2071                 dq->id = i;
2072                 dq->hisi_hba = hisi_hba;
2073
2074                 /* Delivery queue */
2075                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2076                 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2077                                                 &hisi_hba->cmd_hdr_dma[i],
2078                                                 GFP_KERNEL);
2079                 if (!hisi_hba->cmd_hdr[i])
2080                         goto err_out;
2081
2082                 /* Completion queue */
2083                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2084                 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2085                                                 &hisi_hba->complete_hdr_dma[i],
2086                                                 GFP_KERNEL);
2087                 if (!hisi_hba->complete_hdr[i])
2088                         goto err_out;
2089         }
2090
2091         s = sizeof(struct hisi_sas_slot_buf_table);
2092         hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
2093         if (!hisi_hba->buffer_pool)
2094                 goto err_out;
2095
2096         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2097         hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2098                                              GFP_KERNEL);
2099         if (!hisi_hba->itct)
2100                 goto err_out;
2101         memset(hisi_hba->itct, 0, s);
2102
2103         hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2104                                            sizeof(struct hisi_sas_slot),
2105                                            GFP_KERNEL);
2106         if (!hisi_hba->slot_info)
2107                 goto err_out;
2108
2109         s = max_command_entries * sizeof(struct hisi_sas_iost);
2110         hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
2111                                              GFP_KERNEL);
2112         if (!hisi_hba->iost)
2113                 goto err_out;
2114
2115         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2116         hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
2117                                                    &hisi_hba->breakpoint_dma,
2118                                                    GFP_KERNEL);
2119         if (!hisi_hba->breakpoint)
2120                 goto err_out;
2121
2122         hisi_hba->slot_index_count = max_command_entries;
2123         s = hisi_hba->slot_index_count / BITS_PER_BYTE;
2124         hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
2125         if (!hisi_hba->slot_index_tags)
2126                 goto err_out;
2127
2128         s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2129         hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
2130                                                     &hisi_hba->initial_fis_dma,
2131                                                     GFP_KERNEL);
2132         if (!hisi_hba->initial_fis)
2133                 goto err_out;
2134
2135         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2136         hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
2137                                         &hisi_hba->sata_breakpoint_dma,
2138                                         GFP_KERNEL);
2139         if (!hisi_hba->sata_breakpoint)
2140                 goto err_out;
2141         hisi_sas_init_mem(hisi_hba);
2142
2143         hisi_sas_slot_index_init(hisi_hba);
2144
2145         hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2146         if (!hisi_hba->wq) {
2147                 dev_err(dev, "sas_alloc: failed to create workqueue\n");
2148                 goto err_out;
2149         }
2150
2151         return 0;
2152 err_out:
2153         return -ENOMEM;
2154 }
2155 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2156
2157 void hisi_sas_free(struct hisi_hba *hisi_hba)
2158 {
2159         dma_pool_destroy(hisi_hba->buffer_pool);
2160
2161         if (hisi_hba->wq)
2162                 destroy_workqueue(hisi_hba->wq);
2163 }
2164 EXPORT_SYMBOL_GPL(hisi_sas_free);
2165
2166 void hisi_sas_rst_work_handler(struct work_struct *work)
2167 {
2168         struct hisi_hba *hisi_hba =
2169                 container_of(work, struct hisi_hba, rst_work);
2170
2171         hisi_sas_controller_reset(hisi_hba);
2172 }
2173 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2174
2175 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2176 {
2177         struct hisi_sas_rst *rst =
2178                 container_of(work, struct hisi_sas_rst, work);
2179
2180         if (!hisi_sas_controller_reset(rst->hisi_hba))
2181                 rst->done = true;
2182         complete(rst->completion);
2183 }
2184 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2185
/**
 * hisi_sas_get_fw_info - read controller configuration from firmware
 * @hisi_hba: host controller struct
 *
 * Reads SAS address, phy/queue counts and (for DT-based platform
 * devices) the syscon reset registers.  The reference clock is
 * optional.  Returns 0 on success, -ENOENT if a required property
 * is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	/* Reference clock is optional; frequency defaults to 0 if absent */
	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2253
/*
 * Allocate and minimally initialise the Scsi_Host for a platform-device
 * based controller: read FW properties, set the DMA mask, map registers
 * and allocate per-host memories.  Returns NULL on any failure (devm
 * resources are released when the last host reference is put).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA; fall back to 32-bit */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	/* Second memory resource (SGPIO registers) is optional */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
2310
2311 int hisi_sas_probe(struct platform_device *pdev,
2312                    const struct hisi_sas_hw *hw)
2313 {
2314         struct Scsi_Host *shost;
2315         struct hisi_hba *hisi_hba;
2316         struct device *dev = &pdev->dev;
2317         struct asd_sas_phy **arr_phy;
2318         struct asd_sas_port **arr_port;
2319         struct sas_ha_struct *sha;
2320         int rc, phy_nr, port_nr, i;
2321
2322         shost = hisi_sas_shost_alloc(pdev, hw);
2323         if (!shost)
2324                 return -ENOMEM;
2325
2326         sha = SHOST_TO_SAS_HA(shost);
2327         hisi_hba = shost_priv(shost);
2328         platform_set_drvdata(pdev, sha);
2329
2330         phy_nr = port_nr = hisi_hba->n_phy;
2331
2332         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2333         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2334         if (!arr_phy || !arr_port) {
2335                 rc = -ENOMEM;
2336                 goto err_out_ha;
2337         }
2338
2339         sha->sas_phy = arr_phy;
2340         sha->sas_port = arr_port;
2341         sha->lldd_ha = hisi_hba;
2342
2343         shost->transportt = hisi_sas_stt;
2344         shost->max_id = HISI_SAS_MAX_DEVICES;
2345         shost->max_lun = ~0;
2346         shost->max_channel = 1;
2347         shost->max_cmd_len = 16;
2348         shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2349         shost->can_queue = hisi_hba->hw->max_command_entries;
2350         shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2351
2352         sha->sas_ha_name = DRV_NAME;
2353         sha->dev = hisi_hba->dev;
2354         sha->lldd_module = THIS_MODULE;
2355         sha->sas_addr = &hisi_hba->sas_addr[0];
2356         sha->num_phys = hisi_hba->n_phy;
2357         sha->core.shost = hisi_hba->shost;
2358
2359         for (i = 0; i < hisi_hba->n_phy; i++) {
2360                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2361                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2362         }
2363
2364         rc = scsi_add_host(shost, &pdev->dev);
2365         if (rc)
2366                 goto err_out_ha;
2367
2368         rc = sas_register_ha(sha);
2369         if (rc)
2370                 goto err_out_register_ha;
2371
2372         rc = hisi_hba->hw->hw_init(hisi_hba);
2373         if (rc)
2374                 goto err_out_register_ha;
2375
2376         scsi_scan_host(shost);
2377
2378         return 0;
2379
2380 err_out_register_ha:
2381         scsi_remove_host(shost);
2382 err_out_ha:
2383         hisi_sas_free(hisi_hba);
2384         scsi_host_put(shost);
2385         return rc;
2386 }
2387 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2388
2389 int hisi_sas_remove(struct platform_device *pdev)
2390 {
2391         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2392         struct hisi_hba *hisi_hba = sha->lldd_ha;
2393         struct Scsi_Host *shost = sha->core.shost;
2394
2395         if (timer_pending(&hisi_hba->timer))
2396                 del_timer(&hisi_hba->timer);
2397
2398         sas_unregister_ha(sha);
2399         sas_remove_host(sha->core.shost);
2400
2401         hisi_sas_free(hisi_hba);
2402         scsi_host_put(shost);
2403         return 0;
2404 }
2405 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2406
2407 static __init int hisi_sas_init(void)
2408 {
2409         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2410         if (!hisi_sas_stt)
2411                 return -ENOMEM;
2412
2413         return 0;
2414 }
2415
static __exit void hisi_sas_exit(void)
{
	/* Drop the transport template taken in hisi_sas_init() */
	sas_release_transport(hisi_sas_stt);
}
2420
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);