scsi: hisi_sas: Change common allocation mode of device id
[linux-2.6-microblaze.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
15 #define DEV_IS_GONE(dev) \
16         ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22                              struct domain_device *device,
23                              int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26                                 void *funcdata);
27
/*
 * Map an ATA command code (taken from the host-to-device FIS) onto the
 * SATA transport protocol the HW command header must declare.
 *
 * @fis:       H2D register FIS carrying the command (and features) field
 * @direction: DMA data direction; consulted only for commands that are
 *             not listed explicitly below
 *
 * The case lists mirror the ATA command set groupings (NCQ, PIO, DMA,
 * non-data); keep them in sync with the ATA_CMD_* definitions.
 */
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	/* NCQ (queued) commands */
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	/* PIO data-in/data-out commands */
	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	/* (non-queued) DMA commands */
	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	/* commands that transfer no data */
	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	/* SET MAX: protocol depends on the features sub-command */
	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		/* unknown command: fall back on the data direction */
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
105
106 void hisi_sas_sata_done(struct sas_task *task,
107                             struct hisi_sas_slot *slot)
108 {
109         struct task_status_struct *ts = &task->task_status;
110         struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
111         struct hisi_sas_status_buffer *status_buf =
112                         hisi_sas_status_buf_addr_mem(slot);
113         u8 *iu = &status_buf->iu[0];
114         struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
115
116         resp->frame_len = sizeof(struct dev_to_host_fis);
117         memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
118
119         ts->buf_valid_size = sizeof(*resp);
120 }
121 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
122
123 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
124 {
125         struct ata_queued_cmd *qc = task->uldd_task;
126
127         if (qc) {
128                 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
129                         qc->tf.command == ATA_CMD_FPDMA_READ) {
130                         *tag = qc->tag;
131                         return 1;
132                 }
133         }
134         return 0;
135 }
136 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
137
138 /*
139  * This function assumes linkrate mask fits in 8 bits, which it
140  * does for all HW versions supported.
141  */
142 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
143 {
144         u16 rate = 0;
145         int i;
146
147         max -= SAS_LINK_RATE_1_5_GBPS;
148         for (i = 0; i <= max; i++)
149                 rate |= 1 << (i * 2);
150         return rate;
151 }
152 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
153
154 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
155 {
156         return device->port->ha->lldd_ha;
157 }
158
/* Convert a libsas asd_sas_port to its embedding hisi_sas_port. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
164
165 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
166 {
167         int phy_no;
168
169         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
170                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
171 }
172 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
173
174 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
175 {
176         void *bitmap = hisi_hba->slot_index_tags;
177
178         clear_bit(slot_idx, bitmap);
179 }
180
/* Release IPTT @slot_idx back to the pool (caller holds hisi_hba->lock). */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
185
186 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
187 {
188         void *bitmap = hisi_hba->slot_index_tags;
189
190         set_bit(slot_idx, bitmap);
191 }
192
193 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
194 {
195         unsigned int index;
196         void *bitmap = hisi_hba->slot_index_tags;
197
198         index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
199                         hisi_hba->last_slot_index + 1);
200         if (index >= hisi_hba->slot_index_count) {
201                 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
202                                            0);
203                 if (index >= hisi_hba->slot_index_count)
204                         return -SAS_QUEUE_FULL;
205         }
206         hisi_sas_slot_index_set(hisi_hba, index);
207         *slot_idx = index;
208         hisi_hba->last_slot_index = index;
209
210         return 0;
211 }
212
213 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
214 {
215         int i;
216
217         for (i = 0; i < hisi_hba->slot_index_count; ++i)
218                 hisi_sas_slot_index_clear(hisi_hba, i);
219 }
220
/*
 * Tear down a completed/aborted command slot: unmap the task's DMA
 * scatterlist (non-ATA only; libata handles its own mapping), free the
 * slot's command/status buffer, unhook the slot from its device list
 * and release its IPTT.  @task may be NULL when only the slot needs
 * freeing.
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		/* already freed (or never attached) — nothing to do */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	/* slot->entry lives on the owning device's list, guarded by dq->lock */
	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	/* IPTT bitmap is guarded by hisi_hba->lock */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
258
/* Fill the slot's command header/table for an SMP task (HW-specific). */
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}
264
/* Fill the slot for an SSP task; @is_tmf/@tmf select TMF framing. */
static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
271
/* Fill the slot's command header/table for a SATA/STP task (HW-specific). */
static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}
277
/* Fill the slot for an internal abort of @tag_to_abort on @device_id. */
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
285
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;

	/* ABORT TASK TMF only applies to SSP tasks */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	/* the task to abort is identified by its slot index (IPTT) */
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	if (task->task_done)
		task->task_done(task);
}
319
320 static int hisi_sas_task_prep(struct sas_task *task,
321                               struct hisi_sas_dq **dq_pointer,
322                               int is_tmf, struct hisi_sas_tmf_task *tmf,
323                               int *pass)
324 {
325         struct domain_device *device = task->dev;
326         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
327         struct hisi_sas_device *sas_dev = device->lldd_dev;
328         struct hisi_sas_port *port;
329         struct hisi_sas_slot *slot;
330         struct hisi_sas_cmd_hdr *cmd_hdr_base;
331         struct asd_sas_port *sas_port = device->port;
332         struct device *dev = hisi_hba->dev;
333         int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
334         int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
335         unsigned long flags, flags_dq;
336         struct hisi_sas_dq *dq;
337         int wr_q_index;
338
339         if (!sas_port) {
340                 struct task_status_struct *ts = &task->task_status;
341
342                 ts->resp = SAS_TASK_UNDELIVERED;
343                 ts->stat = SAS_PHY_DOWN;
344                 /*
345                  * libsas will use dev->port, should
346                  * not call task_done for sata
347                  */
348                 if (device->dev_type != SAS_SATA_DEV)
349                         task->task_done(task);
350                 return -ECOMM;
351         }
352
353         if (DEV_IS_GONE(sas_dev)) {
354                 if (sas_dev)
355                         dev_info(dev, "task prep: device %d not ready\n",
356                                  sas_dev->device_id);
357                 else
358                         dev_info(dev, "task prep: device %016llx not ready\n",
359                                  SAS_ADDR(device->sas_addr));
360
361                 return -ECOMM;
362         }
363
364         *dq_pointer = dq = sas_dev->dq;
365
366         port = to_hisi_sas_port(sas_port);
367         if (port && !port->port_attached) {
368                 dev_info(dev, "task prep: %s port%d not attach device\n",
369                          (dev_is_sata(device)) ?
370                          "SATA/STP" : "SAS",
371                          device->port->id);
372
373                 return -ECOMM;
374         }
375
376         if (!sas_protocol_ata(task->task_proto)) {
377                 unsigned int req_len, resp_len;
378
379                 if (task->num_scatter) {
380                         n_elem = dma_map_sg(dev, task->scatter,
381                                             task->num_scatter, task->data_dir);
382                         if (!n_elem) {
383                                 rc = -ENOMEM;
384                                 goto prep_out;
385                         }
386                 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
387                         n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
388                                                 1, DMA_TO_DEVICE);
389                         if (!n_elem_req) {
390                                 rc = -ENOMEM;
391                                 goto prep_out;
392                         }
393                         req_len = sg_dma_len(&task->smp_task.smp_req);
394                         if (req_len & 0x3) {
395                                 rc = -EINVAL;
396                                 goto err_out_dma_unmap;
397                         }
398                         n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
399                                                  1, DMA_FROM_DEVICE);
400                         if (!n_elem_resp) {
401                                 rc = -ENOMEM;
402                                 goto err_out_dma_unmap;
403                         }
404                         resp_len = sg_dma_len(&task->smp_task.smp_resp);
405                         if (resp_len & 0x3) {
406                                 rc = -EINVAL;
407                                 goto err_out_dma_unmap;
408                         }
409                 }
410         } else
411                 n_elem = task->num_scatter;
412
413         if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
414                 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
415                         n_elem);
416                 rc = -EINVAL;
417                 goto err_out_dma_unmap;
418         }
419
420         spin_lock_irqsave(&hisi_hba->lock, flags);
421         if (hisi_hba->hw->slot_index_alloc)
422                 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
423                                                     device);
424         else
425                 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
426         spin_unlock_irqrestore(&hisi_hba->lock, flags);
427         if (rc)
428                 goto err_out_dma_unmap;
429
430         slot = &hisi_hba->slot_info[slot_idx];
431         memset(slot, 0, sizeof(struct hisi_sas_slot));
432
433         slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
434                                    GFP_ATOMIC, &slot->buf_dma);
435         if (!slot->buf) {
436                 rc = -ENOMEM;
437                 goto err_out_tag;
438         }
439
440         spin_lock_irqsave(&dq->lock, flags_dq);
441         wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
442         if (wr_q_index < 0) {
443                 spin_unlock_irqrestore(&dq->lock, flags_dq);
444                 goto err_out_buf;
445         }
446
447         list_add_tail(&slot->delivery, &dq->list);
448         spin_unlock_irqrestore(&dq->lock, flags_dq);
449
450         dlvry_queue = dq->id;
451         dlvry_queue_slot = wr_q_index;
452
453         slot->idx = slot_idx;
454         slot->n_elem = n_elem;
455         slot->dlvry_queue = dlvry_queue;
456         slot->dlvry_queue_slot = dlvry_queue_slot;
457         cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
458         slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
459         slot->task = task;
460         slot->port = port;
461         if (is_tmf)
462                 slot->is_internal = true;
463         task->lldd_task = slot;
464         INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
465
466         memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
467         memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
468         memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
469
470         switch (task->task_proto) {
471         case SAS_PROTOCOL_SMP:
472                 hisi_sas_task_prep_smp(hisi_hba, slot);
473                 break;
474         case SAS_PROTOCOL_SSP:
475                 hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
476                 break;
477         case SAS_PROTOCOL_SATA:
478         case SAS_PROTOCOL_STP:
479         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
480                 hisi_sas_task_prep_ata(hisi_hba, slot);
481                 break;
482         default:
483                 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
484                         task->task_proto);
485                 break;
486         }
487
488         spin_lock_irqsave(&dq->lock, flags);
489         list_add_tail(&slot->entry, &sas_dev->list);
490         spin_unlock_irqrestore(&dq->lock, flags);
491         spin_lock_irqsave(&task->task_state_lock, flags);
492         task->task_state_flags |= SAS_TASK_AT_INITIATOR;
493         spin_unlock_irqrestore(&task->task_state_lock, flags);
494
495         ++(*pass);
496         slot->ready = 1;
497
498         return 0;
499
500 err_out_buf:
501         dma_pool_free(hisi_hba->buffer_pool, slot->buf,
502                       slot->buf_dma);
503 err_out_tag:
504         spin_lock_irqsave(&hisi_hba->lock, flags);
505         hisi_sas_slot_index_free(hisi_hba, slot_idx);
506         spin_unlock_irqrestore(&hisi_hba->lock, flags);
507 err_out_dma_unmap:
508         if (!sas_protocol_ata(task->task_proto)) {
509                 if (task->num_scatter) {
510                         dma_unmap_sg(dev, task->scatter, task->num_scatter,
511                              task->data_dir);
512                 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
513                         if (n_elem_req)
514                                 dma_unmap_sg(dev, &task->smp_task.smp_req,
515                                              1, DMA_TO_DEVICE);
516                         if (n_elem_resp)
517                                 dma_unmap_sg(dev, &task->smp_task.smp_resp,
518                                              1, DMA_FROM_DEVICE);
519                 }
520         }
521 prep_out:
522         dev_err(dev, "task prep: failed[%d]!\n", rc);
523         return rc;
524 }
525
526 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
527                               int is_tmf, struct hisi_sas_tmf_task *tmf)
528 {
529         u32 rc;
530         u32 pass = 0;
531         unsigned long flags;
532         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
533         struct device *dev = hisi_hba->dev;
534         struct hisi_sas_dq *dq = NULL;
535
536         if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
537                 return -EINVAL;
538
539         /* protect task_prep and start_delivery sequence */
540         rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
541         if (rc)
542                 dev_err(dev, "task exec: failed[%d]!\n", rc);
543
544         if (likely(pass)) {
545                 spin_lock_irqsave(&dq->lock, flags);
546                 hisi_hba->hw->start_delivery(dq);
547                 spin_unlock_irqrestore(&dq->lock, flags);
548         }
549
550         return rc;
551 }
552
/*
 * Report that OOB completed on @phy_no: publish the link rates to the
 * transport phy, fill the identify frame (SAS only) and notify libsas
 * so the port/domain can be (re)formed.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		/* mirror negotiated/HW-capable rates into the transport phy */
		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing: frame_rcvd already holds the D2H FIS */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
593
/*
 * Allocate a free device-id slot for @device, searching round-robin
 * from just past the last allocated id and wrapping modulo
 * HISI_SAS_MAX_DEVICES.  Each device is bound to a delivery queue by
 * id modulo queue_count.  Returns the initialised hisi_sas_device, or
 * NULL when no slot is free.
 *
 * NOTE(review): the search runs while i != last, so the slot at index
 * 'last' itself is never examined — confirm this exclusion is intended.
 */
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			/* spread devices across the delivery queues */
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
626
627 static int hisi_sas_dev_found(struct domain_device *device)
628 {
629         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
630         struct domain_device *parent_dev = device->parent;
631         struct hisi_sas_device *sas_dev;
632         struct device *dev = hisi_hba->dev;
633
634         if (hisi_hba->hw->alloc_dev)
635                 sas_dev = hisi_hba->hw->alloc_dev(device);
636         else
637                 sas_dev = hisi_sas_alloc_dev(device);
638         if (!sas_dev) {
639                 dev_err(dev, "fail alloc dev: max support %d devices\n",
640                         HISI_SAS_MAX_DEVICES);
641                 return -EINVAL;
642         }
643
644         device->lldd_dev = sas_dev;
645         hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
646
647         if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
648                 int phy_no;
649                 u8 phy_num = parent_dev->ex_dev.num_phys;
650                 struct ex_phy *phy;
651
652                 for (phy_no = 0; phy_no < phy_num; phy_no++) {
653                         phy = &parent_dev->ex_dev.ex_phy[phy_no];
654                         if (SAS_ADDR(phy->attached_sas_addr) ==
655                                 SAS_ADDR(device->sas_addr))
656                                 break;
657                 }
658
659                 if (phy_no == phy_num) {
660                         dev_info(dev, "dev found: no attached "
661                                  "dev:%016llx at ex:%016llx\n",
662                                  SAS_ADDR(device->sas_addr),
663                                  SAS_ADDR(parent_dev->sas_addr));
664                         return -EINVAL;
665                 }
666         }
667
668         dev_info(dev, "dev[%d:%x] found\n",
669                 sas_dev->device_id, sas_dev->dev_type);
670
671         return 0;
672 }
673
/* SCSI host .slave_configure: cap queue depth for non-SATA devices. */
static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int rc;

	rc = sas_slave_configure(sdev);
	if (rc)
		return rc;

	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
686
/* SCSI host .scan_start: bring up the PHYs to kick off discovery. */
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
693
694 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
695 {
696         struct hisi_hba *hisi_hba = shost_priv(shost);
697         struct sas_ha_struct *sha = &hisi_hba->sha;
698
699         /* Wait for PHY up interrupt to occur */
700         if (time < HZ)
701                 return 0;
702
703         sas_drain_work(sha);
704         return 1;
705 }
706
/* Deferred PHY-up handler: notify the SL layer, then report to libsas. */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
718
/* Deferred link-reset handler: issue PHY_FUNC_LINK_RESET on the phy. */
static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
727
/* Work handlers indexed by hisi_sas_phy_event; see hisi_sas_phy_init(). */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
732
733 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
734                                 enum hisi_sas_phy_event event)
735 {
736         struct hisi_hba *hisi_hba = phy->hisi_hba;
737
738         if (WARN_ON(event >= HISI_PHYES_NUM))
739                 return false;
740
741         return queue_work(hisi_hba->wq, &phy->works[event]);
742 }
743 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
744
/*
 * Initialise the driver and libsas state for PHY @phy_no: default link
 * rates, identify fields, back-pointers, and the per-event work items.
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	/* hook up the deferred handlers (PHY up, link reset) */
	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
772
/*
 * Called when libsas forms a port containing @sas_phy: bind the
 * hisi_sas_port to the phy's HW port id and cross-link phy <-> port.
 */
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
792
/*
 * Complete @task as aborted (SAS_TASK_COMPLETE/SAS_ABORTED_TASK) and
 * free its slot.  @task may be NULL, in which case only the slot is
 * released.
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
813
/* Abort-complete every outstanding slot on @device's list.
 * hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* _safe: hisi_sas_do_release_task() removes each slot from the list */
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
824
825 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
826 {
827         struct hisi_sas_device *sas_dev;
828         struct domain_device *device;
829         int i;
830
831         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
832                 sas_dev = &hisi_hba->devices[i];
833                 device = sas_dev->sas_device;
834
835                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
836                     !device)
837                         continue;
838
839                 hisi_sas_release_task(hisi_hba, device);
840         }
841 }
842 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
843
844 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
845                                 struct domain_device *device)
846 {
847         if (hisi_hba->hw->dereg_device)
848                 hisi_hba->hw->dereg_device(hisi_hba, device);
849 }
850
/*
 * hisi_sas_dev_gone() - libsas callback: a domain device has disappeared.
 *
 * Unless a controller reset is in flight (which will clean up for us),
 * abort all in-flight I/O for the device, de-register it, and clear its
 * ITCT entry before releasing the driver-side device slot.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	/* During a controller reset the reset path owns the teardown */
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		/* Flush any commands still queued for this device */
		hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		/* Invalidate the device's ITCT context in hardware */
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	/* Mark the table entry free for reuse */
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
874
875 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
876 {
877         return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
878 }
879
880 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
881                         struct sas_phy_linkrates *r)
882 {
883         struct sas_phy_linkrates _r;
884
885         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
886         struct asd_sas_phy *sas_phy = &phy->sas_phy;
887         enum sas_linkrate min, max;
888
889         if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
890                 max = sas_phy->phy->maximum_linkrate;
891                 min = r->minimum_linkrate;
892         } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
893                 max = r->maximum_linkrate;
894                 min = sas_phy->phy->minimum_linkrate;
895         } else
896                 return;
897
898         _r.maximum_linkrate = max;
899         _r.minimum_linkrate = min;
900
901         hisi_hba->hw->phy_disable(hisi_hba, phy_no);
902         msleep(100);
903         hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
904         hisi_hba->hw->phy_start(hisi_hba, phy_no);
905 }
906
907 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
908                                 void *funcdata)
909 {
910         struct sas_ha_struct *sas_ha = sas_phy->ha;
911         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
912         int phy_no = sas_phy->id;
913
914         switch (func) {
915         case PHY_FUNC_HARD_RESET:
916                 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
917                 break;
918
919         case PHY_FUNC_LINK_RESET:
920                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
921                 msleep(100);
922                 hisi_hba->hw->phy_start(hisi_hba, phy_no);
923                 break;
924
925         case PHY_FUNC_DISABLE:
926                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
927                 break;
928
929         case PHY_FUNC_SET_LINK_RATE:
930                 hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
931                 break;
932         case PHY_FUNC_GET_EVENTS:
933                 if (hisi_hba->hw->get_events) {
934                         hisi_hba->hw->get_events(hisi_hba, phy_no);
935                         break;
936                 }
937                 /* fallthru */
938         case PHY_FUNC_RELEASE_SPINUP_HOLD:
939         default:
940                 return -EOPNOTSUPP;
941         }
942         return 0;
943 }
944
945 static void hisi_sas_task_done(struct sas_task *task)
946 {
947         if (!del_timer(&task->slow_task->timer))
948                 return;
949         complete(&task->slow_task->completion);
950 }
951
952 static void hisi_sas_tmf_timedout(struct timer_list *t)
953 {
954         struct sas_task_slow *slow = from_timer(slow, t, timer);
955         struct sas_task *task = slow->task;
956         unsigned long flags;
957
958         spin_lock_irqsave(&task->task_state_lock, flags);
959         if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
960                 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
961         spin_unlock_irqrestore(&task->task_state_lock, flags);
962
963         complete(&task->slow_task->completion);
964 }
965
966 #define TASK_TIMEOUT 20
967 #define TASK_RETRY 3
968 #define INTERNAL_ABORT_TIMEOUT 6
/*
 * hisi_sas_exec_internal_tmf_task() - build an internal slow task (SSP TMF
 * frame or SATA FIS), submit it, and wait for completion or timeout.
 * @device: target domain device
 * @parameter: SSP task frame or host-to-dev FIS to send
 * @para_len: length of @parameter in bytes
 * @tmf: TMF descriptor, or NULL for the SATA soft-reset path
 *
 * Retries up to TASK_RETRY times on unexpected status. Returns
 * TMF_RESP_FUNC_COMPLETE/TMF_RESP_FUNC_SUCC on success, a negative errno
 * on submission failure, the residual byte count on DATA_UNDERRUN, or
 * TMF_RESP_FUNC_FAILED.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			/* SATA/STP: @parameter is a register FIS */
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Arm the timeout; hisi_sas_tmf_timedout() completes us */
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				/*
				 * Orphan the slot: HW may still own it, so it
				 * must not point back at the freed task.
				 */
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		/* TMF accepted and completed */
		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		/* TMF succeeded (e.g. QUERY TASK: task present in LU) */
		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Any other status: free this attempt and retry */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
1070
1071 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1072                 bool reset, int pmp, u8 *fis)
1073 {
1074         struct ata_taskfile tf;
1075
1076         ata_tf_init(dev, &tf);
1077         if (reset)
1078                 tf.ctl |= ATA_SRST;
1079         else
1080                 tf.ctl &= ~ATA_SRST;
1081         tf.command = ATA_CMD_DEV_RESET;
1082         ata_tf_to_fis(&tf, pmp, 0, fis);
1083 }
1084
/*
 * hisi_sas_softreset_ata_disk() - SATA software reset sequence.
 *
 * Per the SATA spec this is two device-control FIS writes per link:
 * first assert SRST, then de-assert it. On full success all outstanding
 * tasks for the device are released back to libsas.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	/* Phase 1: assert SRST on every edge link */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: de-assert SRST, only if phase 1 fully succeeded */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	/* Reset complete: flush the device's outstanding slots */
	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1123
1124 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1125                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
1126 {
1127         struct sas_ssp_task ssp_task;
1128
1129         if (!(device->tproto & SAS_PROTOCOL_SSP))
1130                 return TMF_RESP_FUNC_ESUPP;
1131
1132         memcpy(ssp_task.LUN, lun, 8);
1133
1134         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1135                                 sizeof(ssp_task), tmf);
1136 }
1137
/*
 * hisi_sas_refresh_port_id() - after a controller reset, rebind each
 * registered device to the hardware port id of a phy that came back up,
 * and re-program its ITCT entry. Devices whose port has no live phy get
 * port id 0xff (invalid) so they are fenced off.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		/* Only devices that are registered and attached to a port */
		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Find any phy of this port that is up per @state */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;	/* no live phy: invalidate */
	}
}
1176
/*
 * hisi_sas_rescan_topology() - after a controller reset, compare per-phy
 * link state before (@old_state) and after (@state) the reset and notify
 * libsas accordingly: broadcast a rescan for expander ports that stayed
 * up, and report phys that went down.
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/* Notify each port only once (ports span multiple phys) */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				/* Expander: trigger a domain revalidation */
				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}
1210
/*
 * hisi_sas_controller_reset() - full soft reset of the HBA.
 *
 * Serialized via HISI_SAS_RESET_BIT. While HISI_SAS_REJECT_CMD_BIT is
 * set, new command submission is refused. On success, all stale tasks
 * are released, phys are re-initialized, port ids refreshed, and the
 * topology delta is reported to libsas.
 *
 * Returns 0 on success, -1 if reset is unsupported or already running,
 * or the hw soft_reset() error code.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one reset at a time */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	/* Snapshot phy-up bitmap to diff against after the reset */
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	/* Complete every outstanding slot back to libsas as aborted */
	hisi_sas_release_tasks(hisi_hba);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}
1258
/*
 * hisi_sas_abort_task() - libsas error-handler callback to abort one task.
 *
 * Strategy depends on protocol:
 *  - SSP: issue an ABORT TASK TMF plus a per-command internal abort; the
 *    slot is freed only when the TMF says the IO is gone AND the internal
 *    abort did not already complete it (see comment below).
 *  - SATA/STP: internal device abort, de-register, then soft reset.
 *  - SMP: per-command internal abort; free the slot if that fails.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	/* Task already completed: nothing to abort */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		/* Also abort the command inside the controller by tag */
		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			/* SATA has no per-command TMF: abort the whole dev */
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
			     HISI_SAS_INT_ABT_CMD, tag);
		/* Internal abort failed: reclaim the slot ourselves */
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task)
			hisi_sas_do_release_task(hisi_hba, task, slot);
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
1346
1347 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1348 {
1349         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1350         struct device *dev = hisi_hba->dev;
1351         struct hisi_sas_tmf_task tmf_task;
1352         int rc = TMF_RESP_FUNC_FAILED;
1353
1354         rc = hisi_sas_internal_task_abort(hisi_hba, device,
1355                                         HISI_SAS_INT_ABT_DEV, 0);
1356         if (rc < 0) {
1357                 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1358                 return TMF_RESP_FUNC_FAILED;
1359         }
1360         hisi_sas_dereg_device(hisi_hba, device);
1361
1362         tmf_task.tmf = TMF_ABORT_TASK_SET;
1363         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1364
1365         if (rc == TMF_RESP_FUNC_COMPLETE)
1366                 hisi_sas_release_task(hisi_hba, device);
1367
1368         return rc;
1369 }
1370
1371 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1372 {
1373         int rc = TMF_RESP_FUNC_FAILED;
1374         struct hisi_sas_tmf_task tmf_task;
1375
1376         tmf_task.tmf = TMF_CLEAR_ACA;
1377         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1378
1379         return rc;
1380 }
1381
1382 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1383 {
1384         struct sas_phy *phy = sas_get_local_phy(device);
1385         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1386                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1387         rc = sas_phy_reset(phy, reset_type);
1388         sas_put_local_phy(phy);
1389         msleep(2000);
1390         return rc;
1391 }
1392
/*
 * hisi_sas_I_T_nexus_reset() - libsas I_T nexus reset handler.
 *
 * Only acts when the device is in error-handling state; flushes the
 * device's commands in the controller, de-registers it, then resets the
 * local phy. On success (or vanished device) the outstanding slots are
 * released.
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Only reset devices the EH has marked; then clear the mark */
	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	/* -ENODEV: the device is gone, its slots can still be flushed */
	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1419
/*
 * hisi_sas_lu_reset() - libsas logical-unit reset handler.
 *
 * SATA devices have no LU RESET TMF: abort internally, then hard-reset
 * the local phy. SSP devices get an internal abort followed by the
 * LU RESET TMF. On success the device's outstanding slots are released.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		/* 1 = hard reset */
		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}
1468
1469 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1470 {
1471         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1472         HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1473
1474         queue_work(hisi_hba->wq, &r.work);
1475         wait_for_completion(r.completion);
1476         if (r.done)
1477                 return TMF_RESP_FUNC_COMPLETE;
1478
1479         return TMF_RESP_FUNC_FAILED;
1480 }
1481
1482 static int hisi_sas_query_task(struct sas_task *task)
1483 {
1484         struct scsi_lun lun;
1485         struct hisi_sas_tmf_task tmf_task;
1486         int rc = TMF_RESP_FUNC_FAILED;
1487
1488         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1489                 struct scsi_cmnd *cmnd = task->uldd_task;
1490                 struct domain_device *device = task->dev;
1491                 struct hisi_sas_slot *slot = task->lldd_task;
1492                 u32 tag = slot->idx;
1493
1494                 int_to_scsilun(cmnd->device->lun, &lun);
1495                 tmf_task.tmf = TMF_QUERY_TASK;
1496                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1497
1498                 rc = hisi_sas_debug_issue_ssp_tmf(device,
1499                                                   lun.scsi_lun,
1500                                                   &tmf_task);
1501                 switch (rc) {
1502                 /* The task is still in Lun, release it then */
1503                 case TMF_RESP_FUNC_SUCC:
1504                 /* The task is not in Lun or failed, reset the phy */
1505                 case TMF_RESP_FUNC_FAILED:
1506                 case TMF_RESP_FUNC_COMPLETE:
1507                         break;
1508                 default:
1509                         rc = TMF_RESP_FUNC_FAILED;
1510                         break;
1511                 }
1512         }
1513         return rc;
1514 }
1515
/*
 * hisi_sas_internal_abort_task_exec() - build and deliver an internal
 * abort command to the controller.
 * @hisi_hba: host controller struct
 * @device_id: driver device id the abort targets
 * @task: slow task carrying the abort
 * @abort_flag: HISI_SAS_INT_ABT_CMD (single IO) or _DEV (whole device)
 * @task_tag: slot tag to abort (CMD mode only)
 *
 * Allocates a slot and command buffer, builds the abort command header,
 * and hands the slot to the delivery queue. Returns 0 on success,
 * -EINVAL while commands are being rejected (controller reset), -1 with
 * no port, or a negative errno on resource exhaustion.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	/* Controller reset in progress: refuse new commands */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
			GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	/* Reserve a write position in the delivery queue */
	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		goto err_out_buf;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	/* Clear header, command table and status buffer before prep */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	slot->ready = 1;
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1615
1616 /**
1617  * hisi_sas_internal_task_abort -- execute an internal
1618  * abort command for single IO command or a device
1619  * @hisi_hba: host controller struct
1620  * @device: domain device
1621  * @abort_flag: mode of operation, device or single IO
1622  * @tag: tag of IO to be aborted (only relevant to single
1623  *       IO mode)
1624  */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	/*
	 * Build a slow task with a timeout; its completion is signalled
	 * either by the abort CQ entry or by hisi_sas_tmf_timedout() when
	 * the timer expires.
	 */
	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		/* Delivery failed: stop the timer so it cannot fire on a
		 * task that was never queued, then free and bail out.
		 */
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	/* Assume failure; upgraded below from the returned task status. */
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			/* Timer fired before the CQ completed the abort:
			 * detach the slot from the task we are freeing.
			 */
			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
1703
/* libsas lldd_port_formed hook: forward to the driver's port setup. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
1708
/* libsas lldd_port_deformed hook: intentionally empty, no HW work needed. */
static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
1712
1713 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1714                         u8 reg_index, u8 reg_count, u8 *write_data)
1715 {
1716         struct hisi_hba *hisi_hba = sha->lldd_ha;
1717
1718         if (!hisi_hba->hw->write_gpio)
1719                 return -EOPNOTSUPP;
1720
1721         return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1722                                 reg_index, reg_count, write_data);
1723 }
1724
/* Reset the driver-side phy state after the link has been lost. */
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}
1731
/*
 * hisi_sas_phy_down - handle a phy-down event reported by the hardware
 * @hisi_hba: host controller struct
 * @phy_no: index of the phy that went down
 * @rdy: non-zero when the phy is down but ready (link re-established)
 *
 * For "down but ready" the attached device is re-reported and the port
 * re-formed.  Otherwise libsas is notified of the loss of signal and the
 * phy is detached from its port; the port itself is only marked detached
 * when no other phy keeps it up.
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/* SAS wide port: only mark the port as
				 * detached once its last phy is gone.
				 */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1763
1764 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1765 {
1766         int i;
1767
1768         for (i = 0; i < hisi_hba->queue_count; i++) {
1769                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1770
1771                 tasklet_kill(&cq->tasklet);
1772         }
1773 }
1774 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1775
/* SAS transport template, shared with the hw-specific (v1/v2/v3) drivers. */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* sysfs attributes hooked into the SCSI host template below. */
static struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
1783
/*
 * SCSI host template shared by the hw-specific drivers.  Most handlers are
 * the generic libsas ones; can_queue/cmd_per_lun/sg_tablesize here are
 * placeholders that hisi_sas_probe() overrides with per-HW limits.
 */
static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
	.shost_attrs		= host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
1807
1808 static struct sas_domain_function_template hisi_sas_transport_ops = {
1809         .lldd_dev_found         = hisi_sas_dev_found,
1810         .lldd_dev_gone          = hisi_sas_dev_gone,
1811         .lldd_execute_task      = hisi_sas_queue_command,
1812         .lldd_control_phy       = hisi_sas_control_phy,
1813         .lldd_abort_task        = hisi_sas_abort_task,
1814         .lldd_abort_task_set    = hisi_sas_abort_task_set,
1815         .lldd_clear_aca         = hisi_sas_clear_aca,
1816         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
1817         .lldd_lu_reset          = hisi_sas_lu_reset,
1818         .lldd_query_task        = hisi_sas_query_task,
1819         .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1820         .lldd_port_formed       = hisi_sas_port_formed,
1821         .lldd_port_deformed = hisi_sas_port_deformed,
1822         .lldd_write_gpio = hisi_sas_write_gpio,
1823 };
1824
1825 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1826 {
1827         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1828
1829         for (i = 0; i < hisi_hba->queue_count; i++) {
1830                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1831                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1832
1833                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1834                 memset(hisi_hba->cmd_hdr[i], 0, s);
1835                 dq->wr_point = 0;
1836
1837                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1838                 memset(hisi_hba->complete_hdr[i], 0, s);
1839                 cq->rd_point = 0;
1840         }
1841
1842         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1843         memset(hisi_hba->initial_fis, 0, s);
1844
1845         s = max_command_entries * sizeof(struct hisi_sas_iost);
1846         memset(hisi_hba->iost, 0, s);
1847
1848         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1849         memset(hisi_hba->breakpoint, 0, s);
1850
1851         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1852         memset(hisi_hba->sata_breakpoint, 0, s);
1853 }
1854 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1855
/*
 * hisi_sas_alloc - allocate and initialise all per-HBA software state and
 * DMA memory (queues, ITCT, IOST, breakpoint tables, slot pool, tag bitmap)
 * @hisi_hba: host controller struct
 * @shost: SCSI host (currently unused here)
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.  Partial
 * allocations are NOT unwound here; the caller is expected to run
 * hisi_sas_free() on failure (see hisi_sas_shost_alloc()).  devm_*
 * allocations are released automatically with the device.
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	/* Device table: device_id is fixed to the array index. */
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	/* Per-slot command table + status buffers come from this pool. */
	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/* Slot tag bitmap: one bit per command entry. */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1964
/*
 * hisi_sas_free - release everything hisi_sas_alloc() set up.  Safe to
 * call on a partially-allocated HBA: each pointer is checked before
 * freeing (and dma_pool_destroy()/free of NULL are no-ops).  devm_*
 * allocations from hisi_sas_alloc() are not freed here; they are tied
 * to the device lifetime.
 */
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);


	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
2019
/* Workqueue handler: run an asynchronous controller reset. */
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2028
/*
 * Workqueue handler for a synchronous controller reset: record success in
 * rst->done and signal the waiter via rst->completion.
 */
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2039
2040 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2041 {
2042         struct device *dev = hisi_hba->dev;
2043         struct platform_device *pdev = hisi_hba->platform_dev;
2044         struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2045         struct clk *refclk;
2046
2047         if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2048                                           SAS_ADDR_SIZE)) {
2049                 dev_err(dev, "could not get property sas-addr\n");
2050                 return -ENOENT;
2051         }
2052
2053         if (np) {
2054                 /*
2055                  * These properties are only required for platform device-based
2056                  * controller with DT firmware.
2057                  */
2058                 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2059                                         "hisilicon,sas-syscon");
2060                 if (IS_ERR(hisi_hba->ctrl)) {
2061                         dev_err(dev, "could not get syscon\n");
2062                         return -ENOENT;
2063                 }
2064
2065                 if (device_property_read_u32(dev, "ctrl-reset-reg",
2066                                              &hisi_hba->ctrl_reset_reg)) {
2067                         dev_err(dev,
2068                                 "could not get property ctrl-reset-reg\n");
2069                         return -ENOENT;
2070                 }
2071
2072                 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2073                                              &hisi_hba->ctrl_reset_sts_reg)) {
2074                         dev_err(dev,
2075                                 "could not get property ctrl-reset-sts-reg\n");
2076                         return -ENOENT;
2077                 }
2078
2079                 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2080                                              &hisi_hba->ctrl_clock_ena_reg)) {
2081                         dev_err(dev,
2082                                 "could not get property ctrl-clock-ena-reg\n");
2083                         return -ENOENT;
2084                 }
2085         }
2086
2087         refclk = devm_clk_get(dev, NULL);
2088         if (IS_ERR(refclk))
2089                 dev_dbg(dev, "no ref clk property\n");
2090         else
2091                 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2092
2093         if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2094                 dev_err(dev, "could not get property phy-count\n");
2095                 return -ENOENT;
2096         }
2097
2098         if (device_property_read_u32(dev, "queue-count",
2099                                      &hisi_hba->queue_count)) {
2100                 dev_err(dev, "could not get property queue-count\n");
2101                 return -ENOENT;
2102         }
2103
2104         return 0;
2105 }
2106 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2107
/*
 * hisi_sas_shost_alloc - allocate the Scsi_Host with embedded hisi_hba,
 * read firmware config, set the DMA mask, map MMIO (and optional SGPIO)
 * registers, and allocate all HBA memory.
 *
 * Returns the Scsi_Host on success or NULL on failure (the host reference
 * is dropped on every error path; a failed hisi_sas_alloc() additionally
 * unwinds its partial allocations via hisi_sas_free()).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	/* Callback is set later by users of the timer. */
	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA, fall back to 32-bit. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	/* Second MEM resource (SGPIO registers) is optional. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
2164
/*
 * hisi_sas_probe - common probe entry called by the hw-specific (v1/v2)
 * platform drivers
 * @pdev: platform device
 * @hw: hardware operation table of the calling driver
 *
 * Allocates the host, wires up the libsas ha structure, registers with
 * the SCSI midlayer and libsas, initialises the hardware and starts the
 * scan.  Returns 0 on success or a negative errno; on failure all host
 * resources are released.
 */
int hisi_sas_probe(struct platform_device *pdev,
			 const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	/* Pointer arrays libsas uses to address our phys/ports. */
	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	/* Override the template placeholders with the real HW limits. */
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
2242
2243 int hisi_sas_remove(struct platform_device *pdev)
2244 {
2245         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2246         struct hisi_hba *hisi_hba = sha->lldd_ha;
2247         struct Scsi_Host *shost = sha->core.shost;
2248
2249         if (timer_pending(&hisi_hba->timer))
2250                 del_timer(&hisi_hba->timer);
2251
2252         sas_unregister_ha(sha);
2253         sas_remove_host(sha->core.shost);
2254
2255         hisi_sas_free(hisi_hba);
2256         scsi_host_put(shost);
2257         return 0;
2258 }
2259 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2260
2261 static __init int hisi_sas_init(void)
2262 {
2263         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2264         if (!hisi_sas_stt)
2265                 return -ENOMEM;
2266
2267         return 0;
2268 }
2269
/* Module exit: release the transport template taken in hisi_sas_init(). */
static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}
2274
/*
 * This module only provides the shared core (transport template and the
 * exported probe/remove/alloc helpers); actual controllers are driven by
 * the hw-generation-specific modules that call hisi_sas_probe().
 */
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);