Merge branch 'for-4.18/hid-redragon' into for-linus
[linux-2.6-microblaze.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

/*
 * True when the LLDD device slot is absent or no longer bound to a live
 * libsas device.  Parenthesize the macro argument so an expression such
 * as DEV_IS_GONE(p + 1) expands correctly (CERT PRE01-C).
 */
#define DEV_IS_GONE(dev) \
	((!(dev)) || ((dev)->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22                              struct domain_device *device,
23                              int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26                                 void *funcdata);
27
28 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
29 {
30         switch (fis->command) {
31         case ATA_CMD_FPDMA_WRITE:
32         case ATA_CMD_FPDMA_READ:
33         case ATA_CMD_FPDMA_RECV:
34         case ATA_CMD_FPDMA_SEND:
35         case ATA_CMD_NCQ_NON_DATA:
36                 return HISI_SAS_SATA_PROTOCOL_FPDMA;
37
38         case ATA_CMD_DOWNLOAD_MICRO:
39         case ATA_CMD_ID_ATA:
40         case ATA_CMD_PMP_READ:
41         case ATA_CMD_READ_LOG_EXT:
42         case ATA_CMD_PIO_READ:
43         case ATA_CMD_PIO_READ_EXT:
44         case ATA_CMD_PMP_WRITE:
45         case ATA_CMD_WRITE_LOG_EXT:
46         case ATA_CMD_PIO_WRITE:
47         case ATA_CMD_PIO_WRITE_EXT:
48                 return HISI_SAS_SATA_PROTOCOL_PIO;
49
50         case ATA_CMD_DSM:
51         case ATA_CMD_DOWNLOAD_MICRO_DMA:
52         case ATA_CMD_PMP_READ_DMA:
53         case ATA_CMD_PMP_WRITE_DMA:
54         case ATA_CMD_READ:
55         case ATA_CMD_READ_EXT:
56         case ATA_CMD_READ_LOG_DMA_EXT:
57         case ATA_CMD_READ_STREAM_DMA_EXT:
58         case ATA_CMD_TRUSTED_RCV_DMA:
59         case ATA_CMD_TRUSTED_SND_DMA:
60         case ATA_CMD_WRITE:
61         case ATA_CMD_WRITE_EXT:
62         case ATA_CMD_WRITE_FUA_EXT:
63         case ATA_CMD_WRITE_QUEUED:
64         case ATA_CMD_WRITE_LOG_DMA_EXT:
65         case ATA_CMD_WRITE_STREAM_DMA_EXT:
66         case ATA_CMD_ZAC_MGMT_IN:
67                 return HISI_SAS_SATA_PROTOCOL_DMA;
68
69         case ATA_CMD_CHK_POWER:
70         case ATA_CMD_DEV_RESET:
71         case ATA_CMD_EDD:
72         case ATA_CMD_FLUSH:
73         case ATA_CMD_FLUSH_EXT:
74         case ATA_CMD_VERIFY:
75         case ATA_CMD_VERIFY_EXT:
76         case ATA_CMD_SET_FEATURES:
77         case ATA_CMD_STANDBY:
78         case ATA_CMD_STANDBYNOW1:
79         case ATA_CMD_ZAC_MGMT_OUT:
80                 return HISI_SAS_SATA_PROTOCOL_NONDATA;
81         default:
82         {
83                 if (fis->command == ATA_CMD_SET_MAX) {
84                         switch (fis->features) {
85                         case ATA_SET_MAX_PASSWD:
86                         case ATA_SET_MAX_LOCK:
87                                 return HISI_SAS_SATA_PROTOCOL_PIO;
88
89                         case ATA_SET_MAX_PASSWD_DMA:
90                         case ATA_SET_MAX_UNLOCK_DMA:
91                                 return HISI_SAS_SATA_PROTOCOL_DMA;
92
93                         default:
94                                 return HISI_SAS_SATA_PROTOCOL_NONDATA;
95                         }
96                 }
97                 if (direction == DMA_NONE)
98                         return HISI_SAS_SATA_PROTOCOL_NONDATA;
99                 return HISI_SAS_SATA_PROTOCOL_PIO;
100         }
101         }
102 }
103 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
104
105 void hisi_sas_sata_done(struct sas_task *task,
106                             struct hisi_sas_slot *slot)
107 {
108         struct task_status_struct *ts = &task->task_status;
109         struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
110         struct hisi_sas_status_buffer *status_buf =
111                         hisi_sas_status_buf_addr_mem(slot);
112         u8 *iu = &status_buf->iu[0];
113         struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
114
115         resp->frame_len = sizeof(struct dev_to_host_fis);
116         memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
117
118         ts->buf_valid_size = sizeof(*resp);
119 }
120 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
121
122 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
123 {
124         struct ata_queued_cmd *qc = task->uldd_task;
125
126         if (qc) {
127                 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
128                         qc->tf.command == ATA_CMD_FPDMA_READ) {
129                         *tag = qc->tag;
130                         return 1;
131                 }
132         }
133         return 0;
134 }
135 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
136
137 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
138 {
139         return device->port->ha->lldd_ha;
140 }
141
/* Map a libsas asd_sas_port back to the hisi_sas_port embedding it. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
147
148 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
149 {
150         int phy_no;
151
152         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
153                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
154 }
155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
156
157 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
158 {
159         void *bitmap = hisi_hba->slot_index_tags;
160
161         clear_bit(slot_idx, bitmap);
162 }
163
/* Return a slot index to the pool (thin wrapper over the bitmap clear). */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
168
169 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
170 {
171         void *bitmap = hisi_hba->slot_index_tags;
172
173         set_bit(slot_idx, bitmap);
174 }
175
176 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
177 {
178         unsigned int index;
179         void *bitmap = hisi_hba->slot_index_tags;
180
181         index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
182         if (index >= hisi_hba->slot_index_count)
183                 return -SAS_QUEUE_FULL;
184         hisi_sas_slot_index_set(hisi_hba, index);
185         *slot_idx = index;
186         return 0;
187 }
188
189 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
190 {
191         int i;
192
193         for (i = 0; i < hisi_hba->slot_index_count; ++i)
194                 hisi_sas_slot_index_clear(hisi_hba, i);
195 }
196
/*
 * Tear down a completed/aborted slot: unmap the task's scatterlist,
 * release the DMA buffer, unlink the slot and return its tag.
 * NOTE(review): callers appear to hold hisi_hba->lock around this
 * (see the prep/abort paths) -- confirm before adding new call sites.
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{

	if (task) {
		struct device *dev = hisi_hba->dev;

		/* slot already detached from the task; nothing to undo */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		/* ATA tasks are mapped/unmapped by libata, not by us */
		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
228
/* Thin dispatch wrappers to the hw-generation-specific prep routines. */

/* Build an SMP command for the slot. */
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

/* Build an SSP command (optionally a task-management frame). */
static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

/* Build a SATA/STP command. */
static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

/* Build an internal abort command targeting a device or a single tag. */
static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
255
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	/* only SSP tasks carry a LUN/tag we can address with ABORT TASK */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	/* complete the task towards the upper layer, if it wants to know */
	if (task->task_done)
		task->task_done(task);
}
292
/*
 * Prepare one sas_task for delivery on the given delivery queue: map its
 * scatterlist, allocate a tag and a delivery-queue entry, fill in the
 * slot and protocol-specific command header.  On success *pass is
 * incremented so the caller knows to ring the doorbell.
 * NOTE(review): caller holds dq->lock across prep + start_delivery
 * (see hisi_sas_task_exec); hisi_hba->lock is taken locally here.
 */
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
		*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
		int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	/* ATA tasks are DMA-mapped by libata; only map non-ATA here */
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	/* allocate a command tag (hw hook may implement its own policy) */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	/* zero the header and tables the hardware will read */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;
	++(*pass);

	return 0;

	/* unwind in reverse order of acquisition */
err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter,
				     task->num_scatter,
				     task->data_dir);
prep_out:
	return rc;
}
452
453 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
454                               int is_tmf, struct hisi_sas_tmf_task *tmf)
455 {
456         u32 rc;
457         u32 pass = 0;
458         unsigned long flags;
459         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
460         struct device *dev = hisi_hba->dev;
461         struct domain_device *device = task->dev;
462         struct hisi_sas_device *sas_dev = device->lldd_dev;
463         struct hisi_sas_dq *dq = sas_dev->dq;
464
465         if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
466                 return -EINVAL;
467
468         /* protect task_prep and start_delivery sequence */
469         spin_lock_irqsave(&dq->lock, flags);
470         rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
471         if (rc)
472                 dev_err(dev, "task exec: failed[%d]!\n", rc);
473
474         if (likely(pass))
475                 hisi_hba->hw->start_delivery(dq);
476         spin_unlock_irqrestore(&dq->lock, flags);
477
478         return rc;
479 }
480
/*
 * Notify libsas that OOB completed on a PHY and that identify/FIS bytes
 * have been DMAed, so port formation can proceed.  Also mirrors the
 * negotiated/min/max link rates into the sysfs sas_phy object.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	/* nothing to report until the PHY has actually come up */
	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		/* keep user-set limits if they were already configured */
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		/* expose the received IDENTIFY frame to libsas */
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
521
/*
 * Claim the first unused entry in hisi_hba->devices[] for a new domain
 * device, binding it to a delivery queue chosen round-robin by index.
 * Returns the entry, or NULL when all HISI_SAS_MAX_DEVICES are in use.
 * Serialized by hisi_hba->lock so two discoveries can't claim one slot.
 */
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			/* spread devices across the delivery queues */
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
550
551 static int hisi_sas_dev_found(struct domain_device *device)
552 {
553         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
554         struct domain_device *parent_dev = device->parent;
555         struct hisi_sas_device *sas_dev;
556         struct device *dev = hisi_hba->dev;
557
558         if (hisi_hba->hw->alloc_dev)
559                 sas_dev = hisi_hba->hw->alloc_dev(device);
560         else
561                 sas_dev = hisi_sas_alloc_dev(device);
562         if (!sas_dev) {
563                 dev_err(dev, "fail alloc dev: max support %d devices\n",
564                         HISI_SAS_MAX_DEVICES);
565                 return -EINVAL;
566         }
567
568         device->lldd_dev = sas_dev;
569         hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
570
571         if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
572                 int phy_no;
573                 u8 phy_num = parent_dev->ex_dev.num_phys;
574                 struct ex_phy *phy;
575
576                 for (phy_no = 0; phy_no < phy_num; phy_no++) {
577                         phy = &parent_dev->ex_dev.ex_phy[phy_no];
578                         if (SAS_ADDR(phy->attached_sas_addr) ==
579                                 SAS_ADDR(device->sas_addr)) {
580                                 sas_dev->attached_phy = phy_no;
581                                 break;
582                         }
583                 }
584
585                 if (phy_no == phy_num) {
586                         dev_info(dev, "dev found: no attached "
587                                  "dev:%016llx at ex:%016llx\n",
588                                  SAS_ADDR(device->sas_addr),
589                                  SAS_ADDR(parent_dev->sas_addr));
590                         return -EINVAL;
591                 }
592         }
593
594         dev_info(dev, "dev[%d:%x] found\n",
595                 sas_dev->device_id, sas_dev->dev_type);
596
597         return 0;
598 }
599
/*
 * scsi_host slave_configure hook: run the generic libsas configuration,
 * then cap the queue depth at 64 for everything except SATA devices.
 */
static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int rc = sas_slave_configure(sdev);

	if (rc)
		return rc;

	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
612
/* Kick off discovery by bringing up all PHYs on the controller. */
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
619
620 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
621 {
622         struct hisi_hba *hisi_hba = shost_priv(shost);
623         struct sas_ha_struct *sha = &hisi_hba->sha;
624
625         /* Wait for PHY up interrupt to occur */
626         if (time < HZ)
627                 return 0;
628
629         sas_drain_work(sha);
630         return 1;
631 }
632
/*
 * Workqueue handler for PHY-up: notify the serial link layer (which may
 * sleep, hence the work context) and push the received bytes to libsas.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
644
/* Workqueue handler that performs a link reset on the owning PHY. */
static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
653
/* Per-event work handlers, indexed by enum hisi_sas_phy_event. */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
658
659 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
660                                 enum hisi_sas_phy_event event)
661 {
662         struct hisi_hba *hisi_hba = phy->hisi_hba;
663
664         if (WARN_ON(event >= HISI_PHYES_NUM))
665                 return false;
666
667         return queue_work(hisi_hba->wq, &phy->works[event]);
668 }
669 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
670
/*
 * Initialize one hisi_sas_phy and its embedded libsas asd_sas_phy with
 * the driver's defaults, and wire up the per-event work handlers.
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	/* only PHYs the hardware actually has are enabled */
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
698
/*
 * Called when libsas forms a port on a PHY: record the hardware port id
 * and cross-link phy <-> port under hisi_hba->lock.
 */
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
718
/*
 * Complete one slot as aborted towards the upper layer and free it.
 * NOTE(review): the release paths call this with hisi_hba->lock held
 * (see the comment on hisi_sas_release_task) -- confirm at new callers.
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
739
/*
 * Abort-complete and free every outstanding slot of one device.
 * hisi_hba.lock should be locked by the caller.
 */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* _safe: each iteration unlinks the slot from sas_dev->list */
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
750
751 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
752 {
753         struct hisi_sas_device *sas_dev;
754         struct domain_device *device;
755         int i;
756
757         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
758                 sas_dev = &hisi_hba->devices[i];
759                 device = sas_dev->sas_device;
760
761                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
762                     !device)
763                         continue;
764
765                 hisi_sas_release_task(hisi_hba, device);
766         }
767 }
768 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
769
/* Deregister a device with the hardware, if this hw generation needs it. */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
776
/*
 * libsas dev_gone hook: abort everything outstanding on the device,
 * deregister it from the hardware, clear its ITCT entry and release the
 * devices[] slot.  During a host reset only the slot release happens,
 * since the reset path tears down hardware state itself.
 * NOTE(review): sas_dev is dereferenced unconditionally -- assumes libsas
 * never calls dev_gone with lldd_dev == NULL; confirm against libsas.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	/* marking the slot unused makes it reallocatable */
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
800
/* libsas lldd_execute_task hook: execute a normal (non-TMF) task. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
805
/*
 * libsas lldd_control_phy hook: dispatch PHY control requests to the
 * hw-generation-specific callbacks.  Returns 0 on success or
 * -EOPNOTSUPP for functions this driver (or hw generation) lacks.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		/* bounce the link: disable, settle, re-enable */
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
843
844 static void hisi_sas_task_done(struct sas_task *task)
845 {
846         if (!del_timer(&task->slow_task->timer))
847                 return;
848         complete(&task->slow_task->completion);
849 }
850
851 static void hisi_sas_tmf_timedout(struct timer_list *t)
852 {
853         struct sas_task_slow *slow = from_timer(slow, t, timer);
854         struct sas_task *task = slow->task;
855         unsigned long flags;
856
857         spin_lock_irqsave(&task->task_state_lock, flags);
858         if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
859                 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
860         spin_unlock_irqrestore(&task->task_state_lock, flags);
861
862         complete(&task->slow_task->completion);
863 }
864
865 #define TASK_TIMEOUT 20
866 #define TASK_RETRY 3
867 #define INTERNAL_ABORT_TIMEOUT 6
/*
 * Execute a TMF (SSP) or a soft-reset FIS (SATA) as an internal slow
 * task, retrying up to TASK_RETRY times.
 *
 * @device:    target domain device
 * @parameter: host-to-device FIS (SATA) or struct sas_ssp_task (SSP)
 * @para_len:  length of @parameter in bytes
 * @tmf:       TMF descriptor passed through to the task-prep code
 *
 * Returns TMF_RESP_FUNC_COMPLETE/SUCC on success, TMF_RESP_FUNC_FAILED
 * or a negative errno on failure; for SAS_DATA_UNDERRUN returns the
 * residual byte count.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Arm a watchdog so a lost task cannot block us forever. */
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				/*
				 * Orphan the slot: the task is freed below
				 * but the hardware may still complete it.
				 */
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Any other status: free the task and retry. */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
969
970 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
971                 bool reset, int pmp, u8 *fis)
972 {
973         struct ata_taskfile tf;
974
975         ata_tf_init(dev, &tf);
976         if (reset)
977                 tf.ctl |= ATA_SRST;
978         else
979                 tf.ctl &= ~ATA_SRST;
980         tf.command = ATA_CMD_DEV_RESET;
981         ata_tf_to_fis(&tf, pmp, 0, fis);
982 }
983
/*
 * Soft-reset an ATA disk: send an SRST-assert FIS to every edge link
 * of the port, then (only if all asserts succeeded) an SRST-clear FIS.
 * On full success all outstanding tasks for the device are released.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	/* Phase 1: assert SRST on each link. */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: de-assert SRST on each link. */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
1026
1027 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1028                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
1029 {
1030         struct sas_ssp_task ssp_task;
1031
1032         if (!(device->tproto & SAS_PROTOCOL_SSP))
1033                 return TMF_RESP_FUNC_ESUPP;
1034
1035         memcpy(ssp_task.LUN, lun, 8);
1036
1037         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1038                                 sizeof(ssp_task), tmf);
1039 }
1040
/*
 * After a controller reset, re-associate each registered device's port
 * with the hw port id of a phy that came back up, refresh the linkrate
 * of directly attached devices, and rebuild the ITCT entry.  Ports
 * with no live phy are marked with the invalid id 0xff.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Find any phy of this port that is up again. */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
1079
1080 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1081                               u32 state)
1082 {
1083         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1084         struct asd_sas_port *_sas_port = NULL;
1085         int phy_no;
1086
1087         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1088                 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1089                 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1090                 struct asd_sas_port *sas_port = sas_phy->port;
1091                 bool do_port_check = !!(_sas_port != sas_port);
1092
1093                 if (!sas_phy->phy->enabled)
1094                         continue;
1095
1096                 /* Report PHY state change to libsas */
1097                 if (state & BIT(phy_no)) {
1098                         if (do_port_check && sas_port && sas_port->port_dev) {
1099                                 struct domain_device *dev = sas_port->port_dev;
1100
1101                                 _sas_port = sas_port;
1102
1103                                 if (DEV_IS_EXPANDER(dev->dev_type))
1104                                         sas_ha->notify_port_event(sas_phy,
1105                                                         PORTE_BROADCAST_RCVD);
1106                         }
1107                 } else if (old_state & (1 << phy_no))
1108                         /* PHY down but was up before */
1109                         hisi_sas_phy_down(hisi_hba, phy_no, 0);
1110
1111         }
1112 }
1113
/*
 * Perform a full controller soft reset and re-discover the topology.
 *
 * Serialized via HISI_SAS_RESET_BIT (also checked by dev-gone paths);
 * new commands are rejected (HISI_SAS_REJECT_CMD_BIT) while the reset
 * runs.  Returns 0 on success, negative if unsupported, already in
 * progress, or the hw reset failed.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one reset at a time. */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	/* In-flight IO died with the reset; complete it all back. */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}
1161
/*
 * libsas eh handler: abort a single outstanding sas_task.
 *
 * SSP: issue TMF ABORT TASK plus a host internal abort for the tag;
 * release the slot only when the TMF says the IO is not in the device
 * and the internal abort did not already complete it.
 * SATA/STP: abort all IO for the device, then soft-reset the disk.
 * SMP: internal abort for the tag only.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	/* Already completed: nothing to abort. */
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			/* No per-IO abort for SATA: abort the whole device. */
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
			     HISI_SAS_INT_ABT_CMD, tag);
		/* Abort failed and the slot is still ours: release it. */
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
1250
/*
 * libsas eh handler: abort all tasks in a task set.  Quiesce the
 * device with an internal abort, then issue TMF ABORT TASK SET and, on
 * success, release all of the device's slots.
 */
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
1278
1279 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1280 {
1281         int rc = TMF_RESP_FUNC_FAILED;
1282         struct hisi_sas_tmf_task tmf_task;
1283
1284         tmf_task.tmf = TMF_CLEAR_ACA;
1285         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1286
1287         return rc;
1288 }
1289
1290 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1291 {
1292         struct sas_phy *phy = sas_get_local_phy(device);
1293         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1294                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1295         rc = sas_phy_reset(phy, reset_type);
1296         sas_put_local_phy(phy);
1297         msleep(2000);
1298         return rc;
1299 }
1300
/*
 * libsas eh handler: reset the I_T nexus to @device.  Aborts all of
 * the device's IO, deregisters it, resets the local phy, and on
 * success (or if the device vanished, -ENODEV) releases its tasks.
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	/* Only reset devices that error handling has flagged. */
	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}
1330
/*
 * libsas eh handler: reset a logical unit.  SATA devices get an
 * internal abort followed by a phy hard reset; SSP devices get an
 * internal abort followed by TMF LOGICAL UNIT RESET.  On success all
 * outstanding tasks for the device are released.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}
1386
1387 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1388 {
1389         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1390         HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1391
1392         queue_work(hisi_hba->wq, &r.work);
1393         wait_for_completion(r.completion);
1394         if (r.done)
1395                 return TMF_RESP_FUNC_COMPLETE;
1396
1397         return TMF_RESP_FUNC_FAILED;
1398 }
1399
/*
 * libsas eh handler: ask the LU whether a task is still present via
 * TMF QUERY TASK (SSP only).  Returns TMF_RESP_FUNC_SUCC if the task
 * is still in the LU, TMF_RESP_FUNC_COMPLETE/FAILED otherwise.
 */
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			/* Map any other response to a plain failure. */
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
1433
/*
 * Build and deliver an internal abort command to the chip.
 *
 * @hisi_hba:   host controller
 * @device_id:  hw device id of the target
 * @task:       slow task carrying completion state for the abort
 * @abort_flag: HISI_SAS_INT_ABT_CMD (single IO) or _DEV (whole device)
 * @task_tag:   tag of the IO to abort (single-IO mode only)
 *
 * Returns 0 once the abort has been queued to the delivery queue,
 * negative errno on failure.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	/* No new commands while a controller reset is in flight. */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* dq->lock is held until delivery starts (or an error path). */
	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
			GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1532
1533 /**
1534  * hisi_sas_internal_task_abort -- execute an internal
1535  * abort command for single IO command or a device
1536  * @hisi_hba: host controller struct
1537  * @device: domain device
1538  * @abort_flag: mode of operation, device or single IO
1539  * @tag: tag of IO to be aborted (only relevant to single
1540  *       IO mode)
1541  */
1542 static int
1543 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1544                              struct domain_device *device,
1545                              int abort_flag, int tag)
1546 {
1547         struct sas_task *task;
1548         struct hisi_sas_device *sas_dev = device->lldd_dev;
1549         struct device *dev = hisi_hba->dev;
1550         int res;
1551
1552         /*
1553          * The interface is not realized means this HW don't support internal
1554          * abort, or don't need to do internal abort. Then here, we return
1555          * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
1556          * the internal abort has been executed and returned CQ.
1557          */
1558         if (!hisi_hba->hw->prep_abort)
1559                 return TMF_RESP_FUNC_FAILED;
1560
1561         task = sas_alloc_slow_task(GFP_KERNEL);
1562         if (!task)
1563                 return -ENOMEM;
1564
1565         task->dev = device;
1566         task->task_proto = device->tproto;
1567         task->task_done = hisi_sas_task_done;
1568         task->slow_task->timer.function = hisi_sas_tmf_timedout;
1569         task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1570         add_timer(&task->slow_task->timer);
1571
1572         res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1573                                                 task, abort_flag, tag);
1574         if (res) {
1575                 del_timer(&task->slow_task->timer);
1576                 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1577                         res);
1578                 goto exit;
1579         }
1580         wait_for_completion(&task->slow_task->completion);
1581         res = TMF_RESP_FUNC_FAILED;
1582
1583         /* Internal abort timed out */
1584         if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1585                 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1586                         struct hisi_sas_slot *slot = task->lldd_task;
1587
1588                         if (slot)
1589                                 slot->task = NULL;
1590                         dev_err(dev, "internal task abort: timeout and not done.\n");
1591                         res = -EIO;
1592                         goto exit;
1593                 } else
1594                         dev_err(dev, "internal task abort: timeout.\n");
1595         }
1596
1597         if (task->task_status.resp == SAS_TASK_COMPLETE &&
1598                 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1599                 res = TMF_RESP_FUNC_COMPLETE;
1600                 goto exit;
1601         }
1602
1603         if (task->task_status.resp == SAS_TASK_COMPLETE &&
1604                 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1605                 res = TMF_RESP_FUNC_SUCC;
1606                 goto exit;
1607         }
1608
1609 exit:
1610         dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1611                 "resp: 0x%x sts 0x%x\n",
1612                 SAS_ADDR(device->sas_addr),
1613                 task,
1614                 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1615                 task->task_status.stat);
1616         sas_free_task(task);
1617
1618         return res;
1619 }
1620
/* libsas lldd_port_formed hook: forward port-formation to the LLDD helper. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
        hisi_sas_port_notify_formed(sas_phy);
}
1625
/*
 * libsas lldd_port_deformed hook: intentionally empty — port teardown is
 * handled in hisi_sas_phy_down()/hisi_sas_phy_disconnected() instead.
 */
static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
1629
1630 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1631                         u8 reg_index, u8 reg_count, u8 *write_data)
1632 {
1633         struct hisi_hba *hisi_hba = sha->lldd_ha;
1634
1635         if (!hisi_hba->hw->write_gpio)
1636                 return -EOPNOTSUPP;
1637
1638         return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1639                                 reg_index, reg_count, write_data);
1640 }
1641
/* Clear the driver-side per-phy state after the phy has gone down. */
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
        phy->phy_attached = 0;
        phy->phy_type = 0;
        phy->port = NULL;
}
1648
1649 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1650 {
1651         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1652         struct asd_sas_phy *sas_phy = &phy->sas_phy;
1653         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1654
1655         if (rdy) {
1656                 /* Phy down but ready */
1657                 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1658                 hisi_sas_port_notify_formed(sas_phy);
1659         } else {
1660                 struct hisi_sas_port *port  = phy->port;
1661
1662                 /* Phy down and not ready */
1663                 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1664                 sas_phy_disconnected(sas_phy);
1665
1666                 if (port) {
1667                         if (phy->phy_type & PORT_TYPE_SAS) {
1668                                 int port_id = port->id;
1669
1670                                 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1671                                                                        port_id))
1672                                         port->port_attached = 0;
1673                         } else if (phy->phy_type & PORT_TYPE_SATA)
1674                                 port->port_attached = 0;
1675                 }
1676                 hisi_sas_phy_disconnected(phy);
1677         }
1678 }
1679 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1680
1681 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1682 {
1683         int i;
1684
1685         for (i = 0; i < hisi_hba->queue_count; i++) {
1686                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1687
1688                 tasklet_kill(&cq->tasklet);
1689         }
1690 }
1691 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1692
/* SAS transport template shared by the v1/v2/v3 hw modules. */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* sysfs attributes exposed on the SCSI host. */
static struct device_attribute *host_attrs[] = {
        &dev_attr_phy_event_threshold,
        NULL,
};
1700
/*
 * SCSI host template: mostly generic libsas handlers; per-driver limits
 * (can_queue, sg_tablesize) are overridden in hisi_sas_probe().
 */
static struct scsi_host_template _hisi_sas_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = hisi_sas_slave_configure,
        .scan_finished          = hisi_sas_scan_finished,
        .scan_start             = hisi_sas_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_device_reset_handler = sas_eh_device_reset_handler,
        .eh_target_reset_handler = sas_eh_target_reset_handler,
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = host_attrs,
};
/* Exported so the hw modules can pass the template to scsi_host_alloc(). */
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
1724
1725 static struct sas_domain_function_template hisi_sas_transport_ops = {
1726         .lldd_dev_found         = hisi_sas_dev_found,
1727         .lldd_dev_gone          = hisi_sas_dev_gone,
1728         .lldd_execute_task      = hisi_sas_queue_command,
1729         .lldd_control_phy       = hisi_sas_control_phy,
1730         .lldd_abort_task        = hisi_sas_abort_task,
1731         .lldd_abort_task_set    = hisi_sas_abort_task_set,
1732         .lldd_clear_aca         = hisi_sas_clear_aca,
1733         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
1734         .lldd_lu_reset          = hisi_sas_lu_reset,
1735         .lldd_query_task        = hisi_sas_query_task,
1736         .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1737         .lldd_port_formed       = hisi_sas_port_formed,
1738         .lldd_port_deformed = hisi_sas_port_deformed,
1739         .lldd_write_gpio = hisi_sas_write_gpio,
1740 };
1741
1742 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1743 {
1744         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1745
1746         for (i = 0; i < hisi_hba->queue_count; i++) {
1747                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1748                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1749
1750                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1751                 memset(hisi_hba->cmd_hdr[i], 0, s);
1752                 dq->wr_point = 0;
1753
1754                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1755                 memset(hisi_hba->complete_hdr[i], 0, s);
1756                 cq->rd_point = 0;
1757         }
1758
1759         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1760         memset(hisi_hba->initial_fis, 0, s);
1761
1762         s = max_command_entries * sizeof(struct hisi_sas_iost);
1763         memset(hisi_hba->iost, 0, s);
1764
1765         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1766         memset(hisi_hba->breakpoint, 0, s);
1767
1768         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1769         memset(hisi_hba->sata_breakpoint, 0, s);
1770 }
1771 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1772
/*
 * Allocate all per-controller software state and DMA memory: phy/port/device
 * tables, delivery and completion queue rings, ITCT/IOST/breakpoint tables,
 * slot bookkeeping, initial-FIS buffers and the reset workqueue.
 *
 * Returns 0 on success or -ENOMEM.  On failure the caller is expected to
 * clean up via hisi_sas_free() (every free there is NULL-safe).
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
        struct device *dev = hisi_hba->dev;
        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

        spin_lock_init(&hisi_hba->lock);
        /* Per-phy init; ports start detached with an invalid id. */
        for (i = 0; i < hisi_hba->n_phy; i++) {
                hisi_sas_phy_init(hisi_hba, i);
                hisi_hba->port[i].port_attached = 0;
                hisi_hba->port[i].id = -1;
        }

        /* Mark every device slot free; device_id doubles as the index. */
        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
                hisi_hba->devices[i].device_id = i;
                hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
        }

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];

                /* Completion queue structure */
                cq->id = i;
                cq->hisi_hba = hisi_hba;

                /* Delivery queue structure */
                spin_lock_init(&dq->lock);
                dq->id = i;
                dq->hisi_hba = hisi_hba;

                /* Delivery queue */
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
                                        &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
                if (!hisi_hba->cmd_hdr[i])
                        goto err_out;

                /* Completion queue */
                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
                                &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
                if (!hisi_hba->complete_hdr[i])
                        goto err_out;
        }

        /* Pool for per-slot buffer tables (command + status + SGE). */
        s = sizeof(struct hisi_sas_slot_buf_table);
        hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
        if (!hisi_hba->buffer_pool)
                goto err_out;

        /* ITCT (device context table) read by the HW; must start zeroed. */
        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
                                            GFP_KERNEL);
        if (!hisi_hba->itct)
                goto err_out;

        memset(hisi_hba->itct, 0, s);

        /* Host-side slot bookkeeping, one entry per command tag. */
        hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
                                           sizeof(struct hisi_sas_slot),
                                           GFP_KERNEL);
        if (!hisi_hba->slot_info)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
                                            GFP_KERNEL);
        if (!hisi_hba->iost)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
                                &hisi_hba->breakpoint_dma, GFP_KERNEL);
        if (!hisi_hba->breakpoint)
                goto err_out;

        /* Tag bitmap: one bit per command slot. */
        hisi_hba->slot_index_count = max_command_entries;
        s = hisi_hba->slot_index_count / BITS_PER_BYTE;
        hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
        if (!hisi_hba->slot_index_tags)
                goto err_out;

        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
                                &hisi_hba->initial_fis_dma, GFP_KERNEL);
        if (!hisi_hba->initial_fis)
                goto err_out;

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
        hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
                                &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
        if (!hisi_hba->sata_breakpoint)
                goto err_out;
        /* Zero all the DMA buffers allocated above and reset queue pointers. */
        hisi_sas_init_mem(hisi_hba);

        hisi_sas_slot_index_init(hisi_hba);

        /* Single-threaded so reset/event works are serialized. */
        hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
        if (!hisi_hba->wq) {
                dev_err(dev, "sas_alloc: failed to create workqueue\n");
                goto err_out;
        }

        return 0;
err_out:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1882
/*
 * Release everything allocated by hisi_sas_alloc().  Every free is guarded
 * (or NULL-safe, as dma_pool_destroy() is), so this is also safe to call
 * after a partially failed hisi_sas_alloc().  devm_* allocations are
 * released by the driver core, not here.
 */
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

        for (i = 0; i < hisi_hba->queue_count; i++) {
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                if (hisi_hba->cmd_hdr[i])
                        dma_free_coherent(dev, s,
                                          hisi_hba->cmd_hdr[i],
                                          hisi_hba->cmd_hdr_dma[i]);

                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                if (hisi_hba->complete_hdr[i])
                        dma_free_coherent(dev, s,
                                          hisi_hba->complete_hdr[i],
                                          hisi_hba->complete_hdr_dma[i]);
        }

        /* dma_pool_destroy() accepts NULL. */
        dma_pool_destroy(hisi_hba->buffer_pool);

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        if (hisi_hba->itct)
                dma_free_coherent(dev, s,
                                  hisi_hba->itct, hisi_hba->itct_dma);

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        if (hisi_hba->iost)
                dma_free_coherent(dev, s,
                                  hisi_hba->iost, hisi_hba->iost_dma);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        if (hisi_hba->breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->breakpoint,
                                  hisi_hba->breakpoint_dma);


        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        if (hisi_hba->initial_fis)
                dma_free_coherent(dev, s,
                                  hisi_hba->initial_fis,
                                  hisi_hba->initial_fis_dma);

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
        if (hisi_hba->sata_breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->sata_breakpoint,
                                  hisi_hba->sata_breakpoint_dma);

        if (hisi_hba->wq)
                destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
1937
/* Workqueue handler: run an asynchronous (fire-and-forget) controller reset. */
void hisi_sas_rst_work_handler(struct work_struct *work)
{
        struct hisi_hba *hisi_hba =
                container_of(work, struct hisi_hba, rst_work);

        hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1946
/*
 * Workqueue handler for a synchronous controller reset: record success in
 * rst->done and signal the waiter through rst->completion.
 */
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
        struct hisi_sas_rst *rst =
                container_of(work, struct hisi_sas_rst, work);

        if (!hisi_sas_controller_reset(rst->hisi_hba))
                rst->done = true;
        complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
1957
/*
 * Read controller configuration from firmware (DT or ACPI) device
 * properties: SAS address, phy/queue counts, optional reference clock, and
 * — for DT-based platform devices only — the syscon regmap plus the reset/
 * clock-enable register offsets.
 *
 * Returns 0 on success, -ENOENT when a required property is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        struct platform_device *pdev = hisi_hba->platform_dev;
        struct device_node *np = pdev ? pdev->dev.of_node : NULL;
        struct clk *refclk;

        if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                          SAS_ADDR_SIZE)) {
                dev_err(dev, "could not get property sas-addr\n");
                return -ENOENT;
        }

        if (np) {
                /*
                 * These properties are only required for platform device-based
                 * controller with DT firmware.
                 */
                hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                        "hisilicon,sas-syscon");
                if (IS_ERR(hisi_hba->ctrl)) {
                        dev_err(dev, "could not get syscon\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-reg",
                                             &hisi_hba->ctrl_reset_reg)) {
                        dev_err(dev,
                                "could not get property ctrl-reset-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                             &hisi_hba->ctrl_reset_sts_reg)) {
                        dev_err(dev,
                                "could not get property ctrl-reset-sts-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                             &hisi_hba->ctrl_clock_ena_reg)) {
                        dev_err(dev,
                                "could not get property ctrl-clock-ena-reg\n");
                        return -ENOENT;
                }
        }

        /* The reference clock is optional; absence is not an error. */
        refclk = devm_clk_get(dev, NULL);
        if (IS_ERR(refclk))
                dev_dbg(dev, "no ref clk property\n");
        else
                hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

        if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
                dev_err(dev, "could not get property phy-count\n");
                return -ENOENT;
        }

        if (device_property_read_u32(dev, "queue-count",
                                     &hisi_hba->queue_count)) {
                dev_err(dev, "could not get property queue-count\n");
                return -ENOENT;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2025
/*
 * Allocate the Scsi_Host (with the hisi_hba embedded as host private data),
 * read firmware config, set up DMA masks, map the register regions and
 * allocate driver memory.  Returns the shost on success, NULL on failure
 * (the shost reference is dropped internally on error).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                                              const struct hisi_sas_hw *hw)
{
        struct resource *res;
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;

        shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
        if (!shost) {
                dev_err(dev, "scsi host alloc failed\n");
                return NULL;
        }
        hisi_hba = shost_priv(shost);

        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
        hisi_hba->hw = hw;
        hisi_hba->dev = dev;
        hisi_hba->platform_dev = pdev;
        hisi_hba->shost = shost;
        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

        /* Callback is installed later by the hw layer when it arms the timer. */
        timer_setup(&hisi_hba->timer, NULL, 0);

        if (hisi_sas_get_fw_info(hisi_hba) < 0)
                goto err_out;

        /* Prefer 64-bit DMA, fall back to 32-bit. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                dev_err(dev, "No usable DMA addressing method\n");
                goto err_out;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hisi_hba->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(hisi_hba->regs))
                goto err_out;

        /* Second MEM resource (SGPIO registers) is optional. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
                if (IS_ERR(hisi_hba->sgpio_regs))
                        goto err_out;
        }

        if (hisi_sas_alloc(hisi_hba, shost)) {
                hisi_sas_free(hisi_hba);
                goto err_out;
        }

        return shost;
err_out:
        scsi_host_put(shost);
        dev_err(dev, "shost alloc failed\n");
        return NULL;
}
2082
2083 void hisi_sas_init_add(struct hisi_hba *hisi_hba)
2084 {
2085         int i;
2086
2087         for (i = 0; i < hisi_hba->n_phy; i++)
2088                 memcpy(&hisi_hba->phy[i].dev_sas_addr,
2089                        hisi_hba->sas_addr,
2090                        SAS_ADDR_SIZE);
2091 }
2092 EXPORT_SYMBOL_GPL(hisi_sas_init_add);
2093
2094 int hisi_sas_probe(struct platform_device *pdev,
2095                          const struct hisi_sas_hw *hw)
2096 {
2097         struct Scsi_Host *shost;
2098         struct hisi_hba *hisi_hba;
2099         struct device *dev = &pdev->dev;
2100         struct asd_sas_phy **arr_phy;
2101         struct asd_sas_port **arr_port;
2102         struct sas_ha_struct *sha;
2103         int rc, phy_nr, port_nr, i;
2104
2105         shost = hisi_sas_shost_alloc(pdev, hw);
2106         if (!shost)
2107                 return -ENOMEM;
2108
2109         sha = SHOST_TO_SAS_HA(shost);
2110         hisi_hba = shost_priv(shost);
2111         platform_set_drvdata(pdev, sha);
2112
2113         phy_nr = port_nr = hisi_hba->n_phy;
2114
2115         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2116         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2117         if (!arr_phy || !arr_port) {
2118                 rc = -ENOMEM;
2119                 goto err_out_ha;
2120         }
2121
2122         sha->sas_phy = arr_phy;
2123         sha->sas_port = arr_port;
2124         sha->lldd_ha = hisi_hba;
2125
2126         shost->transportt = hisi_sas_stt;
2127         shost->max_id = HISI_SAS_MAX_DEVICES;
2128         shost->max_lun = ~0;
2129         shost->max_channel = 1;
2130         shost->max_cmd_len = 16;
2131         shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2132         shost->can_queue = hisi_hba->hw->max_command_entries;
2133         shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2134
2135         sha->sas_ha_name = DRV_NAME;
2136         sha->dev = hisi_hba->dev;
2137         sha->lldd_module = THIS_MODULE;
2138         sha->sas_addr = &hisi_hba->sas_addr[0];
2139         sha->num_phys = hisi_hba->n_phy;
2140         sha->core.shost = hisi_hba->shost;
2141
2142         for (i = 0; i < hisi_hba->n_phy; i++) {
2143                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2144                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2145         }
2146
2147         hisi_sas_init_add(hisi_hba);
2148
2149         rc = scsi_add_host(shost, &pdev->dev);
2150         if (rc)
2151                 goto err_out_ha;
2152
2153         rc = sas_register_ha(sha);
2154         if (rc)
2155                 goto err_out_register_ha;
2156
2157         rc = hisi_hba->hw->hw_init(hisi_hba);
2158         if (rc)
2159                 goto err_out_register_ha;
2160
2161         scsi_scan_host(shost);
2162
2163         return 0;
2164
2165 err_out_register_ha:
2166         scsi_remove_host(shost);
2167 err_out_ha:
2168         hisi_sas_free(hisi_hba);
2169         scsi_host_put(shost);
2170         return rc;
2171 }
2172 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2173
2174 int hisi_sas_remove(struct platform_device *pdev)
2175 {
2176         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2177         struct hisi_hba *hisi_hba = sha->lldd_ha;
2178         struct Scsi_Host *shost = sha->core.shost;
2179
2180         sas_unregister_ha(sha);
2181         sas_remove_host(sha->core.shost);
2182
2183         hisi_sas_free(hisi_hba);
2184         scsi_host_put(shost);
2185         return 0;
2186 }
2187 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2188
/* Module init: attach the shared SAS domain transport template. */
static __init int hisi_sas_init(void)
{
        hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
        if (!hisi_sas_stt)
                return -ENOMEM;

        return 0;
}
2197
/* Module exit: release the transport template attached in hisi_sas_init(). */
static __exit void hisi_sas_exit(void)
{
        sas_release_transport(hisi_sas_stt);
}
2202
/* Module registration and metadata. */
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);