// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
                                struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

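/*
 * Descriptive note (added; not from the vendor sources): wait for an
 * ITL-family IOP to come ready by polling the inbound queue until the
 * firmware posts a request, then return that request via the outbound
 * queue.  Returns 0 on success, -1 on timeout.
 */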
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
        u32 req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = readl(&hba->u.itl.iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                writel(req, &hba->u.itl.iop->outbound_queue);
                readl(&hba->u.itl.iop->outbound_intstatus);
                return 0;
        }

        return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
        return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
        return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
        if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
                hptiop_host_request_callback_itl(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
                hptiop_iop_request_callback_itl(hba, tag);
}

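/*
 * Descriptive note (added): drain all completions from the ITL outbound
 * queue.  Host-allocated requests (IOPMU_QUEUE_MASK_HOST_BITS set) are
 * dispatched directly; IOP-resident requests are located by their offset
 * into the mapped register BAR.  For a synchronous request the first
 * completion only marks the context word, which the polling sender
 * (iop_send_sync_request_itl) watches for.
 */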
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
        u32 req;

        while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
                                                IOPMU_QUEUE_EMPTY) {

                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header __iomem *p;

                        p = (struct hpt_iop_request_header __iomem *)
                                ((char __iomem *)hba->u.itl.iop + req);

                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
                                        hptiop_request_callback_itl(hba, req);
                                else
                                        writel(1, &p->context);
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
        struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
        void __iomem *plx = hba->u.itl.plx;
        u32 status;
        int ret = 0;

        if (plx && readl(plx + 0x11C5C) & 0xf)
                writel(1, plx + 0x11C60);

        status = readl(&iop->outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);

                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

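/*
 * Descriptive note (added): the MV-family message unit uses simple
 * head/tail rings of 64-bit entries.  mv_outbound_read() pops one
 * completion tag (returning 0 when the ring is empty) and advances the
 * tail; mv_inbound_write() pushes one request and rings the inbound
 * doorbell.
 */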
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
        u32 outbound_tail = readl(&mu->outbound_tail);
        u32 outbound_head = readl(&mu->outbound_head);

        if (outbound_tail != outbound_head) {
                u64 p;

                memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;
                writel(outbound_tail, &mu->outbound_tail);
                return p;
        } else
                return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
        u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
        u32 head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
        writel(head, &hba->u.mv.mu->inbound_head);
        writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
                        &hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
        u32 req_type = (tag >> 5) & 0x7;
        struct hpt_iop_request_scsi_command *req;

        dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

        BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->msg_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                req = hba->reqs[tag >> 8].req_virt;
                if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

                hptiop_finish_scsi_req(hba, tag>>8, req);
                break;

        default:
                break;
        }
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
        u32 status;
        int ret = 0;

        status = readl(&hba->u.mv.regs->outbound_doorbell);
        writel(~status, &hba->u.mv.regs->outbound_doorbell);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u32 msg;
                msg = readl(&hba->u.mv.mu->outbound_msg);
                dprintk("received outbound msg %x\n", msg);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                u64 tag;

                while ((tag = mv_outbound_read(hba->u.mv.mu)))
                        hptiop_request_callback_mv(hba, tag);
                ret = 1;
        }

        return ret;
}

static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
        u32 req_type = _tag & 0xf;
        struct hpt_iop_request_scsi_command *req;

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->msg_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
                if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = IOP_RESULT_SUCCESS;
                hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
                break;

        default:
                break;
        }
}

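/*
 * Descriptive note (added): MVFrey interrupt handler.  It acknowledges
 * the F0 doorbell for firmware messages, then walks the outbound
 * completion list from the cached read pointer up to the shadow pointer
 * written by the controller, re-reading the shadow pointer until it
 * stops moving.  While initialized, PCIe function-0 interrupts are
 * masked for the duration of the handler.
 */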
static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
        u32 _tag, status, cptr, cur_rptr;
        int ret = 0;

        if (hba->initialized)
                writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

        status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
        if (status) {
                writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
                if (status & CPU_TO_F0_DRBL_MSG_BIT) {
                        u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
                        dprintk("received outbound msg %x\n", msg);
                        hptiop_message_callback(hba, msg);
                }
                ret = 1;
        }

        status = readl(&(hba->u.mvfrey.mu->isr_cause));
        if (status) {
                writel(status, &(hba->u.mvfrey.mu->isr_cause));
                do {
                        cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
                        cur_rptr = hba->u.mvfrey.outlist_rptr;
                        while (cur_rptr != cptr) {
                                cur_rptr++;
                                if (cur_rptr == hba->u.mvfrey.list_count)
                                        cur_rptr = 0;

                                _tag = hba->u.mvfrey.outlist[cur_rptr].val;
                                BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
                                hptiop_request_callback_mvfrey(hba, _tag);
                                ret = 1;
                        }
                        hba->u.mvfrey.outlist_rptr = cur_rptr;
                } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
        }

        if (hba->initialized)
                writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

        return ret;
}

static int iop_send_sync_request_itl(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
{
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;

        writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
        writel(0, &req->context);
        writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
                        &hba->u.itl.iop->inbound_queue);
        readl(&hba->u.itl.iop->outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                iop_intr_itl(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
        }

        return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
                                        u32 size_bits, u32 millisec)
{
        struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
        u32 i;

        hba->msg_done = 0;
        reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
        mv_inbound_write(hba->u.mv.internal_req_phy |
                        MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

        for (i = 0; i < millisec; i++) {
                iop_intr_mv(hba);
                if (hba->msg_done)
                        return 0;
                msleep(1);
        }
        return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
                                        u32 size_bits, u32 millisec)
{
        struct hpt_iop_request_header *reqhdr =
                hba->u.mvfrey.internal_req.req_virt;
        u32 i;

        hba->msg_done = 0;
        reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
        hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

        for (i = 0; i < millisec; i++) {
                iop_intr_mvfrey(hba);
                if (hba->msg_done)
                        break;
                msleep(1);
        }
        return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
        readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &hba->u.mv.mu->inbound_msg);
        writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
        readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
        readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

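/*
 * Descriptive note (added): post a message-register command and poll
 * for completion with hardware interrupts masked, calling the
 * per-family interrupt handler by hand under host_lock.  hba->msg_done
 * is set from hptiop_message_callback() or the request callbacks.
 */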
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
        u32 i;

        hba->msg_done = 0;
        hba->ops->disable_intr(hba);
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
                hba->ops->iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
                msleep(1);
        }

        hba->ops->enable_intr(hba);
        return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;

        req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_get_config __iomem *)
                        ((unsigned long)hba->u.itl.iop + req32);

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy_fromio(config, req, sizeof(*config));
        writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mv(hba, 0, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
        return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

        if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
                        info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
                return -1;

        config->interface_version = info->interface_version;
        config->firmware_version = info->firmware_version;
        config->max_requests = info->max_requests;
        config->request_size = info->request_size;
        config->max_sg_count = info->max_sg_count;
        config->data_transfer_length = info->data_transfer_length;
        config->alignment_mask = info->alignment_mask;
        config->max_devices = info->max_devices;
        config->sdram_size = info->sdram_size;

        return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;

        req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_set_config __iomem *)
                        ((unsigned long)hba->u.itl.iop + req32);

        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

        memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mv(hba, 0, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req =
                hba->u.mvfrey.internal_req.req_virt;

        memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
                &hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
        writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
                &hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
        writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
        writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
        writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
        /* enable interrupts */
        hba->ops->enable_intr(hba);

        hba->initialized = 1;

        /* start background tasks */
        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                printk(KERN_ERR "scsi%d: fail to start background task\n",
                        hba->host->host_no);
                return -1;
        }
        return 0;
}

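/*
 * Descriptive note (added): map one memory BAR of the adapter; the
 * caller picks the index.  On ITL boards with device IDs 0x44xx, BAR0
 * apparently exposes the PLX bridge and the real IOP registers sit
 * behind BAR2 (see hptiop_map_pci_bar_itl below).
 */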
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;

        struct pci_dev *pcidev = hba->pcidev;

        if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
                return NULL;
        }

        mem_base_phy = pci_resource_start(pcidev, index);
        length = pci_resource_len(pcidev, index);
        mem_base_virt = ioremap(mem_base_phy, length);

        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
                return NULL;
        }
        return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
        struct pci_dev *pcidev = hba->pcidev;

        hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
        if (hba->u.itl.iop == NULL)
                return -1;
        if ((pcidev->device & 0xff00) == 0x4400) {
                hba->u.itl.plx = hba->u.itl.iop;
                hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
                if (hba->u.itl.iop == NULL) {
                        iounmap(hba->u.itl.plx);
                        return -1;
                }
        }
        return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
        if (hba->u.itl.plx)
                iounmap(hba->u.itl.plx);
        iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
        hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
        if (hba->u.mv.regs == NULL)
                return -1;

        hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
        if (hba->u.mv.mu == NULL) {
                iounmap(hba->u.mv.regs);
                return -1;
        }

        return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
        hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
        if (hba->u.mvfrey.config == NULL)
                return -1;

        hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
        if (hba->u.mvfrey.mu == NULL) {
                iounmap(hba->u.mvfrey.config);
                return -1;
        }

        return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
        iounmap(hba->u.mv.regs);
        iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
        iounmap(hba->u.mvfrey.config);
        iounmap(hba->u.mvfrey.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
        dprintk("iop message 0x%x\n", msg);

        if (msg == IOPMU_INBOUND_MSG0_NOP ||
                msg == IOPMU_INBOUND_MSG0_RESET_COMM)
                hba->msg_done = 1;

        if (!hba->initialized)
                return;

        if (msg == IOPMU_INBOUND_MSG0_RESET) {
                atomic_set(&hba->resetting, 0);
                wake_up(&hba->reset_wq);
        } else if (msg <= IOPMU_INBOUND_MSG0_MAX)
                hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
        struct hptiop_request *ret;

        dprintk("get_req : req=%p\n", hba->req_list);

        ret = hba->req_list;
        if (ret)
                hba->req_list = ret->next;

        return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
}

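/*
 * Descriptive note (added): complete one SCSI command.  Unmap its DMA,
 * translate the IOP result code to a SCSI midlayer result (sense data
 * for CHECK CONDITION is returned at the start of the request's
 * sg_list area), then hand the command back to the midlayer and
 * recycle the request slot.
 */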
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
                                struct hpt_iop_request_scsi_command *req)
{
        struct scsi_cmnd *scp;

        dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);

        BUG_ON(!req->header.result);
        BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

        scp = hba->reqs[tag].scp;

        if (HPT_SCP(scp)->mapped)
                scsi_dma_unmap(scp);

        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
                scsi_set_resid(scp,
                        scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
                scp->result = (DID_BAD_TARGET<<16);
                break;
        case IOP_RESULT_BUSY:
                scp->result = (DID_BUS_BUSY<<16);
                break;
        case IOP_RESULT_RESET:
                scp->result = (DID_RESET<<16);
                break;
        case IOP_RESULT_FAIL:
                scp->result = (DID_ERROR<<16);
                break;
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
        case IOP_RESULT_CHECK_CONDITION:
                scsi_set_resid(scp,
                        scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = SAM_STAT_CHECK_CONDITION;
                memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
                goto skip_resid;

        default:
                scp->result = DID_ABORT << 16;
                break;
        }

        scsi_set_resid(scp,
                scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
        dprintk("scsi_done(%p)\n", scp);
        scsi_done(scp);
        free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
        struct hpt_iop_request_scsi_command *req;
        u32 tag;

        if (hba->iopintf_v2) {
                tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
                req = hba->reqs[tag].req_virt;
                if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
        } else {
                tag = _tag;
                req = hba->reqs[tag].req_virt;
        }

        hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;

        req = (struct hpt_iop_request_header __iomem *)
                        ((unsigned long)hba->u.itl.iop + tag);
        dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);

        BUG_ON(!readl(&req->result));
        BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

        p = (struct hpt_iop_request_ioctl_command __iomem *)req;
        arg = (struct hpt_ioctl_k *)(unsigned long)
                (readl(&req->context) |
                        ((u64)readl(&req->context_hi32)<<32));

        if (readl(&req->result) == IOP_RESULT_SUCCESS) {
                arg->result = HPT_IOCTL_RESULT_OK;

                if (arg->outbuf_size)
                        memcpy_fromio(arg->outbuf,
                                &p->buf[(readl(&p->inbuf_size) + 3) & ~3],
                                arg->outbuf_size);

                if (arg->bytes_returned)
                        *arg->bytes_returned = arg->outbuf_size;
        } else
                arg->result = HPT_IOCTL_RESULT_FAILED;

        arg->done(arg);
        writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
        struct hptiop_hba *hba = dev_id;
        int handled;
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        handled = hba->ops->iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return handled;
}

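/*
 * Descriptive note (added): build the hardware S/G table for a command.
 * Returns the number of entries, or 0 for commands with no data; each
 * entry carries the per-family host-address flag and the last entry is
 * marked with eot (end of table).
 */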
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct scatterlist *sg;
        int idx, nseg;

        nseg = scsi_dma_map(scp);
        BUG_ON(nseg < 0);
        if (!nseg)
                return 0;

        HPT_SCP(scp)->sgcnt = nseg;
        HPT_SCP(scp)->mapped = 1;

        BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

        scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
                psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
                        hba->ops->host_phy_flag;
                psg[idx].size = cpu_to_le32(sg_dma_len(sg));
                psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
                        cpu_to_le32(1) : 0;
        }
        return HPT_SCP(scp)->sgcnt;
}

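/*
 * Descriptive note (added): post a request to an ITL IOP.  On v2
 * interfaces the low bits of the inbound queue entry appear to encode
 * a request size class (<256, <512, or larger), presumably telling the
 * firmware how much of the request to fetch.
 */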
static void hptiop_post_req_itl(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;

        reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                                                        (u32)_req->index);
        reqhdr->context_hi32 = 0;

        if (hba->iopintf_v2) {
                u32 size, size_bits;

                size = le32_to_cpu(reqhdr->size);
                if (size < 256)
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
                else if (size < 512)
                        size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
                else
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
                                                IOPMU_QUEUE_ADDR_HOST_BIT;
                writel(_req->req_shifted_phy | size_bits,
                        &hba->u.itl.iop->inbound_queue);
        } else
                writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
                                        &hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;
        u32 size, size_bit;

        reqhdr->context = cpu_to_le32(_req->index<<8 |
                                        IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
        reqhdr->context_hi32 = 0;
        size = le32_to_cpu(reqhdr->size);

        if (size <= 256)
                size_bit = 0;
        else if (size <= 256*2)
                size_bit = 1;
        else if (size <= 256*3)
                size_bit = 2;
        else
                size_bit = 3;

        mv_inbound_write((_req->req_shifted_phy << 5) |
                MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

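/*
 * Descriptive note (added): post a request to an MVFrey IOP via the
 * inbound list.  The shifted physical address is split across the
 * header flags and context_hi32 fields, the write pointer wraps with a
 * toggle bit (CL_POINTER_TOGGLE), and the final readl() flushes the
 * posted write to the write-pointer register.
 */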
static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;
        u32 index;

        reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
                        IOP_REQUEST_FLAG_ADDR_BITS |
                        ((_req->req_shifted_phy >> 11) & 0xffff0000));
        reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                        (_req->index << 4) | reqhdr->type);
        reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
                        0xffffffff);

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr =
                        (dma_addr_t)_req->req_shifted_phy << 5;
        hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
        writel(hba->u.mvfrey.inlist_wptr,
                &(hba->u.mvfrey.mu->inbound_write_ptr));
        readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
        return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
        return 0;
}

static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
        u32 list_count = hba->u.mvfrey.list_count;

        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
                return -1;

        /* wait 100ms for MCU ready */
        msleep(100);

        writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
                        &(hba->u.mvfrey.mu->inbound_base));
        writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
                        &(hba->u.mvfrey.mu->inbound_base_high));

        writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
                        &(hba->u.mvfrey.mu->outbound_base));
        writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
                        &(hba->u.mvfrey.mu->outbound_base_high));

        writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
                        &(hba->u.mvfrey.mu->outbound_shadow_base));
        writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
                        &(hba->u.mvfrey.mu->outbound_shadow_base_high));

        hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
        *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
        hba->u.mvfrey.outlist_rptr = list_count - 1;
        return 0;
}

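/*
 * Descriptive note (added): queuecommand path.  Grab a free request
 * slot, validate the target, build the S/G list and request header,
 * and hand the request to the per-family post_req hook.  Returns
 * SCSI_MLQUEUE_HOST_BUSY when no slot is free so the midlayer retries.
 */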
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iop_request_scsi_command *req;
        int sg_count = 0;
        struct hptiop_request *_req;

        _req = get_req(hba);
        if (_req == NULL) {
                dprintk("hptiop_queuecmd : no free req\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        _req->scp = scp;

        dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
                        "req_index=%d, req=%p\n",
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
                        cpu_to_be32(((u32 *)scp->cmnd)[0]),
                        cpu_to_be32(((u32 *)scp->cmnd)[1]),
                        cpu_to_be32(((u32 *)scp->cmnd)[2]),
                        cpu_to_be32(((u32 *)scp->cmnd)[3]),
                        _req->index, _req->req_virt);

        scp->result = 0;

        if (scp->device->channel ||
                        (scp->device->id > hba->max_devices) ||
                        ((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
                scp->result = DID_BAD_TARGET << 16;
                free_req(hba, _req);
                goto cmd_done;
        }

        req = _req->req_virt;

        /* build S/G table */
        sg_count = hptiop_buildsgl(scp, req->sg_list);
        if (!sg_count)
                HPT_SCP(scp)->mapped = 0;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
        req->channel = scp->device->channel;
        req->target = scp->device->id;
        req->lun = scp->device->lun;
        req->header.size = cpu_to_le32(
                                sizeof(struct hpt_iop_request_scsi_command)
                                 - sizeof(struct hpt_iopsg)
                                 + sg_count * sizeof(struct hpt_iopsg));

        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
        hba->ops->post_req(hba, _req);
        return 0;

cmd_done:
        dprintk("scsi_done(scp=%p)\n", scp);
        scsi_done(scp);
        return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
        return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
                hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
        }

        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);

        if (atomic_read(&hba->resetting)) {
                /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }

        if (iop_send_sync_msg(hba,
                IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                dprintk("scsi%d: fail to start background task\n",
                                hba->host->host_no);
        }

        return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
        struct hptiop_hba *hba = (struct hptiop_hba *)scp->device->host->hostdata;

        printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
               scp->device->host->host_no, -1, -1);

        return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
                                          int queue_depth)
{
        struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

        if (queue_depth > hba->max_requests)
                queue_depth = hba->max_requests;
        return scsi_change_queue_depth(sdev, queue_depth);
}

static ssize_t hptiop_show_version(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *host = class_to_shost(dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
                                hba->firmware_version >> 24,
                                (hba->firmware_version >> 16) & 0xff,
                                (hba->firmware_version >> 8) & 0xff,
                                hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_fw_version,
};

static struct attribute *hptiop_host_attrs[] = {
        &hptiop_attr_version.attr,
        &hptiop_attr_fw_version.attr,
        NULL
};

ATTRIBUTE_GROUPS(hptiop_host);

static int hptiop_slave_config(struct scsi_device *sdev)
{
        if (sdev->type == TYPE_TAPE)
                blk_queue_max_hw_sectors(sdev->request_queue, 8192);

        return 0;
}

static struct scsi_host_template driver_template = {
        .module                     = THIS_MODULE,
        .name                       = driver_name,
        .queuecommand               = hptiop_queuecommand,
        .eh_host_reset_handler      = hptiop_reset,
        .info                       = hptiop_info,
        .emulated                   = 0,
        .proc_name                  = driver_name,
        .shost_groups               = hptiop_host_groups,
        .slave_configure            = hptiop_slave_config,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
        return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
        hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
                        0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
        if (hba->u.mv.internal_req)
                return 0;
        else
                return -1;
}

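/*
 * Descriptive note (added): MVFrey carves one coherent region into a
 * 0x800-byte internal request, the inbound and outbound descriptor
 * lists (list_count comes from the high half of inbound_conf_ctl),
 * and the 4-byte outbound shadow pointer updated by the controller.
 */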
static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
        u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
        char *p;
        dma_addr_t phy;

        BUG_ON(hba->max_request_size == 0);

        if (list_count == 0) {
                BUG_ON(1);
                return -1;
        }

        list_count >>= 16;

        hba->u.mvfrey.list_count = list_count;
        hba->u.mvfrey.internal_mem_size = 0x800 +
                        list_count * sizeof(struct mvfrey_inlist_entry) +
                        list_count * sizeof(struct mvfrey_outlist_entry) +
                        sizeof(int);

        p = dma_alloc_coherent(&hba->pcidev->dev,
                        hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
        if (!p)
                return -1;

        hba->u.mvfrey.internal_req.req_virt = p;
        hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
        hba->u.mvfrey.internal_req.scp = NULL;
        hba->u.mvfrey.internal_req.next = NULL;

        p += 0x800;
        phy += 0x800;

        hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
        hba->u.mvfrey.inlist_phy = phy;

        p += list_count * sizeof(struct mvfrey_inlist_entry);
        phy += list_count * sizeof(struct mvfrey_inlist_entry);

        hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
        hba->u.mvfrey.outlist_phy = phy;

        p += list_count * sizeof(struct mvfrey_outlist_entry);
        phy += list_count * sizeof(struct mvfrey_outlist_entry);

        hba->u.mvfrey.outlist_cptr = (__le32 *)p;
        hba->u.mvfrey.outlist_cptr_phy = phy;

        return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
        return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
        if (hba->u.mv.internal_req) {
                dma_free_coherent(&hba->pcidev->dev, 0x800,
                        hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
                return 0;
        } else
                return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
        if (hba->u.mvfrey.internal_req.req_virt) {
                dma_free_coherent(&hba->pcidev->dev,
                        hba->u.mvfrey.internal_mem_size,
                        hba->u.mvfrey.internal_req.req_virt,
                        (dma_addr_t)
                        hba->u.mvfrey.internal_req.req_shifted_phy << 5);
                return 0;
        } else
                return -1;
}

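/*
 * Descriptive note (added): PCI probe.  Enable the device, set the DMA
 * mask (64-bit where the family supports it, falling back to 32-bit),
 * map the BARs, fetch and apply the controller configuration, carve
 * out 32-byte-aligned request buffers, then register and scan the
 * SCSI host.  Error paths unwind in reverse order via the labels at
 * the end of the function.
 */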
1274 static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
1275 {
1276         struct Scsi_Host *host = NULL;
1277         struct hptiop_hba *hba;
1278         struct hptiop_adapter_ops *iop_ops;
1279         struct hpt_iop_request_get_config iop_config;
1280         struct hpt_iop_request_set_config set_config;
1281         dma_addr_t start_phy;
1282         void *start_virt;
1283         u32 offset, i, req_size;
1284         int rc;
1285
1286         dprintk("hptiop_probe(%p)\n", pcidev);
1287
1288         if (pci_enable_device(pcidev)) {
1289                 printk(KERN_ERR "hptiop: fail to enable pci device\n");
1290                 return -ENODEV;
1291         }
1292
1293         printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
1294                 pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
1295                 pcidev->irq);
1296
1297         pci_set_master(pcidev);
1298
1299         /* Enable 64bit DMA if possible */
1300         iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
1301         rc = dma_set_mask(&pcidev->dev,
1302                           DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
1303         if (rc)
1304                 rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1305
1306         if (rc) {
1307                 printk(KERN_ERR "hptiop: fail to set dma_mask\n");
1308                 goto disable_pci_device;
1309         }
1310
1311         if (pci_request_regions(pcidev, driver_name)) {
1312                 printk(KERN_ERR "hptiop: pci_request_regions failed\n");
1313                 goto disable_pci_device;
1314         }
1315
1316         host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
1317         if (!host) {
1318                 printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
1319                 goto free_pci_regions;
1320         }
1321
1322         hba = (struct hptiop_hba *)host->hostdata;
1323         memset(hba, 0, sizeof(struct hptiop_hba));
1324
1325         hba->ops = iop_ops;
1326         hba->pcidev = pcidev;
1327         hba->host = host;
1328         hba->initialized = 0;
1329         hba->iopintf_v2 = 0;
1330
1331         atomic_set(&hba->resetting, 0);
1332         atomic_set(&hba->reset_count, 0);
1333
1334         init_waitqueue_head(&hba->reset_wq);
1335         init_waitqueue_head(&hba->ioctl_wq);
1336
1337         host->max_lun = 128;
1338         host->max_channel = 0;
1339         host->io_port = 0;
1340         host->n_io_port = 0;
1341         host->irq = pcidev->irq;
1342
1343         if (hba->ops->map_pci_bar(hba))
1344                 goto free_scsi_host;
1345
1346         if (hba->ops->iop_wait_ready(hba, 20000)) {
1347                 printk(KERN_ERR "scsi%d: firmware not ready\n",
1348                                 hba->host->host_no);
1349                 goto unmap_pci_bar;
1350         }
1351
1352         if (hba->ops->family == MV_BASED_IOP) {
1353                 if (hba->ops->internal_memalloc(hba)) {
1354                         printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
1355                                 hba->host->host_no);
1356                         goto unmap_pci_bar;
1357                 }
1358         }
1359
1360         if (hba->ops->get_config(hba, &iop_config)) {
1361                 printk(KERN_ERR "scsi%d: get config failed\n",
1362                                 hba->host->host_no);
1363                 goto unmap_pci_bar;
1364         }
1365
1366         hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
1367                                 HPTIOP_MAX_REQUESTS);
1368         hba->max_devices = le32_to_cpu(iop_config.max_devices);
1369         hba->max_request_size = le32_to_cpu(iop_config.request_size);
1370         hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
1371         hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
1372         hba->interface_version = le32_to_cpu(iop_config.interface_version);
1373         hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
1374
1375         if (hba->ops->family == MVFREY_BASED_IOP) {
1376                 if (hba->ops->internal_memalloc(hba)) {
1377                         printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
1378                                 hba->host->host_no);
1379                         goto unmap_pci_bar;
1380                 }
1381                 if (hba->ops->reset_comm(hba)) {
1382                         printk(KERN_ERR "scsi%d: reset comm failed\n",
1383                                         hba->host->host_no);
1384                         goto unmap_pci_bar;
1385                 }
1386         }
1387
1388         if (hba->firmware_version > 0x01020000 ||
1389                         hba->interface_version > 0x01020000)
1390                 hba->iopintf_v2 = 1;
1391
1392         host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
1393         host->max_id = le32_to_cpu(iop_config.max_devices);
1394         host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
1395         host->can_queue = le32_to_cpu(iop_config.max_requests);
1396         host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
1397         host->max_cmd_len = 16;
1398
1399         req_size = sizeof(struct hpt_iop_request_scsi_command)
1400                 + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
1401         if ((req_size & 0x1f) != 0)
1402                 req_size = (req_size + 0x1f) & ~0x1f;
1403
1404         memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
1405         set_config.iop_id = cpu_to_le32(host->host_no);
1406         set_config.vbus_id = cpu_to_le16(host->host_no);
1407         set_config.max_host_request_size = cpu_to_le16(req_size);
1408
1409         if (hba->ops->set_config(hba, &set_config)) {
1410                 printk(KERN_ERR "scsi%d: set config failed\n",
1411                                 hba->host->host_no);
1412                 goto unmap_pci_bar;
1413         }
1414
1415         pci_set_drvdata(pcidev, host);
1416
1417         if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
1418                                         driver_name, hba)) {
1419                 printk(KERN_ERR "scsi%d: request irq %d failed\n",
1420                                         hba->host->host_no, pcidev->irq);
1421                 goto unmap_pci_bar;
1422         }
1423
1424         /* Allocate request mem */
1425
1426         dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
1427
1428         hba->req_size = req_size;
1429         hba->req_list = NULL;
1430
1431         for (i = 0; i < hba->max_requests; i++) {
1432                 start_virt = dma_alloc_coherent(&pcidev->dev,
1433                                         hba->req_size + 0x20,
1434                                         &start_phy, GFP_KERNEL);
1435
1436                 if (!start_virt) {
1437                         printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
1438                                                 hba->host->host_no);
1439                         goto free_request_mem;
1440                 }
1441
1442                 hba->dma_coherent[i] = start_virt;
1443                 hba->dma_coherent_handle[i] = start_phy;
1444
1445                 if ((start_phy & 0x1f) != 0) {
1446                         offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
1447                         start_phy += offset;
1448                         start_virt += offset;
1449                 }
1450
1451                 hba->reqs[i].next = NULL;
1452                 hba->reqs[i].req_virt = start_virt;
1453                 hba->reqs[i].req_shifted_phy = start_phy >> 5;
1454                 hba->reqs[i].index = i;
1455                 free_req(hba, &hba->reqs[i]);
1456         }
1457
1458         /* Enable Interrupt and start background task */
1459         if (hptiop_initialize_iop(hba))
1460                 goto free_request_mem;
1461
1462         if (scsi_add_host(host, &pcidev->dev)) {
1463                 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1464                                         hba->host->host_no);
1465                 goto free_request_mem;
1466         }
1467
1468         scsi_scan_host(host);
1469
1470         dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
1471         return 0;
1472
free_request_mem:
        for (i = 0; i < hba->max_requests; i++) {
                if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
                        dma_free_coherent(&hba->pcidev->dev,
                                        hba->req_size + 0x20,
                                        hba->dma_coherent[i],
                                        hba->dma_coherent_handle[i]);
                else
                        break;
        }

        free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
        hba->ops->internal_memfree(hba);

        hba->ops->unmap_pci_bar(hba);

free_scsi_host:
        scsi_host_put(host);

free_pci_regions:
        pci_release_regions(pcidev);

disable_pci_device:
        pci_disable_device(pcidev);

        dprintk("scsi%d: hptiop_probe failed\n", host ? host->host_no : 0);
        return -ENODEV;
}

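/*
 * Called on system reboot/power-off, and explicitly from
 * hptiop_remove(): tell the firmware to stop, then mask all outbound
 * interrupts so the controller cannot raise further callbacks.
 */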
static void hptiop_shutdown(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("hptiop_shutdown(%p)\n", hba);

        /* stop the iop */
        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                printk(KERN_ERR "scsi%d: timeout shutting down the iop\n",
                                        hba->host->host_no);

        /* disable all outbound interrupts */
        hba->ops->disable_intr(hba);
}

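/*
 * In each disable_intr variant below, the readl() that follows the
 * writel() flushes posted PCI writes, ensuring the new interrupt mask
 * has reached the device before the function returns.
 */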
static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
        u32 int_mask;

        int_mask = readl(&hba->u.itl.iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
                &hba->u.itl.iop->outbound_intmask);
        readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
        writel(0, &hba->u.mv.regs->outbound_intmask);
        readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
        writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
        readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
        writel(0, &(hba->u.mvfrey.mu->isr_enable));
        readl(&(hba->u.mvfrey.mu->isr_enable));
        writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
        readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

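/*
 * Teardown mirrors probe: unregister from the SCSI midlayer first so
 * no new commands arrive, quiesce the firmware, then release the IRQ,
 * DMA buffers, internal memory and PCI resources.
 */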
static void hptiop_remove(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        u32 i;

        dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

        scsi_remove_host(host);

        hptiop_shutdown(pcidev);

        free_irq(hba->pcidev->irq, hba);

        for (i = 0; i < hba->max_requests; i++) {
                if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
                        dma_free_coherent(&hba->pcidev->dev,
                                        hba->req_size + 0x20,
                                        hba->dma_coherent[i],
                                        hba->dma_coherent_handle[i]);
                else
                        break;
        }

        hba->ops->internal_memfree(hba);

        hba->ops->unmap_pci_bar(hba);

        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
        pci_disable_device(hba->pcidev);

        scsi_host_put(host);
}

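/*
 * Per-family dispatch tables. Each supported controller family --
 * Intel-based IOP (itl), Marvell (mv) and Marvell Frey (mvfrey) --
 * has its own register map and queue handling; the rest of the driver
 * calls through hba->ops. Note the mv family is limited to 33-bit DMA
 * addressing, while the other two support full 64-bit masks.
 */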
static struct hptiop_adapter_ops hptiop_itl_ops = {
        .family            = INTEL_BASED_IOP,
        .iop_wait_ready    = iop_wait_ready_itl,
        .internal_memalloc = hptiop_internal_memalloc_itl,
        .internal_memfree  = hptiop_internal_memfree_itl,
        .map_pci_bar       = hptiop_map_pci_bar_itl,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = iop_get_config_itl,
        .set_config        = iop_set_config_itl,
        .iop_intr          = iop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
        .hw_dma_bit_mask   = 64,
        .reset_comm        = hptiop_reset_comm_itl,
        .host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .family            = MV_BASED_IOP,
        .iop_wait_ready    = iop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .map_pci_bar       = hptiop_map_pci_bar_mv,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = iop_get_config_mv,
        .set_config        = iop_set_config_mv,
        .iop_intr          = iop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
        .hw_dma_bit_mask   = 33,
        .reset_comm        = hptiop_reset_comm_mv,
        .host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
        .family            = MVFREY_BASED_IOP,
        .iop_wait_ready    = iop_wait_ready_mvfrey,
        .internal_memalloc = hptiop_internal_memalloc_mvfrey,
        .internal_memfree  = hptiop_internal_memfree_mvfrey,
        .map_pci_bar       = hptiop_map_pci_bar_mvfrey,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_mvfrey,
        .enable_intr       = hptiop_enable_intr_mvfrey,
        .disable_intr      = hptiop_disable_intr_mvfrey,
        .get_config        = iop_get_config_mvfrey,
        .set_config        = iop_set_config_mvfrey,
        .iop_intr          = iop_intr_mvfrey,
        .post_msg          = hptiop_post_msg_mvfrey,
        .post_req          = hptiop_post_req_mvfrey,
        .hw_dma_bit_mask   = 64,
        .reset_comm        = hptiop_reset_comm_mvfrey,
        .host_phy_flag     = cpu_to_le64(1),
};

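/*
 * PCI IDs, grouped by adapter family: the driver_data field carries
 * the hptiop_adapter_ops table to use for that device.
 */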
static const struct pci_device_id hptiop_id_table[] = {
        { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
        { PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
        {},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
        .name       = driver_name,
        .id_table   = hptiop_id_table,
        .probe      = hptiop_probe,
        .remove     = hptiop_remove,
        .shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
        printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
        return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
        pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");