2 *******************************************************************************
4 ** FILE NAME : arcmsr_hba.c
5 ** BY : Nick Cheng, C.L. Huang
6 ** Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
10 ** Web site: www.areca.com.tw
11 ** E-mail: support@areca.com.tw
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
24 ** 1. Redistributions of source code must retain the above copyright
25 ** notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 ** notice, this list of conditions and the following disclaimer in the
28 ** documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 ** derived from this software without specific prior written permission.
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
45 *******************************************************************************
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/spinlock.h>
50 #include <linux/pci_ids.h>
51 #include <linux/interrupt.h>
52 #include <linux/moduleparam.h>
53 #include <linux/errno.h>
54 #include <linux/types.h>
55 #include <linux/delay.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/timer.h>
58 #include <linux/slab.h>
59 #include <linux/pci.h>
60 #include <linux/aer.h>
61 #include <linux/circ_buf.h>
64 #include <linux/uaccess.h>
65 #include <scsi/scsi_host.h>
66 #include <scsi/scsi.h>
67 #include <scsi/scsi_cmnd.h>
68 #include <scsi/scsi_tcq.h>
69 #include <scsi/scsi_device.h>
70 #include <scsi/scsi_transport.h>
71 #include <scsi/scsicam.h>
/* Module metadata and load-time tunables. All parameters are read-only via
 * sysfs (S_IRUGO) and can only be set at module load time. */
73 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
74 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
/* Prefer MSI-X interrupts when the platform supports them (1 = on). */
78 static int msix_enable = 1;
79 module_param(msix_enable, int, S_IRUGO);
80 MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
/* Fall back to MSI when MSI-X is disabled or unavailable (1 = on). */
82 static int msi_enable = 1;
83 module_param(msi_enable, int, S_IRUGO);
84 MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
/* Host-wide outstanding command limit; validated against min/max in probe. */
86 static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
87 module_param(host_can_queue, int, S_IRUGO);
88 MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
/* Per-LUN queue depth; validated against min/max in probe. */
90 static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
91 module_param(cmd_per_lun, int, S_IRUGO);
92 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
/* Opt-in to a 64-bit DMA mask (see arcmsr_set_dma_mask); default is 32-bit. */
94 static int dma_mask_64 = 0;
95 module_param(dma_mask_64, int, S_IRUGO);
96 MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
/* When enabled, a periodic timer pushes host date/time to the IOP firmware. */
98 static int set_date_time = 0;
99 module_param(set_date_time, int, S_IRUGO);
100 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
/* Sleep interval (seconds) and retry count used by the reset/recovery paths. */
102 #define ARCMSR_SLEEPTIME 10
103 #define ARCMSR_RETRYCOUNT 12
/* Forward declarations for the driver's internal entry points. The hbaA..hbaE
 * suffixes refer to the five Areca message-unit register layouts (adapter
 * types A through E) dispatched on acb->adapter_type. */
105 static wait_queue_head_t wait_q;
106 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
107 struct scsi_cmnd *cmd);
108 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
109 static int arcmsr_abort(struct scsi_cmnd *);
110 static int arcmsr_bus_reset(struct scsi_cmnd *);
111 static int arcmsr_bios_param(struct scsi_device *sdev,
112 struct block_device *bdev, sector_t capacity, int *info);
113 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
114 static int arcmsr_probe(struct pci_dev *pdev,
115 const struct pci_device_id *id);
116 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
117 static int arcmsr_resume(struct pci_dev *pdev);
118 static void arcmsr_remove(struct pci_dev *pdev);
119 static void arcmsr_shutdown(struct pci_dev *pdev);
120 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
121 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
122 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
123 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
125 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
126 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
127 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
128 static void arcmsr_request_device_map(struct timer_list *t);
129 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
130 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
131 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
132 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
133 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
134 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
135 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
136 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
137 static const char *arcmsr_info(struct Scsi_Host *);
138 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
139 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
140 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
141 static void arcmsr_set_iop_datetime(struct timer_list *);
/* .change_queue_depth handler: clamp the requested per-device queue depth to
 * the driver maximum, then apply it via the SCSI midlayer. Returns the new
 * depth (scsi_change_queue_depth's return value). */
142 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
144 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
145 queue_depth = ARCMSR_MAX_CMD_PERLUN;
146 return scsi_change_queue_depth(sdev, queue_depth);
/* SCSI midlayer host template. can_queue/max_sectors/sg_tablesize are only
 * defaults here; arcmsr_probe and arcmsr_alloc_ccb_pool overwrite them with
 * values negotiated from firmware and module parameters. */
149 static struct scsi_host_template arcmsr_scsi_host_template = {
150 .module = THIS_MODULE,
151 .name = "Areca SAS/SATA RAID driver",
153 .queuecommand = arcmsr_queue_command,
154 .eh_abort_handler = arcmsr_abort,
155 .eh_bus_reset_handler = arcmsr_bus_reset,
156 .bios_param = arcmsr_bios_param,
157 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
158 .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
159 .this_id = ARCMSR_SCSI_INITIATOR_ID,
160 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
161 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
162 .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
163 .shost_attrs = arcmsr_host_attrs,
/* PCI ID table: maps each supported Areca device ID to its message-unit
 * layout (ACB_ADAPTER_TYPE_A..E), carried in driver_data and consumed by
 * arcmsr_probe via id->driver_data. */
167 static struct pci_device_id arcmsr_device_id_table[] = {
168 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
169 .driver_data = ACB_ADAPTER_TYPE_A},
170 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
171 .driver_data = ACB_ADAPTER_TYPE_A},
172 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
173 .driver_data = ACB_ADAPTER_TYPE_A},
174 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
175 .driver_data = ACB_ADAPTER_TYPE_A},
176 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
177 .driver_data = ACB_ADAPTER_TYPE_A},
178 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
179 .driver_data = ACB_ADAPTER_TYPE_B},
180 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
181 .driver_data = ACB_ADAPTER_TYPE_B},
182 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
183 .driver_data = ACB_ADAPTER_TYPE_B},
184 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
185 .driver_data = ACB_ADAPTER_TYPE_B},
186 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
187 .driver_data = ACB_ADAPTER_TYPE_A},
188 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
189 .driver_data = ACB_ADAPTER_TYPE_D},
190 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
191 .driver_data = ACB_ADAPTER_TYPE_A},
192 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
193 .driver_data = ACB_ADAPTER_TYPE_A},
194 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
195 .driver_data = ACB_ADAPTER_TYPE_A},
196 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
197 .driver_data = ACB_ADAPTER_TYPE_A},
198 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
199 .driver_data = ACB_ADAPTER_TYPE_A},
200 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
201 .driver_data = ACB_ADAPTER_TYPE_A},
202 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
203 .driver_data = ACB_ADAPTER_TYPE_A},
204 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
205 .driver_data = ACB_ADAPTER_TYPE_A},
206 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
207 .driver_data = ACB_ADAPTER_TYPE_A},
208 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
209 .driver_data = ACB_ADAPTER_TYPE_C},
210 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
211 .driver_data = ACB_ADAPTER_TYPE_E},
212 {0, 0}, /* Terminating entry */
214 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver descriptor wiring probe/remove/PM/shutdown callbacks.
 * NOTE(review): uses the legacy .suspend/.resume PCI PM hooks rather than
 * dev_pm_ops; fine for the kernel version this file targets. */
216 static struct pci_driver arcmsr_pci_driver = {
218 .id_table = arcmsr_device_id_table,
219 .probe = arcmsr_probe,
220 .remove = arcmsr_remove,
221 .suspend = arcmsr_suspend,
222 .resume = arcmsr_resume,
223 .shutdown = arcmsr_shutdown,
226 ****************************************************************************
227 ****************************************************************************
/* Release the secondary DMA-coherent I/O queue buffer (dma_coherent2) that
 * arcmsr_alloc_io_queue allocated. Only adapter types B, D and E allocate
 * this buffer; other types fall through with nothing to free. */
230 static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
232 switch (acb->adapter_type) {
233 case ACB_ADAPTER_TYPE_B:
234 case ACB_ADAPTER_TYPE_D:
235 case ACB_ADAPTER_TYPE_E: {
236 dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
237 acb->dma_coherent2, acb->dma_coherent_handle2);
/* ioremap the adapter's register BAR(s) according to adapter type and stash
 * the mappings in the ACB (pmuA/pmuC/pmuE or mem_base0/mem_base1). Types C
 * and E additionally acknowledge/clear any pending doorbell interrupt so the
 * driver starts from a known state. Returns true on success, false if any
 * mapping fails (a notice is logged). */
243 static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
245 struct pci_dev *pdev = acb->pdev;
246 switch (acb->adapter_type){
247 case ACB_ADAPTER_TYPE_A:{
/* Type A: single register window in BAR 0. */
248 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
250 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
255 case ACB_ADAPTER_TYPE_B:{
/* Type B: doorbells live in BAR 0, message buffers in BAR 2. */
256 void __iomem *mem_base0, *mem_base1;
257 mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
259 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
262 mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
265 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
268 acb->mem_base0 = mem_base0;
269 acb->mem_base1 = mem_base1;
272 case ACB_ADAPTER_TYPE_C:{
/* Type C: registers in BAR 1; clear a stale message-done doorbell if set. */
273 acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
275 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
278 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
279 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
284 case ACB_ADAPTER_TYPE_D: {
285 void __iomem *mem_base0;
286 unsigned long addr, range;
288 addr = (unsigned long)pci_resource_start(pdev, 0);
289 range = pci_resource_len(pdev, 0);
290 mem_base0 = ioremap(addr, range);
292 pr_notice("arcmsr%d: memory mapping region fail\n",
296 acb->mem_base0 = mem_base0;
299 case ACB_ADAPTER_TYPE_E: {
/* Type E: registers in BAR 1; reset interrupt status and synchronize the
 * in/out doorbell shadow counters to zero. */
300 acb->pmuE = ioremap(pci_resource_start(pdev, 1),
301 pci_resource_len(pdev, 1));
303 pr_notice("arcmsr%d: memory mapping region fail \n",
307 writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
308 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
309 acb->in_doorbell = 0;
310 acb->out_doorbell = 0;
/* Undo arcmsr_remap_pciregion: iounmap whichever BAR mappings the adapter
 * type created. Counterpart of the per-type mapping logic above. */
317 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
319 switch (acb->adapter_type) {
320 case ACB_ADAPTER_TYPE_A:{
324 case ACB_ADAPTER_TYPE_B:{
325 iounmap(acb->mem_base0);
326 iounmap(acb->mem_base1);
330 case ACB_ADAPTER_TYPE_C:{
334 case ACB_ADAPTER_TYPE_D:
335 iounmap(acb->mem_base0);
337 case ACB_ADAPTER_TYPE_E:
/* Top-level IRQ handler registered with request_irq; dev_id is the ACB.
 * Delegates all work to arcmsr_interrupt and propagates its irqreturn_t. */
343 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
345 irqreturn_t handle_state;
346 struct AdapterControlBlock *acb = dev_id;
348 handle_state = arcmsr_interrupt(acb);
/* .bios_param handler: report a BIOS-compatible CHS geometry. If a valid
 * partition table already provides one (scsi_partsize), use it; otherwise
 * derive heads/sectors from capacity, retrying with larger head/sector
 * counts when cylinders would exceed the BIOS limit of 1024. */
352 static int arcmsr_bios_param(struct scsi_device *sdev,
353 struct block_device *bdev, sector_t capacity, int *geom)
355 int heads, sectors, cylinders, total_capacity;
357 if (scsi_partsize(bdev, capacity, geom))
/* NOTE(review): total_capacity is int while capacity is sector_t — large
 * devices truncate here; geometry is advisory only. */
360 total_capacity = capacity;
363 cylinders = total_capacity / (heads * sectors);
364 if (cylinders > 1024) {
367 cylinders = total_capacity / (heads * sectors);
/* Poll the type-A outbound interrupt status for MESSAGE0 completion, up to
 * 2000 iterations (comment says ~20 s total). Acknowledges the interrupt by
 * writing the bit back. Returns nonzero on success, 0 on timeout. */
375 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
377 struct MessageUnit_A __iomem *reg = acb->pmuA;
380 for (i = 0; i < 2000; i++) {
381 if (readl(&reg->outbound_intstatus) &
382 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
383 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
384 &reg->outbound_intstatus);
388 } /* max 20 seconds */
/* Poll the type-B iop2drv doorbell for MESSAGE_CMD_DONE, up to 2000
 * iterations. On completion, clears the doorbell and signals end-of-
 * interrupt back to the IOP. Returns nonzero on success, 0 on timeout. */
393 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
395 struct MessageUnit_B *reg = acb->pmuB;
398 for (i = 0; i < 2000; i++) {
399 if (readl(reg->iop2drv_doorbell)
400 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
401 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
402 reg->iop2drv_doorbell);
403 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
404 reg->drv2iop_doorbell);
408 } /* max 20 seconds */
/* Poll the type-C outbound doorbell for MESSAGE_CMD_DONE, up to 2000
 * iterations, clearing the doorbell via the dedicated clear register.
 * Returns nonzero on success, 0 on timeout. */
413 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
415 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
418 for (i = 0; i < 2000; i++) {
419 if (readl(&phbcmu->outbound_doorbell)
420 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
421 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
422 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
426 } /* max 20 seconds */
/* Poll the type-D (ARC1214) outbound doorbell for MESSAGE_CMD_DONE, up to
 * 2000 iterations, acknowledging by writing the bit back. Returns true on
 * success, false on timeout. */
431 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
433 struct MessageUnit_D *reg = pACB->pmuD;
436 for (i = 0; i < 2000; i++) {
437 if (readl(reg->outbound_doorbell)
438 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
439 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
440 reg->outbound_doorbell);
444 } /* max 20 seconds */
/* Poll the type-E doorbell for message completion. Type E signals by
 * TOGGLING the MESSAGE_CMD_DONE bit, so completion is detected by XOR-ing
 * the live doorbell against the cached in_doorbell shadow. On success the
 * interrupt status is cleared and the shadow updated. Returns true on
 * success, false after 2000 polls (timeout). */
448 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
451 uint32_t read_doorbell;
452 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
454 for (i = 0; i < 2000; i++) {
455 read_doorbell = readl(&phbcmu->iobound_doorbell);
456 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
457 writel(0, &phbcmu->host_int_status); /*clear interrupt*/
458 pACB->in_doorbell = read_doorbell;
462 } /* max 20 seconds */
/* Ask type-A firmware to flush its write cache: post FLUSH_CACHE to inbound
 * message 0 and wait for the ack, retrying (with a log notice) up to 30
 * times before giving up. */
466 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
468 struct MessageUnit_A __iomem *reg = acb->pmuA;
469 int retry_count = 30;
470 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
472 if (arcmsr_hbaA_wait_msgint_ready(acb))
476 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
477 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
479 } while (retry_count != 0);
/* Type-B cache flush: post FLUSH_CACHE via the drv2iop doorbell and wait for
 * the ack, retrying up to 30 times with a log notice on each timeout. */
482 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
484 struct MessageUnit_B *reg = acb->pmuB;
485 int retry_count = 30;
486 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
488 if (arcmsr_hbaB_wait_msgint_ready(acb))
492 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
493 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
495 } while (retry_count != 0);
/* Type-C cache flush: write the FLUSH_CACHE message, then ring the inbound
 * doorbell to tell the IOP a message is pending; wait for the ack with up to
 * 30 retries (comment notes the enlarged ~10 minute total wait). */
498 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
500 struct MessageUnit_C __iomem *reg = pACB->pmuC;
501 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
502 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
503 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
505 if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
509 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
510 timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
512 } while (retry_count != 0);
/* Type-D cache flush: post FLUSH_CACHE to inbound message 0 and wait for the
 * ack, retrying up to 15 times with a log notice on each timeout. */
516 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
518 int retry_count = 15;
519 struct MessageUnit_D *reg = pACB->pmuD;
521 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
523 if (arcmsr_hbaD_wait_msgint_ready(pACB))
527 pr_notice("arcmsr%d: wait 'flush adapter "
528 "cache' timeout, retry count down = %d\n",
529 pACB->host->host_no, retry_count);
530 } while (retry_count != 0);
/* Type-E cache flush: write the FLUSH_CACHE message, toggle the driver's
 * MESSAGE_CMD_DONE bit in the out_doorbell shadow (type E uses toggle
 * semantics), ring the doorbell, and wait for the ack with up to 30
 * retries. */
533 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
535 int retry_count = 30;
536 struct MessageUnit_E __iomem *reg = pACB->pmuE;
538 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
539 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
540 writel(pACB->out_doorbell, &reg->iobound_doorbell);
542 if (arcmsr_hbaE_wait_msgint_ready(pACB))
545 pr_notice("arcmsr%d: wait 'flush adapter "
546 "cache' timeout, retry count down = %d\n",
547 pACB->host->host_no, retry_count);
548 } while (retry_count != 0);
/* Dispatch the adapter-cache flush to the per-type implementation based on
 * acb->adapter_type. */
551 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
553 switch (acb->adapter_type) {
555 case ACB_ADAPTER_TYPE_A: {
556 arcmsr_hbaA_flush_cache(acb);
560 case ACB_ADAPTER_TYPE_B: {
561 arcmsr_hbaB_flush_cache(acb);
564 case ACB_ADAPTER_TYPE_C: {
565 arcmsr_hbaC_flush_cache(acb);
568 case ACB_ADAPTER_TYPE_D:
569 arcmsr_hbaD_flush_cache(acb);
571 case ACB_ADAPTER_TYPE_E:
572 arcmsr_hbaE_flush_cache(acb);
/* Populate the type-B MessageUnit_B register-pointer table. The ARC1203
 * uses a different doorbell register layout within BAR 0 than the other
 * type-B boards; the message buffers always come from BAR 1 (MEM_BASE1). */
577 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
579 struct MessageUnit_B *reg = acb->pmuB;
581 if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
582 reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
583 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
584 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
585 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
587 reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
588 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
589 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
590 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
592 reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
593 reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
594 reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
/* Populate the type-D (ARC1214) MessageUnit_D register-pointer table.
 * Every register lives at a fixed offset inside the BAR-0 mapping
 * (MEM_BASE0); this just records the ioremapped addresses for later use. */
597 static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
599 struct MessageUnit_D *reg = acb->pmuD;
601 reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
602 reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
603 reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
604 reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
605 reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
606 reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
607 reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
608 reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
609 reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
610 reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
611 reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
612 reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
613 reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
614 reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
615 reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
616 reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
617 reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
618 reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
619 reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
620 reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
621 reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
622 reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
623 reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
624 reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
625 reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
626 reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
/* Allocate the per-type DMA-coherent I/O queue / message-unit buffer
 * (dma_coherent2). Type B and D place their whole MessageUnit struct in this
 * buffer; type E uses it as the completion queue (sized for
 * ARCMSR_MAX_HBE_DONEQUEUE entries plus 128 bytes of slack). Sizes are
 * rounded up to a 32-byte multiple. Freed by arcmsr_free_io_queue. Returns
 * true on success, false if the DMA allocation fails (a notice is logged). */
629 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
633 dma_addr_t dma_coherent_handle;
634 struct pci_dev *pdev = acb->pdev;
636 switch (acb->adapter_type) {
637 case ACB_ADAPTER_TYPE_B: {
638 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
639 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
640 &dma_coherent_handle, GFP_KERNEL);
642 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
645 acb->dma_coherent_handle2 = dma_coherent_handle;
646 acb->dma_coherent2 = dma_coherent;
647 acb->pmuB = (struct MessageUnit_B *)dma_coherent;
648 arcmsr_hbaB_assign_regAddr(acb);
651 case ACB_ADAPTER_TYPE_D: {
652 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
653 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
654 &dma_coherent_handle, GFP_KERNEL);
656 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
659 acb->dma_coherent_handle2 = dma_coherent_handle;
660 acb->dma_coherent2 = dma_coherent;
661 acb->pmuD = (struct MessageUnit_D *)dma_coherent;
662 arcmsr_hbaD_assign_regAddr(acb);
665 case ACB_ADAPTER_TYPE_E: {
666 uint32_t completeQ_size;
667 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
668 acb->ioqueue_size = roundup(completeQ_size, 32);
669 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
670 &dma_coherent_handle, GFP_KERNEL);
672 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
675 acb->dma_coherent_handle2 = dma_coherent_handle;
676 acb->dma_coherent2 = dma_coherent;
677 acb->pCompletionQ = dma_coherent;
678 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
679 acb->doneq_index = 0;
/* Allocate one large DMA-coherent region holding every CommandControlBlock
 * plus the I/O queue area, then carve it into per-CCB slots:
 *  - marks all target/LUN device states ARECA_RAID_GONE;
 *  - derives max transfer length and SG-table size from the firmware config
 *    version (>= 3 encodes an SG page multiplier in bits 8..15);
 *  - stores each CCB's DMA address (shifted >> 5 for types A/B, raw for
 *    C/D/E) and links it into acb->ccb_free_list;
 *  - if the pool would cross a 4 GiB boundary (upper 32 bits of the DMA
 *    address change), truncates can_queue at that point;
 *  - whatever follows the last CCB becomes the dma_coherent2 queue area,
 *    re-running the per-type register/queue assignment.
 * Returns 0 on success; logs and bails if dma_alloc_coherent fails. */
688 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
690 struct pci_dev *pdev = acb->pdev;
692 dma_addr_t dma_coherent_handle;
693 struct CommandControlBlock *ccb_tmp;
695 unsigned long cdb_phyaddr, next_ccb_phy;
696 unsigned long roundup_ccbsize;
697 unsigned long max_xfer_len;
698 unsigned long max_sg_entrys;
699 uint32_t firm_config_version, curr_phy_upper32;
/* Until the device map arrives, every target/LUN is considered absent. */
701 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
702 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
703 acb->devstate[i][j] = ARECA_RAID_GONE;
705 max_xfer_len = ARCMSR_MAX_XFER_LEN;
706 max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
707 firm_config_version = acb->firm_cfg_version;
708 if((firm_config_version & 0xFF) >= 3){
709 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
710 max_sg_entrys = (max_xfer_len/4096);
712 acb->host->max_sectors = max_xfer_len/512;
713 acb->host->sg_tablesize = max_sg_entrys;
/* CCB size = fixed header + variable SG list, rounded to 32 bytes. */
714 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
715 acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
716 acb->uncache_size += acb->ioqueue_size;
717 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
719 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
722 acb->dma_coherent = dma_coherent;
723 acb->dma_coherent_handle = dma_coherent_handle;
724 memset(dma_coherent, 0, acb->uncache_size);
725 acb->ccbsize = roundup_ccbsize;
726 ccb_tmp = dma_coherent;
727 curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
/* Offset for translating a CCB's virtual address to its DMA address. */
728 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
729 for(i = 0; i < acb->maxFreeCCB; i++){
730 cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
731 switch (acb->adapter_type) {
732 case ACB_ADAPTER_TYPE_A:
733 case ACB_ADAPTER_TYPE_B:
/* Types A/B post 32-byte-aligned addresses shifted right by 5. */
734 ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
736 case ACB_ADAPTER_TYPE_C:
737 case ACB_ADAPTER_TYPE_D:
738 case ACB_ADAPTER_TYPE_E:
739 ccb_tmp->cdb_phyaddr = cdb_phyaddr;
742 acb->pccb_pool[i] = ccb_tmp;
744 ccb_tmp->smid = (u32)i << 16;
745 INIT_LIST_HEAD(&ccb_tmp->list);
746 next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
/* Stop early if the next CCB would cross a 4 GiB DMA boundary. */
747 if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
749 acb->host->can_queue = i;
753 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
754 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
755 dma_coherent_handle = next_ccb_phy;
/* Remaining space after the CCB array is the message/queue area. */
757 acb->dma_coherent_handle2 = dma_coherent_handle;
758 acb->dma_coherent2 = ccb_tmp;
759 switch (acb->adapter_type) {
760 case ACB_ADAPTER_TYPE_B:
761 acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
762 arcmsr_hbaB_assign_regAddr(acb);
764 case ACB_ADAPTER_TYPE_D:
765 acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
766 arcmsr_hbaD_assign_regAddr(acb);
768 case ACB_ADAPTER_TYPE_E:
769 acb->pCompletionQ = acb->dma_coherent2;
770 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
771 acb->doneq_index = 0;
/* Bottom-half worker (arcmsr_do_message_isr_bh): reacts to a GET_CONFIG
 * message completion by reading the firmware's 16-byte device map from the
 * per-type rwbuffer (dword 21 onward) and diffing it against the cached
 * acb->device_map. For each target whose bitmap changed, newly-present LUNs
 * are hot-added via scsi_add_device and newly-absent LUNs are removed via
 * scsi_device_lookup + scsi_remove_device. The rwbuffer must first carry
 * ARCMSR_SIGNATURE_GET_CONFIG or the map is ignored. */
779 struct AdapterControlBlock *acb = container_of(work,
780 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
781 char *acb_dev_map = (char *)acb->device_map;
782 uint32_t __iomem *signature = NULL;
783 char __iomem *devicemap = NULL;
785 struct scsi_device *psdev;
788 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
789 switch (acb->adapter_type) {
790 case ACB_ADAPTER_TYPE_A: {
791 struct MessageUnit_A __iomem *reg = acb->pmuA;
793 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
794 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
797 case ACB_ADAPTER_TYPE_B: {
798 struct MessageUnit_B *reg = acb->pmuB;
800 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
801 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
804 case ACB_ADAPTER_TYPE_C: {
805 struct MessageUnit_C __iomem *reg = acb->pmuC;
807 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
808 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
811 case ACB_ADAPTER_TYPE_D: {
812 struct MessageUnit_D *reg = acb->pmuD;
814 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
815 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
818 case ACB_ADAPTER_TYPE_E: {
819 struct MessageUnit_E __iomem *reg = acb->pmuE;
821 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
822 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
826 atomic_inc(&acb->rq_map_token);
/* Bail unless the firmware stamped the buffer with the GET_CONFIG magic. */
827 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
829 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
831 temp = readb(devicemap);
832 diff = (*acb_dev_map) ^ temp;
835 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
/* Bit set in diff and in new map: device appeared -> hot add. */
837 if ((diff & 0x01) == 1 &&
838 (temp & 0x01) == 1) {
839 scsi_add_device(acb->host,
/* Bit set in diff but clear in new map: device vanished -> remove. */
841 } else if ((diff & 0x01) == 1
842 && (temp & 0x01) == 0) {
843 psdev = scsi_device_lookup(acb->host,
846 scsi_remove_device(psdev);
847 scsi_device_put(psdev);
/* Allocate interrupt vectors and register handlers, in preference order:
 * MSI-X (up to ARCMST_NUM_MSIX_VECTORS, unless msix_enable==0), then MSI
 * (unless msi_enable==0), then a legacy INTx line. Each allocated vector is
 * bound to arcmsr_do_interrupt with the ACB as dev_id; on a request_irq
 * failure, already-registered vectors are freed and the vectors released. */
860 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
865 if (msix_enable == 0)
867 nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
870 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
874 if (msi_enable == 1) {
875 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
877 dev_info(&pdev->dev, "msi enabled\n");
881 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
888 acb->vector_count = nvec;
889 for (i = 0; i < nvec; i++) {
890 if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
891 flags, "arcmsr", acb)) {
892 pr_warn("arcmsr%d: request_irq =%d failed!\n",
893 acb->host->host_no, pci_irq_vector(pdev, i));
/* Error unwind: free any IRQs registered so far, then the vectors. */
901 free_irq(pci_irq_vector(pdev, i), acb);
902 pci_free_irq_vectors(pdev);
/* Set up the periodic device-map polling machinery: the bottom-half work
 * item, the request tokens (both seeded to 16), the FW_NORMAL flag, and the
 * eternal_timer that fires arcmsr_request_device_map after 6 s. */
906 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
908 INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
909 atomic_set(&pacb->rq_map_token, 16);
910 atomic_set(&pacb->ante_token_value, 16);
911 pacb->fw_flag = FW_NORMAL;
912 timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
913 pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
914 add_timer(&pacb->eternal_timer);
/* Arm the refresh_timer that pushes host date/time to the IOP
 * (arcmsr_set_iop_datetime), first firing 60 s after load. Only armed when
 * the set_date_time module parameter is enabled (see arcmsr_probe). */
917 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
919 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
920 pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
921 add_timer(&pacb->refresh_timer);
/* Choose and apply the DMA mask. 64-bit DMA is attempted only when the
 * dma_mask_64 parameter allows it (type-A adapters additionally require the
 * parameter); otherwise, or if any 64-bit call fails, fall back to a 32-bit
 * streaming + coherent mask. Returns 0 on success, nonzero on failure. */
924 static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
926 struct pci_dev *pcidev = acb->pdev;
929 if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
930 dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
932 if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
933 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
934 printk("arcmsr: set DMA 64 mask failed\n");
/* 32-bit fallback path. */
939 if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
940 dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
941 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
942 printk("arcmsr: set DMA 32-bit mask failed\n");
/* PCI probe: bring up one adapter end-to-end.
 * Sequence: enable the PCI device -> allocate the Scsi_Host (ACB lives in
 * hostdata) -> set the DMA mask -> configure host limits from the validated
 * module parameters -> request BAR regions -> init locks/flags/free list ->
 * ioremap registers -> allocate the I/O queue -> read the firmware spec ->
 * allocate the CCB pool -> register with the SCSI midlayer -> request IRQs
 * -> initialize the IOP -> arm the device-map (and optional date/time)
 * timers -> create sysfs attributes -> scan the host.
 * Each failure unwinds everything acquired so far via the goto ladder at
 * the bottom (labels elided in this view). Returns 0 on success. */
949 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
951 struct Scsi_Host *host;
952 struct AdapterControlBlock *acb;
955 error = pci_enable_device(pdev);
959 host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
961 goto pci_disable_dev;
963 init_waitqueue_head(&wait_q);
964 bus = pdev->bus->number;
965 dev_fun = pdev->devfn;
966 acb = (struct AdapterControlBlock *) host->hostdata;
967 memset(acb,0,sizeof(struct AdapterControlBlock));
969 acb->adapter_type = id->driver_data;
970 if (arcmsr_set_dma_mask(acb))
971 goto scsi_host_release;
973 host->max_lun = ARCMSR_MAX_TARGETLUN;
974 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
975 host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
/* Clamp out-of-range module parameters back to their defaults. */
976 if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
977 host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
978 host->can_queue = host_can_queue; /* max simultaneous cmds */
979 if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
980 cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
981 host->cmd_per_lun = cmd_per_lun;
982 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
983 host->unique_id = (bus << 8) | dev_fun;
984 pci_set_drvdata(pdev, host);
985 pci_set_master(pdev);
986 error = pci_request_regions(pdev, "arcmsr");
988 goto scsi_host_release;
990 spin_lock_init(&acb->eh_lock);
991 spin_lock_init(&acb->ccblist_lock);
992 spin_lock_init(&acb->postq_lock);
993 spin_lock_init(&acb->doneq_lock);
994 spin_lock_init(&acb->rqbuffer_lock);
995 spin_lock_init(&acb->wqbuffer_lock);
996 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
997 ACB_F_MESSAGE_RQBUFFER_CLEARED |
998 ACB_F_MESSAGE_WQBUFFER_READED);
999 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
1000 INIT_LIST_HEAD(&acb->ccb_free_list);
1001 error = arcmsr_remap_pciregion(acb);
1003 goto pci_release_regs;
1005 error = arcmsr_alloc_io_queue(acb);
1007 goto unmap_pci_region;
1008 error = arcmsr_get_firmware_spec(acb);
1012 arcmsr_free_io_queue(acb);
1013 error = arcmsr_alloc_ccb_pool(acb);
1015 goto unmap_pci_region;
1017 error = scsi_add_host(host, &pdev->dev);
1021 if (arcmsr_request_irq(pdev, acb) == FAILED)
1022 goto scsi_host_remove;
1023 arcmsr_iop_init(acb);
1024 arcmsr_init_get_devmap_timer(acb);
1026 arcmsr_init_set_datetime_timer(acb);
1027 if(arcmsr_alloc_sysfs_attr(acb))
1028 goto out_free_sysfs;
1029 scsi_scan_host(host);
/* ---- error unwind ladder: each label releases one acquisition ---- */
1033 del_timer_sync(&acb->refresh_timer);
1034 del_timer_sync(&acb->eternal_timer);
1035 flush_work(&acb->arcmsr_do_message_isr_bh);
1036 arcmsr_stop_adapter_bgrb(acb);
1037 arcmsr_flush_adapter_cache(acb);
1038 arcmsr_free_irq(pdev, acb);
1040 scsi_remove_host(host);
1042 arcmsr_free_ccb_pool(acb);
1043 goto unmap_pci_region;
1045 arcmsr_free_io_queue(acb);
1047 arcmsr_unmap_pciregion(acb);
1049 pci_release_regions(pdev);
1051 scsi_host_put(host);
1053 pci_disable_device(pdev);
/*
 * arcmsr_free_irq - release every requested IRQ vector, then return
 * the MSI/MSI-X vectors to the PCI core.
 */
1057 static void arcmsr_free_irq(struct pci_dev *pdev,
1058 struct AdapterControlBlock *acb)
1062 for (i = 0; i < acb->vector_count; i++)
1063 free_irq(pci_irq_vector(pdev, i), acb);
1064 pci_free_irq_vectors(pdev);
/*
 * arcmsr_suspend - legacy PCI power-management suspend hook.
 * Quiesces the adapter (interrupts off, IRQs freed, timers stopped,
 * background rebuild halted, cache flushed), then saves PCI state and
 * drops the device to the target power state.
 */
1067 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
1069 struct Scsi_Host *host = pci_get_drvdata(pdev);
1070 struct AdapterControlBlock *acb =
1071 (struct AdapterControlBlock *)host->hostdata;
1073 arcmsr_disable_outbound_ints(acb);
1074 arcmsr_free_irq(pdev, acb);
1075 del_timer_sync(&acb->eternal_timer);
1077 del_timer_sync(&acb->refresh_timer);
1078 flush_work(&acb->arcmsr_do_message_isr_bh);
1079 arcmsr_stop_adapter_bgrb(acb);
1080 arcmsr_flush_adapter_cache(acb);
1081 pci_set_drvdata(pdev, host);
1082 pci_save_state(pdev);
1083 pci_disable_device(pdev);
1084 pci_set_power_state(pdev, pci_choose_state(pdev, state));
/*
 * arcmsr_resume - legacy PCI power-management resume hook.
 * Restores PCI state/power, re-applies DMA masks, re-requests IRQs,
 * resets per-adapter-type queue bookkeeping (type B ring buffers,
 * type E doorbell/doneq state), re-initializes the IOP, and restarts
 * the devmap and datetime timers.
 * NOTE(review): the dump dropped the success return, the
 * "controller_stop" label, and several braces (e.g. lines 1099-1100,
 * 1123-1131); the tail below is the failure unwind path.
 */
1088 static int arcmsr_resume(struct pci_dev *pdev)
1090 struct Scsi_Host *host = pci_get_drvdata(pdev);
1091 struct AdapterControlBlock *acb =
1092 (struct AdapterControlBlock *)host->hostdata;
1094 pci_set_power_state(pdev, PCI_D0);
1095 pci_enable_wake(pdev, PCI_D0, 0);
1096 pci_restore_state(pdev);
1097 if (pci_enable_device(pdev)) {
1098 pr_warn("%s: pci_enable_device error\n", __func__);
1101 if (arcmsr_set_dma_mask(acb))
1102 goto controller_unregister;
1103 pci_set_master(pdev);
1104 if (arcmsr_request_irq(pdev, acb) == FAILED)
1105 goto controller_stop;
1106 switch (acb->adapter_type) {
1107 case ACB_ADAPTER_TYPE_B: {
1108 struct MessageUnit_B *reg = acb->pmuB;
/* clear the type-B post/done ring buffers and indices */
1110 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1111 reg->post_qbuffer[i] = 0;
1112 reg->done_qbuffer[i] = 0;
1114 reg->postq_index = 0;
1115 reg->doneq_index = 0;
/* type E: re-sync doorbells and reset completion-queue index */
1118 case ACB_ADAPTER_TYPE_E:
1119 writel(0, &acb->pmuE->host_int_status);
1120 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
1121 acb->in_doorbell = 0;
1122 acb->out_doorbell = 0;
1123 acb->doneq_index = 0;
1126 arcmsr_iop_init(acb);
1127 arcmsr_init_get_devmap_timer(acb);
1129 arcmsr_init_set_datetime_timer(acb);
/* ---- failure unwind ---- */
1132 arcmsr_stop_adapter_bgrb(acb);
1133 arcmsr_flush_adapter_cache(acb);
1134 controller_unregister:
1135 scsi_remove_host(host);
1136 arcmsr_free_ccb_pool(acb);
1137 arcmsr_unmap_pciregion(acb);
1138 pci_release_regions(pdev);
1139 scsi_host_put(host);
1140 pci_disable_device(pdev);
/*
 * arcmsr_hbaA_abort_allcmd - tell a type-A IOP to abort all outstanding
 * commands via message register 0, then wait for the message-int ack.
 * NOTE(review): "®" is mojibake for "&reg" (HTML-entity corruption);
 * return statements were dropped by the dump.
 */
1144 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1146 struct MessageUnit_A __iomem *reg = acb->pmuA;
1147 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1148 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1150 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1151 , acb->host->host_no);
/*
 * arcmsr_hbaB_abort_allcmd - abort all outstanding commands on a
 * type-B IOP via the drv2iop doorbell, then wait for the ack.
 */
1157 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1159 struct MessageUnit_B *reg = acb->pmuB;
1161 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
1162 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1164 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1165 , acb->host->host_no);
/*
 * arcmsr_hbaC_abort_allcmd - abort all outstanding commands on a
 * type-C IOP: post the message, ring the inbound doorbell, wait for ack.
 * NOTE(review): "®" is mojibake for "&reg".
 */
1170 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
1172 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1173 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1174 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1175 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1177 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1178 , pACB->host->host_no);
/*
 * arcmsr_hbaD_abort_allcmd - abort all outstanding commands on a
 * type-D (ARC1214) IOP via message register 0, then wait for the ack.
 */
1184 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
1186 struct MessageUnit_D *reg = pACB->pmuD;
1188 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
1189 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
1190 pr_notice("arcmsr%d: wait 'abort all outstanding "
1191 "command' timeout\n", pACB->host->host_no);
/*
 * arcmsr_hbaE_abort_allcmd - abort all outstanding commands on a
 * type-E IOP.  The out_doorbell XOR toggles the MESSAGE_CMD_DONE bit;
 * the full doorbell word is then written back to notify the IOP.
 * NOTE(review): "®" is mojibake for "&reg".
 */
1197 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1199 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1201 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1202 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1203 writel(pACB->out_doorbell, ®->iobound_doorbell);
1204 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1205 pr_notice("arcmsr%d: wait 'abort all outstanding "
1206 "command' timeout\n", pACB->host->host_no);
/*
 * arcmsr_abort_allcmd - dispatch "abort all outstanding commands" to
 * the adapter-type-specific implementation and return its status.
 * NOTE(review): break statements and the final return were dropped by
 * the dump.
 */
1212 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1215 switch (acb->adapter_type) {
1216 case ACB_ADAPTER_TYPE_A: {
1217 rtnval = arcmsr_hbaA_abort_allcmd(acb);
1221 case ACB_ADAPTER_TYPE_B: {
1222 rtnval = arcmsr_hbaB_abort_allcmd(acb);
1226 case ACB_ADAPTER_TYPE_C: {
1227 rtnval = arcmsr_hbaC_abort_allcmd(acb);
1231 case ACB_ADAPTER_TYPE_D:
1232 rtnval = arcmsr_hbaD_abort_allcmd(acb);
1234 case ACB_ADAPTER_TYPE_E:
1235 rtnval = arcmsr_hbaE_abort_allcmd(acb);
/*
 * arcmsr_pci_unmap_dma - unmap the scatter/gather DMA mapping of the
 * SCSI command attached to this CCB.
 */
1241 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
1243 struct scsi_cmnd *pcmd = ccb->pcmd;
1245 scsi_dma_unmap(pcmd);
/*
 * arcmsr_ccb_complete - finish a CCB: drop the outstanding count,
 * unmap its DMA, mark it done, return it to the free list (under
 * ccblist_lock), and complete the SCSI command to the midlayer.
 */
1248 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
1250 struct AdapterControlBlock *acb = ccb->acb;
1251 struct scsi_cmnd *pcmd = ccb->pcmd;
1252 unsigned long flags;
1253 atomic_dec(&acb->ccboutstandingcount);
1254 arcmsr_pci_unmap_dma(ccb);
1255 ccb->startdone = ARCMSR_CCB_DONE;
1256 spin_lock_irqsave(&acb->ccblist_lock, flags);
1257 list_add_tail(&ccb->list, &acb->ccb_free_list);
1258 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
1259 pcmd->scsi_done(pcmd);
/*
 * arcmsr_report_sense_info - copy the firmware-supplied sense data
 * from the CCB into the SCSI command's sense buffer (bounded by
 * SCSI_SENSE_BUFFERSIZE) and set CHECK_CONDITION + DRIVER_SENSE in
 * pcmd->result.
 * NOTE(review): an if/brace around lines 1268-1276 was dropped by the
 * dump - presumably a NULL check on sensebuffer; confirm upstream.
 */
1262 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1265 struct scsi_cmnd *pcmd = ccb->pcmd;
1266 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1267 pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1269 int sense_data_length =
1270 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
1271 ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
1272 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
1273 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
1274 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1275 sensebuffer->Valid = 1;
1276 pcmd->result |= (DRIVER_SENSE << 24);
/*
 * arcmsr_disable_outbound_ints - mask all outbound interrupts for the
 * adapter and return the previous interrupt-mask value so it can be
 * restored by arcmsr_enable_outbound_ints().
 * NOTE(review): "®" is mojibake for "&reg"; break statements, the
 * orig_mask declaration, and the final return were dropped by the dump.
 * Type D has no readable mask register, so nothing is saved for it.
 */
1280 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1283 switch (acb->adapter_type) {
1284 case ACB_ADAPTER_TYPE_A : {
1285 struct MessageUnit_A __iomem *reg = acb->pmuA;
1286 orig_mask = readl(®->outbound_intmask);
1287 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
1288 ®->outbound_intmask);
1291 case ACB_ADAPTER_TYPE_B : {
1292 struct MessageUnit_B *reg = acb->pmuB;
1293 orig_mask = readl(reg->iop2drv_doorbell_mask);
1294 writel(0, reg->iop2drv_doorbell_mask);
1297 case ACB_ADAPTER_TYPE_C:{
1298 struct MessageUnit_C __iomem *reg = acb->pmuC;
1299 /* disable all outbound interrupt */
1300 orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */
1301 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
1304 case ACB_ADAPTER_TYPE_D: {
1305 struct MessageUnit_D *reg = acb->pmuD;
1306 /* disable all outbound interrupt */
1307 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1310 case ACB_ADAPTER_TYPE_E: {
1311 struct MessageUnit_E __iomem *reg = acb->pmuE;
1312 orig_mask = readl(®->host_int_mask);
1313 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask);
1314 readl(®->host_int_mask); /* Dummy readl to force pci flush */
/*
 * arcmsr_report_ccb_state - translate the firmware DeviceStatus of a
 * completed CCB into a SCSI result code, update the cached per-target
 * RAID device state, and complete the CCB.
 * NOTE(review): the "if (!error)"/"else" framing and break statements
 * around these branches were dropped by the dump: lines 1328-1331 are
 * the success path, the switch handles the error path, and the default
 * case logs an unknown DeviceStatus.
 */
1321 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
1322 struct CommandControlBlock *ccb, bool error)
1325 id = ccb->pcmd->device->id;
1326 lun = ccb->pcmd->device->lun;
/* success path: revive a previously-GONE device and report DID_OK */
1328 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1329 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1330 ccb->pcmd->result = DID_OK << 16;
1331 arcmsr_ccb_complete(ccb);
1333 switch (ccb->arcmsr_cdb.DeviceStatus) {
1334 case ARCMSR_DEV_SELECT_TIMEOUT: {
1335 acb->devstate[id][lun] = ARECA_RAID_GONE;
1336 ccb->pcmd->result = DID_NO_CONNECT << 16;
1337 arcmsr_ccb_complete(ccb);
1341 case ARCMSR_DEV_ABORTED:
1343 case ARCMSR_DEV_INIT_FAIL: {
1344 acb->devstate[id][lun] = ARECA_RAID_GONE;
1345 ccb->pcmd->result = DID_BAD_TARGET << 16;
1346 arcmsr_ccb_complete(ccb);
1350 case ARCMSR_DEV_CHECK_CONDITION: {
1351 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1352 arcmsr_report_sense_info(ccb);
1353 arcmsr_ccb_complete(ccb);
/* default: unknown status - log it and treat the device as gone */
1359 "arcmsr%d: scsi id = %d lun = %d isr get command error done, \
1360 but got unknown DeviceStatus = 0x%x \n"
1361 , acb->host->host_no
1364 , ccb->arcmsr_cdb.DeviceStatus);
1365 acb->devstate[id][lun] = ARECA_RAID_GONE;
1366 ccb->pcmd->result = DID_NO_CONNECT << 16;
1367 arcmsr_ccb_complete(ccb);
/*
 * arcmsr_drain_donequeue - handle one completed CCB from the IOP done
 * queue.  A CCB that belongs to another adapter or is not in START
 * state is either an aborted command (completed with DID_ABORT) or an
 * illegal completion (logged); otherwise normal completion is routed
 * through arcmsr_report_ccb_state().
 */
1373 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1375 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1376 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
1377 struct scsi_cmnd *abortcmd = pCCB->pcmd;
1379 abortcmd->result |= DID_ABORT << 16;
1380 arcmsr_ccb_complete(pCCB);
1381 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
1382 acb->host->host_no, pCCB);
1386 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
1388 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
1389 " ccboutstandingcount = %d \n"
1390 , acb->host->host_no
1395 , atomic_read(&acb->ccboutstandingcount));
1398 arcmsr_report_ccb_state(acb, pCCB, error);
/*
 * arcmsr_done4abort_postqueue - after aborting all commands, drain and
 * complete every entry still sitting on the adapter's outbound
 * completion queue so no CCB is left dangling.  Each adapter type has
 * its own queue format:
 *   A: pop 32-bit flag_ccb words from outbound_queueport (0xFFFFFFFF
 *      means empty); CDB physical address = flag_ccb << 5, optionally
 *      OR'd with the high 32 bits cached in cdb_phyadd_hipart.
 *   B: scan/clear the done_qbuffer ring and reset both ring indices.
 *   C: pop from outbound_queueport_low while the POSTQUEUE ISR bit is
 *      set; address = flag_ccb & ~0xF.
 *   D: walk the done queue under doneq_lock using the 12-bit index +
 *      0x4000 toggle scheme, then reset postq/doneq indices.
 *   E: reuse the normal type-E postqueue ISR handler.
 * NOTE(review): "®" is mojibake for "&reg"; several braces/breaks and
 * a few statements (e.g. lines 1491, 1501, 1503-1505) were dropped by
 * the dump.
 */
1401 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1405 struct ARCMSR_CDB *pARCMSR_CDB;
1407 struct CommandControlBlock *pCCB;
1408 unsigned long ccb_cdb_phy;
1410 switch (acb->adapter_type) {
1412 case ACB_ADAPTER_TYPE_A: {
1413 struct MessageUnit_A __iomem *reg = acb->pmuA;
1414 uint32_t outbound_intstatus;
1415 outbound_intstatus = readl(®->outbound_intstatus) &
1416 acb->outbound_int_enable;
1417 /*clear and abort all outbound posted Q*/
1418 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
1419 while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF)
1420 && (i++ < acb->maxOutstanding)) {
1421 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1422 if (acb->cdb_phyadd_hipart)
1423 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1424 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1425 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1426 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1427 arcmsr_drain_donequeue(acb, pCCB, error);
1432 case ACB_ADAPTER_TYPE_B: {
1433 struct MessageUnit_B *reg = acb->pmuB;
1434 /*clear all outbound posted Q*/
1435 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
1436 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1437 flag_ccb = reg->done_qbuffer[i];
1438 if (flag_ccb != 0) {
1439 reg->done_qbuffer[i] = 0;
1440 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1441 if (acb->cdb_phyadd_hipart)
1442 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1443 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1444 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1445 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1446 arcmsr_drain_donequeue(acb, pCCB, error);
1448 reg->post_qbuffer[i] = 0;
1450 reg->doneq_index = 0;
1451 reg->postq_index = 0;
1454 case ACB_ADAPTER_TYPE_C: {
1455 struct MessageUnit_C __iomem *reg = acb->pmuC;
1456 while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1458 flag_ccb = readl(®->outbound_queueport_low);
1459 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1460 if (acb->cdb_phyadd_hipart)
1461 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1462 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1463 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1464 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1465 arcmsr_drain_donequeue(acb, pCCB, error);
1469 case ACB_ADAPTER_TYPE_D: {
1470 struct MessageUnit_D *pmu = acb->pmuD;
1471 uint32_t outbound_write_pointer;
1472 uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1473 unsigned long flags;
1475 residual = atomic_read(&acb->ccboutstandingcount);
1476 for (i = 0; i < residual; i++) {
1477 spin_lock_irqsave(&acb->doneq_lock, flags);
/* entry 0 of the done queue holds the hardware write pointer */
1478 outbound_write_pointer =
1479 pmu->done_qbuffer[0].addressLow + 1;
1480 doneq_index = pmu->doneq_index;
1481 if ((doneq_index & 0xFFF) !=
1482 (outbound_write_pointer & 0xFFF)) {
/* bit 14 of the index toggles on every ring wrap */
1483 toggle = doneq_index & 0x4000;
1484 index_stripped = (doneq_index & 0xFFF) + 1;
1485 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1486 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1487 ((toggle ^ 0x4000) + 1);
1488 doneq_index = pmu->doneq_index;
1489 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1490 addressLow = pmu->done_qbuffer[doneq_index &
1492 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1493 if (acb->cdb_phyadd_hipart)
1494 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1495 pARCMSR_CDB = (struct ARCMSR_CDB *)
1496 (acb->vir2phy_offset + ccb_cdb_phy);
1497 pCCB = container_of(pARCMSR_CDB,
1498 struct CommandControlBlock, arcmsr_cdb);
1499 error = (addressLow &
1500 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1502 arcmsr_drain_donequeue(acb, pCCB, error);
1504 pmu->outboundlist_read_pointer);
1506 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1510 pmu->postq_index = 0;
1511 pmu->doneq_index = 0x40FF;
1514 case ACB_ADAPTER_TYPE_E:
1515 arcmsr_hbaE_postqueue_isr(acb);
/*
 * arcmsr_remove_scsi_devices - used on surprise removal: fail every
 * in-flight CCB with DID_NO_CONNECT, then walk the cached device map
 * and remove each known target/LUN from the SCSI midlayer.
 * NOTE(review): the dump dropped loop braces and the per-LUN bit test
 * between lines 1537-1555 - confirm the device-map walk upstream.
 */
1520 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1522 char *acb_dev_map = (char *)acb->device_map;
1524 struct scsi_device *psdev;
1525 struct CommandControlBlock *ccb;
1528 for (i = 0; i < acb->maxFreeCCB; i++) {
1529 ccb = acb->pccb_pool[i];
1530 if (ccb->startdone == ARCMSR_CCB_START) {
1531 ccb->pcmd->result = DID_NO_CONNECT << 16;
1532 arcmsr_pci_unmap_dma(ccb);
1533 ccb->pcmd->scsi_done(ccb->pcmd);
1536 for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
1537 temp = *acb_dev_map;
1539 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
1541 psdev = scsi_device_lookup(acb->host,
1543 if (psdev != NULL) {
1544 scsi_remove_device(psdev);
1545 scsi_device_put(psdev);
/*
 * arcmsr_free_pcidev - tear down all driver resources for a removed
 * adapter: sysfs attrs, SCSI host, pending work, timers, IRQs, CCB
 * pool, BAR mappings, PCI regions, host reference, and PCI device.
 * NOTE(review): the assignments initializing pdev/host (lines
 * 1560-1561) were dropped by the dump.
 */
1556 static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
1558 struct pci_dev *pdev;
1559 struct Scsi_Host *host;
1562 arcmsr_free_sysfs_attr(acb);
1563 scsi_remove_host(host);
1564 flush_work(&acb->arcmsr_do_message_isr_bh);
1565 del_timer_sync(&acb->eternal_timer);
1567 del_timer_sync(&acb->refresh_timer);
1569 arcmsr_free_irq(pdev, acb);
1570 arcmsr_free_ccb_pool(acb);
1571 arcmsr_unmap_pciregion(acb);
1572 pci_release_regions(pdev);
1573 scsi_host_put(host);
1574 pci_disable_device(pdev);
/*
 * arcmsr_remove - PCI remove entry point.  If reading PCI_DEVICE_ID
 * yields 0xffff the card was surprise-removed: mark the ACB removed,
 * fail outstanding commands, free everything, and return.  Otherwise
 * do an orderly shutdown: stop timers/work, disable interrupts, stop
 * background rebuild, flush the cache, poll for outstanding CCBs to
 * drain, abort whatever remains, and release all resources.
 */
1577 static void arcmsr_remove(struct pci_dev *pdev)
1579 struct Scsi_Host *host = pci_get_drvdata(pdev);
1580 struct AdapterControlBlock *acb =
1581 (struct AdapterControlBlock *) host->hostdata;
1585 pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
/* all-ones config read => device is physically gone */
1586 if (dev_id == 0xffff) {
1587 acb->acb_flags &= ~ACB_F_IOP_INITED;
1588 acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
1589 arcmsr_remove_scsi_devices(acb);
1590 arcmsr_free_pcidev(acb);
1593 arcmsr_free_sysfs_attr(acb);
1594 scsi_remove_host(host);
1595 flush_work(&acb->arcmsr_do_message_isr_bh);
1596 del_timer_sync(&acb->eternal_timer);
1598 del_timer_sync(&acb->refresh_timer);
1599 arcmsr_disable_outbound_ints(acb);
1600 arcmsr_stop_adapter_bgrb(acb);
1601 arcmsr_flush_adapter_cache(acb);
1602 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
1603 acb->acb_flags &= ~ACB_F_IOP_INITED;
/* give the ISR up to maxOutstanding polls to complete in-flight CCBs */
1605 for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
1606 if (!atomic_read(&acb->ccboutstandingcount))
1608 arcmsr_interrupt(acb);/* FIXME: need spinlock */
1612 if (atomic_read(&acb->ccboutstandingcount)) {
1615 arcmsr_abort_allcmd(acb);
1616 arcmsr_done4abort_postqueue(acb);
/* forcibly complete any CCB that never came back from the IOP */
1617 for (i = 0; i < acb->maxFreeCCB; i++) {
1618 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1619 if (ccb->startdone == ARCMSR_CCB_START) {
1620 ccb->startdone = ARCMSR_CCB_ABORTED;
1621 ccb->pcmd->result = DID_ABORT << 16;
1622 arcmsr_ccb_complete(ccb);
1626 arcmsr_free_irq(pdev, acb);
1627 arcmsr_free_ccb_pool(acb);
1628 arcmsr_unmap_pciregion(acb);
1629 pci_release_regions(pdev);
1630 scsi_host_put(host);
1631 pci_disable_device(pdev);
/*
 * arcmsr_shutdown - PCI shutdown hook (reboot/poweroff).  Skips all
 * work if the adapter was already surprise-removed; otherwise stops
 * timers, masks interrupts, releases IRQs, flushes deferred work,
 * halts background rebuild, and flushes the adapter cache.
 */
1634 static void arcmsr_shutdown(struct pci_dev *pdev)
1636 struct Scsi_Host *host = pci_get_drvdata(pdev);
1637 struct AdapterControlBlock *acb =
1638 (struct AdapterControlBlock *)host->hostdata;
1639 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
1641 del_timer_sync(&acb->eternal_timer);
1643 del_timer_sync(&acb->refresh_timer);
1644 arcmsr_disable_outbound_ints(acb);
1645 arcmsr_free_irq(pdev, acb);
1646 flush_work(&acb->arcmsr_do_message_isr_bh);
1647 arcmsr_stop_adapter_bgrb(acb);
1648 arcmsr_flush_adapter_cache(acb);
/*
 * arcmsr_module_init - module entry: register the PCI driver and
 * return the registration status.
 */
1651 static int arcmsr_module_init(void)
1654 error = pci_register_driver(&arcmsr_pci_driver);
/*
 * arcmsr_module_exit - module exit: unregister the PCI driver.
 */
1658 static void arcmsr_module_exit(void)
1660 pci_unregister_driver(&arcmsr_pci_driver);
1662 module_init(arcmsr_module_init);
1663 module_exit(arcmsr_module_exit);
/*
 * arcmsr_enable_outbound_ints - re-enable the outbound interrupt
 * sources for the adapter, combining the saved mask (intmask_org, as
 * returned by arcmsr_disable_outbound_ints) with each type's enable
 * bits, and record the resulting enable state in outbound_int_enable.
 * NOTE(review): "®" is mojibake for "&reg"; the intmask_org parameter
 * declaration line and break statements were dropped by the dump.
 */
1665 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1669 switch (acb->adapter_type) {
1671 case ACB_ADAPTER_TYPE_A: {
1672 struct MessageUnit_A __iomem *reg = acb->pmuA;
/* type A: a 0 bit in outbound_intmask enables the interrupt */
1673 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
1674 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
1675 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
1676 writel(mask, ®->outbound_intmask);
1677 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1681 case ACB_ADAPTER_TYPE_B: {
1682 struct MessageUnit_B *reg = acb->pmuB;
/* type B: a 1 bit in the doorbell mask enables the interrupt */
1683 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
1684 ARCMSR_IOP2DRV_DATA_READ_OK |
1685 ARCMSR_IOP2DRV_CDB_DONE |
1686 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
1687 writel(mask, reg->iop2drv_doorbell_mask);
1688 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
1691 case ACB_ADAPTER_TYPE_C: {
1692 struct MessageUnit_C __iomem *reg = acb->pmuC;
1693 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
1694 writel(intmask_org & mask, ®->host_int_mask);
1695 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1698 case ACB_ADAPTER_TYPE_D: {
1699 struct MessageUnit_D *reg = acb->pmuD;
1701 mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
1702 writel(intmask_org | mask, reg->pcief0_int_enable);
1705 case ACB_ADAPTER_TYPE_E: {
1706 struct MessageUnit_E __iomem *reg = acb->pmuE;
1708 mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
1709 writel(intmask_org & mask, ®->host_int_mask);
/*
 * arcmsr_build_ccb - translate a SCSI command into the adapter's
 * ARCMSR_CDB format inside the given CCB: copy target/LUN/CDB bytes,
 * DMA-map the scatter/gather list, and emit SG32ENTRY or SG64ENTRY
 * records depending on whether each segment has high address bits.
 * Sets sgcount, DataLength, msgPages, direction/SGL-size flags, and
 * the final arc_cdb_size.
 * NOTE(review): the dump dropped the failure/success returns around
 * lines 1733-1736 and 1767-1768 - presumably FAILED when scsi_dma_map
 * exceeds sg_tablesize, SUCCESS otherwise; confirm upstream.
 */
1715 static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1716 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
1718 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1719 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
1720 __le32 address_lo, address_hi;
1721 int arccdbsize = 0x30; /* fixed CDB header size before SG entries */
1724 struct scatterlist *sg;
1727 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
1728 arcmsr_cdb->TargetID = pcmd->device->id;
1729 arcmsr_cdb->LUN = pcmd->device->lun;
1730 arcmsr_cdb->Function = 1;
1731 arcmsr_cdb->msgContext = 0;
1732 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1734 nseg = scsi_dma_map(pcmd);
1735 if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
1737 scsi_for_each_sg(pcmd, sg, nseg, i) {
1738 /* Get the physical address of the current data pointer */
1739 length = cpu_to_le32(sg_dma_len(sg));
1740 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
1741 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
/* 32-bit segment: compact SG32ENTRY; otherwise SG64ENTRY with flag */
1742 if (address_hi == 0) {
1743 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1745 pdma_sg->address = address_lo;
1746 pdma_sg->length = length;
1747 psge += sizeof (struct SG32ENTRY);
1748 arccdbsize += sizeof (struct SG32ENTRY);
1750 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1752 pdma_sg->addresshigh = address_hi;
1753 pdma_sg->address = address_lo;
1754 pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
1755 psge += sizeof (struct SG64ENTRY);
1756 arccdbsize += sizeof (struct SG64ENTRY);
1759 arcmsr_cdb->sgcount = (uint8_t)nseg;
1760 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
/* msgPages = number of 256-byte pages the CDB occupies, rounded up */
1761 arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
1762 if ( arccdbsize > 256)
1763 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1764 if (pcmd->sc_data_direction == DMA_TO_DEVICE)
1765 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1766 ccb->arc_cdb_size = arccdbsize;
/*
 * arcmsr_post_ccb - hand a built CCB to the IOP, using the posting
 * mechanism of the adapter type:
 *   A: write the CDB physical address (plus SGL-size flag) to the
 *      inbound queue port.
 *   B: place the address in the post ring at postq_index, advance the
 *      index modulo the ring size, and ring the drv2iop doorbell.
 *   C: write high address then a stamp word encoding the CDB size.
 *   D: fill an InBound_SRB slot under postq_lock, advance the index
 *      with the 0x4000 wrap-toggle scheme, and write the write pointer.
 *   E: post the SMID-based stamp to the inbound queue ports.
 * Increments ccboutstandingcount and marks the CCB started first.
 * NOTE(review): "®" is mojibake for "&reg"; break statements, some
 * braces, and the type-B index increment (line ~1800) were dropped by
 * the dump.
 */
1770 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1772 uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
1773 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1774 atomic_inc(&acb->ccboutstandingcount);
1775 ccb->startdone = ARCMSR_CCB_START;
1776 switch (acb->adapter_type) {
1777 case ACB_ADAPTER_TYPE_A: {
1778 struct MessageUnit_A __iomem *reg = acb->pmuA;
1780 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1781 writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1782 ®->inbound_queueport);
1784 writel(cdb_phyaddr, ®->inbound_queueport);
1788 case ACB_ADAPTER_TYPE_B: {
1789 struct MessageUnit_B *reg = acb->pmuB;
1790 uint32_t ending_index, index = reg->postq_index;
/* clear the next slot so the IOP sees a terminated ring */
1792 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1793 reg->post_qbuffer[ending_index] = 0;
1794 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1795 reg->post_qbuffer[index] =
1796 cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
1798 reg->post_qbuffer[index] = cdb_phyaddr;
1801 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
1802 reg->postq_index = index;
1803 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
1806 case ACB_ADAPTER_TYPE_C: {
1807 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1808 uint32_t ccb_post_stamp, arc_cdb_size;
/* stamp encodes ((size-1)>>6) in the low bits; size capped at 0x300 */
1810 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1811 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
1812 writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
1813 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1816 case ACB_ADAPTER_TYPE_D: {
1817 struct MessageUnit_D *pmu = acb->pmuD;
1819 u16 postq_index, toggle;
1820 unsigned long flags;
1821 struct InBound_SRB *pinbound_srb;
1823 spin_lock_irqsave(&acb->postq_lock, flags);
1824 postq_index = pmu->postq_index;
1825 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
1826 pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
1827 pinbound_srb->addressLow = cdb_phyaddr;
1828 pinbound_srb->length = ccb->arc_cdb_size >> 2;
1829 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
/* advance post index with the bit-14 wrap toggle */
1830 toggle = postq_index & 0x4000;
1831 index_stripped = postq_index + 1;
1832 index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
1833 pmu->postq_index = index_stripped ? (index_stripped | toggle) :
1835 writel(postq_index, pmu->inboundlist_write_pointer);
1836 spin_unlock_irqrestore(&acb->postq_lock, flags);
1839 case ACB_ADAPTER_TYPE_E: {
1840 struct MessageUnit_E __iomem *pmu = acb->pmuE;
1841 u32 ccb_post_stamp, arc_cdb_size;
1843 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1844 ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
1845 writel(0, &pmu->inbound_queueport_high);
1846 writel(ccb_post_stamp, &pmu->inbound_queueport_low);
/*
 * arcmsr_hbaA_stop_bgrb - clear the background-rebuild flag and tell a
 * type-A IOP to stop background rebuild; warn on ack timeout.
 * NOTE(review): "®" is mojibake for "&reg".
 */
1852 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1854 struct MessageUnit_A __iomem *reg = acb->pmuA;
1855 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1856 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1857 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1859 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1860 , acb->host->host_no);
/*
 * arcmsr_hbaB_stop_bgrb - stop background rebuild on a type-B IOP via
 * the drv2iop doorbell; warn on ack timeout.
 */
1864 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1866 struct MessageUnit_B *reg = acb->pmuB;
1867 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1868 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1870 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1872 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1873 , acb->host->host_no);
/*
 * arcmsr_hbaC_stop_bgrb - stop background rebuild on a type-C IOP:
 * post the message and ring the inbound doorbell; warn on ack timeout.
 * NOTE(review): "®" is mojibake for "&reg".
 */
1877 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1879 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1880 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1881 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1882 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1883 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1885 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1886 , pACB->host->host_no);
/*
 * arcmsr_hbaD_stop_bgrb - stop background rebuild on a type-D IOP via
 * message register 0; warn on ack timeout.
 */
1891 static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1893 struct MessageUnit_D *reg = pACB->pmuD;
1895 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1896 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1897 if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1898 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1899 "timeout\n", pACB->host->host_no);
/*
 * arcmsr_hbaE_stop_bgrb - stop background rebuild on a type-E IOP:
 * post the message, toggle the MESSAGE_CMD_DONE doorbell bit, and
 * write the doorbell word; warn on ack timeout.
 * NOTE(review): "®" is mojibake for "&reg".
 */
1902 static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
1904 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1906 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1907 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1908 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1909 writel(pACB->out_doorbell, ®->iobound_doorbell);
1910 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1911 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1912 "timeout\n", pACB->host->host_no);
/*
 * arcmsr_stop_adapter_bgrb - dispatch "stop background rebuild" to the
 * adapter-type-specific implementation.
 * NOTE(review): break statements were dropped by the dump.
 */
1916 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1918 switch (acb->adapter_type) {
1919 case ACB_ADAPTER_TYPE_A: {
1920 arcmsr_hbaA_stop_bgrb(acb);
1924 case ACB_ADAPTER_TYPE_B: {
1925 arcmsr_hbaB_stop_bgrb(acb);
1928 case ACB_ADAPTER_TYPE_C: {
1929 arcmsr_hbaC_stop_bgrb(acb);
1932 case ACB_ADAPTER_TYPE_D:
1933 arcmsr_hbaD_stop_bgrb(acb);
1935 case ACB_ADAPTER_TYPE_E:
1936 arcmsr_hbaE_stop_bgrb(acb);
/*
 * arcmsr_free_ccb_pool - free the single coherent DMA allocation that
 * backs the entire CCB pool.
 */
1941 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1943 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
/*
 * arcmsr_iop_message_read - acknowledge to the IOP that the driver has
 * consumed its message data, using each type's doorbell mechanism
 * (type E toggles the READ_OK bit in the cached doorbell word).
 * NOTE(review): "®" is mojibake for "&reg"; break statements were
 * dropped by the dump.
 */
1946 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1948 switch (acb->adapter_type) {
1949 case ACB_ADAPTER_TYPE_A: {
1950 struct MessageUnit_A __iomem *reg = acb->pmuA;
1951 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
1955 case ACB_ADAPTER_TYPE_B: {
1956 struct MessageUnit_B *reg = acb->pmuB;
1957 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
1960 case ACB_ADAPTER_TYPE_C: {
1961 struct MessageUnit_C __iomem *reg = acb->pmuC;
1963 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
1966 case ACB_ADAPTER_TYPE_D: {
1967 struct MessageUnit_D *reg = acb->pmuD;
1968 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
1969 reg->inbound_doorbell);
1972 case ACB_ADAPTER_TYPE_E: {
1973 struct MessageUnit_E __iomem *reg = acb->pmuE;
1974 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
1975 writel(acb->out_doorbell, ®->iobound_doorbell);
/*
 * arcmsr_iop_message_wrote - notify the IOP that the driver has
 * written data into the message buffer, so the IOP can consume it and
 * reply with the next Qbuffer interrupt.
 * NOTE(review): "®" is mojibake for "&reg"; break statements were
 * dropped by the dump.
 */
1981 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1983 switch (acb->adapter_type) {
1984 case ACB_ADAPTER_TYPE_A: {
1985 struct MessageUnit_A __iomem *reg = acb->pmuA;
1987 ** push inbound doorbell tell iop, driver data write ok
1988 ** and wait reply on next hwinterrupt for next Qbuffer post
1990 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell);
1994 case ACB_ADAPTER_TYPE_B: {
1995 struct MessageUnit_B *reg = acb->pmuB;
1997 ** push inbound doorbell tell iop, driver data write ok
1998 ** and wait reply on next hwinterrupt for next Qbuffer post
2000 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
2003 case ACB_ADAPTER_TYPE_C: {
2004 struct MessageUnit_C __iomem *reg = acb->pmuC;
2006 ** push inbound doorbell tell iop, driver data write ok
2007 ** and wait reply on next hwinterrupt for next Qbuffer post
2009 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell);
2012 case ACB_ADAPTER_TYPE_D: {
2013 struct MessageUnit_D *reg = acb->pmuD;
2014 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
2015 reg->inbound_doorbell);
2018 case ACB_ADAPTER_TYPE_E: {
2019 struct MessageUnit_E __iomem *reg = acb->pmuE;
2020 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
2021 writel(acb->out_doorbell, ®->iobound_doorbell);
/*
 * arcmsr_get_iop_rqbuffer - return a pointer to the adapter's
 * IOP-to-driver (read) message buffer for the current adapter type.
 * NOTE(review): "®" is mojibake for "&reg"; break statements and the
 * final return of qbuffer were dropped by the dump.
 */
2027 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2029 struct QBUFFER __iomem *qbuffer = NULL;
2030 switch (acb->adapter_type) {
2032 case ACB_ADAPTER_TYPE_A: {
2033 struct MessageUnit_A __iomem *reg = acb->pmuA;
2034 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2038 case ACB_ADAPTER_TYPE_B: {
2039 struct MessageUnit_B *reg = acb->pmuB;
2040 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2043 case ACB_ADAPTER_TYPE_C: {
2044 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2045 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
2048 case ACB_ADAPTER_TYPE_D: {
2049 struct MessageUnit_D *reg = acb->pmuD;
2050 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2053 case ACB_ADAPTER_TYPE_E: {
2054 struct MessageUnit_E __iomem *reg = acb->pmuE;
2055 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
/*
 * arcmsr_get_iop_wqbuffer - return a pointer to the adapter's
 * driver-to-IOP (write) message buffer for the current adapter type.
 * NOTE(review): "®" is mojibake for "&reg"; break statements and the
 * final return of pqbuffer were dropped by the dump.
 */
2062 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2064 struct QBUFFER __iomem *pqbuffer = NULL;
2065 switch (acb->adapter_type) {
2067 case ACB_ADAPTER_TYPE_A: {
2068 struct MessageUnit_A __iomem *reg = acb->pmuA;
2069 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer;
2073 case ACB_ADAPTER_TYPE_B: {
2074 struct MessageUnit_B *reg = acb->pmuB;
2075 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2078 case ACB_ADAPTER_TYPE_C: {
2079 struct MessageUnit_C __iomem *reg = acb->pmuC;
2080 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2083 case ACB_ADAPTER_TYPE_D: {
2084 struct MessageUnit_D *reg = acb->pmuD;
2085 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2088 case ACB_ADAPTER_TYPE_E: {
2089 struct MessageUnit_E __iomem *reg = acb->pmuE;
2090 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
/*
 * arcmsr_Read_iop_rqbuffer_in_DWORD - copy the IOP's message data out
 * of MMIO using 32-bit readl() accesses (required on adapter types
 * whose message buffers do not tolerate byte reads), staging it in a
 * temporary kmalloc'd buffer before feeding it byte-by-byte into the
 * driver's circular rqbuffer at rqbuf_putIndex.  Finishes by acking
 * the IOP via arcmsr_iop_message_read().
 * NOTE(review): the dump dropped the kmalloc NULL check, the data_len
 * setup, the byte-copy into pQbuffer, the kfree, and the return of the
 * transferred length - confirm against upstream before relying on the
 * exact flow.
 */
2098 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
2099 struct QBUFFER __iomem *prbuffer)
2102 uint8_t *buf1 = NULL;
2103 uint32_t __iomem *iop_data;
2104 uint32_t iop_len, data_len, *buf2 = NULL;
2106 iop_data = (uint32_t __iomem *)prbuffer->data;
2107 iop_len = readl(&prbuffer->data_len);
2109 buf1 = kmalloc(128, GFP_ATOMIC);
2110 buf2 = (uint32_t *)buf1;
/* drain whole DWORDs first, then one final partial DWORD */
2114 while (data_len >= 4) {
2115 *buf2++ = readl(iop_data);
2120 *buf2 = readl(iop_data);
2121 buf2 = (uint32_t *)buf1;
2123 while (iop_len > 0) {
2124 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2126 acb->rqbuf_putIndex++;
2127 /* if last, index number set it to 0 */
2128 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2133 /* let IOP know data has been read */
2134 arcmsr_iop_message_read(acb);
2139 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
2140 struct QBUFFER __iomem *prbuffer) {
2143 uint8_t __iomem *iop_data;
2146 if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
2147 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
2148 iop_data = (uint8_t __iomem *)prbuffer->data;
2149 iop_len = readl(&prbuffer->data_len);
2150 while (iop_len > 0) {
2151 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2152 *pQbuffer = readb(iop_data);
2153 acb->rqbuf_putIndex++;
2154 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2158 arcmsr_iop_message_read(acb);
2162 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
2164 unsigned long flags;
2165 struct QBUFFER __iomem *prbuffer;
2166 int32_t buf_empty_len;
2168 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2169 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2170 buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
2171 (ARCMSR_MAX_QBUFFER - 1);
2172 if (buf_empty_len >= readl(&prbuffer->data_len)) {
2173 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2174 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2176 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2177 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2180 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
2183 struct QBUFFER __iomem *pwbuffer;
2184 uint8_t *buf1 = NULL;
2185 uint32_t __iomem *iop_data;
2186 uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
2188 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2189 buf1 = kmalloc(128, GFP_ATOMIC);
2190 buf2 = (uint32_t *)buf1;
2194 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2195 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2196 iop_data = (uint32_t __iomem *)pwbuffer->data;
2197 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2198 && (allxfer_len < 124)) {
2199 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2201 acb->wqbuf_getIndex++;
2202 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2206 data_len = allxfer_len;
2207 buf1 = (uint8_t *)buf2;
2208 while (data_len >= 4) {
2210 writel(data, iop_data);
2216 writel(data, iop_data);
2218 writel(allxfer_len, &pwbuffer->data_len);
2220 arcmsr_iop_message_wrote(acb);
2225 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
2228 struct QBUFFER __iomem *pwbuffer;
2229 uint8_t __iomem *iop_data;
2230 int32_t allxfer_len = 0;
2232 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
2233 arcmsr_write_ioctldata2iop_in_DWORD(acb);
2236 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2237 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2238 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2239 iop_data = (uint8_t __iomem *)pwbuffer->data;
2240 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2241 && (allxfer_len < 124)) {
2242 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2243 writeb(*pQbuffer, iop_data);
2244 acb->wqbuf_getIndex++;
2245 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2249 writel(allxfer_len, &pwbuffer->data_len);
2250 arcmsr_iop_message_wrote(acb);
2254 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
2256 unsigned long flags;
2258 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2259 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
2260 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2261 arcmsr_write_ioctldata2iop(acb);
2262 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
2263 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
2264 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2267 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
2269 uint32_t outbound_doorbell;
2270 struct MessageUnit_A __iomem *reg = acb->pmuA;
2271 outbound_doorbell = readl(®->outbound_doorbell);
2273 writel(outbound_doorbell, ®->outbound_doorbell);
2274 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
2275 arcmsr_iop2drv_data_wrote_handle(acb);
2276 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
2277 arcmsr_iop2drv_data_read_handle(acb);
2278 outbound_doorbell = readl(®->outbound_doorbell);
2279 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
2280 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
2282 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
2284 uint32_t outbound_doorbell;
2285 struct MessageUnit_C __iomem *reg = pACB->pmuC;
2287 *******************************************************************
2288 ** Maybe here we need to check wrqbuffer_lock is lock or not
2289 ** DOORBELL: din! don!
2290 ** check if there are any mail need to pack from firmware
2291 *******************************************************************
2293 outbound_doorbell = readl(®->outbound_doorbell);
2295 writel(outbound_doorbell, ®->outbound_doorbell_clear);
2296 readl(®->outbound_doorbell_clear);
2297 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
2298 arcmsr_iop2drv_data_wrote_handle(pACB);
2299 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
2300 arcmsr_iop2drv_data_read_handle(pACB);
2301 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
2302 arcmsr_hbaC_message_isr(pACB);
2303 outbound_doorbell = readl(®->outbound_doorbell);
2304 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
2305 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
2306 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
2309 static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
2311 uint32_t outbound_doorbell;
2312 struct MessageUnit_D *pmu = pACB->pmuD;
2314 outbound_doorbell = readl(pmu->outbound_doorbell);
2316 writel(outbound_doorbell, pmu->outbound_doorbell);
2317 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
2318 arcmsr_hbaD_message_isr(pACB);
2319 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
2320 arcmsr_iop2drv_data_wrote_handle(pACB);
2321 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
2322 arcmsr_iop2drv_data_read_handle(pACB);
2323 outbound_doorbell = readl(pmu->outbound_doorbell);
2324 } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
2325 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
2326 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
2329 static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
2331 uint32_t outbound_doorbell, in_doorbell, tmp;
2332 struct MessageUnit_E __iomem *reg = pACB->pmuE;
2334 in_doorbell = readl(®->iobound_doorbell);
2335 outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
2337 writel(0, ®->host_int_status); /* clear interrupt */
2338 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
2339 arcmsr_iop2drv_data_wrote_handle(pACB);
2341 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
2342 arcmsr_iop2drv_data_read_handle(pACB);
2344 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
2345 arcmsr_hbaE_message_isr(pACB);
2348 in_doorbell = readl(®->iobound_doorbell);
2349 outbound_doorbell = tmp ^ in_doorbell;
2350 } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
2351 | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
2352 | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
2353 pACB->in_doorbell = in_doorbell;
2356 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
2359 struct MessageUnit_A __iomem *reg = acb->pmuA;
2360 struct ARCMSR_CDB *pARCMSR_CDB;
2361 struct CommandControlBlock *pCCB;
2363 unsigned long cdb_phy_addr;
2365 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) {
2366 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2367 if (acb->cdb_phyadd_hipart)
2368 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2369 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2370 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
2371 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2372 arcmsr_drain_donequeue(acb, pCCB, error);
2375 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
2379 struct MessageUnit_B *reg = acb->pmuB;
2380 struct ARCMSR_CDB *pARCMSR_CDB;
2381 struct CommandControlBlock *pCCB;
2383 unsigned long cdb_phy_addr;
2385 index = reg->doneq_index;
2386 while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
2387 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2388 if (acb->cdb_phyadd_hipart)
2389 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2390 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2391 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
2392 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2393 arcmsr_drain_donequeue(acb, pCCB, error);
2394 reg->done_qbuffer[index] = 0;
2396 index %= ARCMSR_MAX_HBB_POSTQUEUE;
2397 reg->doneq_index = index;
2401 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
2403 struct MessageUnit_C __iomem *phbcmu;
2404 struct ARCMSR_CDB *arcmsr_cdb;
2405 struct CommandControlBlock *ccb;
2406 uint32_t flag_ccb, throttling = 0;
2407 unsigned long ccb_cdb_phy;
2411 /* areca cdb command done */
2412 /* Use correct offset and size for syncing */
2414 while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
2416 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
2417 if (acb->cdb_phyadd_hipart)
2418 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2419 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2421 ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
2423 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2425 /* check if command done with no error */
2426 arcmsr_drain_donequeue(acb, ccb, error);
2428 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
2429 writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
2430 &phbcmu->inbound_doorbell);
2436 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
2438 u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
2439 uint32_t addressLow;
2441 struct MessageUnit_D *pmu;
2442 struct ARCMSR_CDB *arcmsr_cdb;
2443 struct CommandControlBlock *ccb;
2444 unsigned long flags, ccb_cdb_phy;
2446 spin_lock_irqsave(&acb->doneq_lock, flags);
2448 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
2449 doneq_index = pmu->doneq_index;
2450 if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
2452 toggle = doneq_index & 0x4000;
2453 index_stripped = (doneq_index & 0xFFF) + 1;
2454 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
2455 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
2456 ((toggle ^ 0x4000) + 1);
2457 doneq_index = pmu->doneq_index;
2458 addressLow = pmu->done_qbuffer[doneq_index &
2460 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
2461 if (acb->cdb_phyadd_hipart)
2462 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2463 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2465 ccb = container_of(arcmsr_cdb,
2466 struct CommandControlBlock, arcmsr_cdb);
2467 error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2469 arcmsr_drain_donequeue(acb, ccb, error);
2470 writel(doneq_index, pmu->outboundlist_read_pointer);
2471 } while ((doneq_index & 0xFFF) !=
2472 (outbound_write_pointer & 0xFFF));
2474 writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
2475 pmu->outboundlist_interrupt_cause);
2476 readl(pmu->outboundlist_interrupt_cause);
2477 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2480 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
2482 uint32_t doneq_index;
2485 struct MessageUnit_E __iomem *pmu;
2486 struct CommandControlBlock *ccb;
2487 unsigned long flags;
2489 spin_lock_irqsave(&acb->doneq_lock, flags);
2490 doneq_index = acb->doneq_index;
2492 while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
2493 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2494 ccb = acb->pccb_pool[cmdSMID];
2495 error = (acb->pCompletionQ[doneq_index].cmdFlag
2496 & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2497 arcmsr_drain_donequeue(acb, ccb, error);
2499 if (doneq_index >= acb->completionQ_entry)
2502 acb->doneq_index = doneq_index;
2503 writel(doneq_index, &pmu->reply_post_consumer_index);
2504 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2508 **********************************************************************************
2509 ** Handle a message interrupt
2511 ** The only message interrupt we expect is in response to a query for the current adapter config.
2512 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2513 **********************************************************************************
2515 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2517 struct MessageUnit_A __iomem *reg = acb->pmuA;
2518 /*clear interrupt and message state*/
2519 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus);
2520 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2521 schedule_work(&acb->arcmsr_do_message_isr_bh);
2523 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2525 struct MessageUnit_B *reg = acb->pmuB;
2527 /*clear interrupt and message state*/
2528 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2529 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2530 schedule_work(&acb->arcmsr_do_message_isr_bh);
2533 **********************************************************************************
2534 ** Handle a message interrupt
2536 ** The only message interrupt we expect is in response to a query for the
2537 ** current adapter config.
2538 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2539 **********************************************************************************
2541 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2543 struct MessageUnit_C __iomem *reg = acb->pmuC;
2544 /*clear interrupt and message state*/
2545 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);
2546 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2547 schedule_work(&acb->arcmsr_do_message_isr_bh);
2550 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2552 struct MessageUnit_D *reg = acb->pmuD;
2554 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2555 readl(reg->outbound_doorbell);
2556 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2557 schedule_work(&acb->arcmsr_do_message_isr_bh);
2560 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2562 struct MessageUnit_E __iomem *reg = acb->pmuE;
2564 writel(0, ®->host_int_status);
2565 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2566 schedule_work(&acb->arcmsr_do_message_isr_bh);
2569 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2571 uint32_t outbound_intstatus;
2572 struct MessageUnit_A __iomem *reg = acb->pmuA;
2573 outbound_intstatus = readl(®->outbound_intstatus) &
2574 acb->outbound_int_enable;
2575 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2578 writel(outbound_intstatus, ®->outbound_intstatus);
2579 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2580 arcmsr_hbaA_doorbell_isr(acb);
2581 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2582 arcmsr_hbaA_postqueue_isr(acb);
2583 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2584 arcmsr_hbaA_message_isr(acb);
2585 outbound_intstatus = readl(®->outbound_intstatus) &
2586 acb->outbound_int_enable;
2587 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2588 | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2589 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2593 static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
2595 uint32_t outbound_doorbell;
2596 struct MessageUnit_B *reg = acb->pmuB;
2597 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2598 acb->outbound_int_enable;
2599 if (!outbound_doorbell)
2602 writel(~outbound_doorbell, reg->iop2drv_doorbell);
2603 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2604 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
2605 arcmsr_iop2drv_data_wrote_handle(acb);
2606 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
2607 arcmsr_iop2drv_data_read_handle(acb);
2608 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
2609 arcmsr_hbaB_postqueue_isr(acb);
2610 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
2611 arcmsr_hbaB_message_isr(acb);
2612 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2613 acb->outbound_int_enable;
2614 } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
2615 | ARCMSR_IOP2DRV_DATA_READ_OK
2616 | ARCMSR_IOP2DRV_CDB_DONE
2617 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
2621 static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
2623 uint32_t host_interrupt_status;
2624 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
2626 *********************************************
2627 ** check outbound intstatus
2628 *********************************************
2630 host_interrupt_status = readl(&phbcmu->host_int_status) &
2631 (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2632 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2633 if (!host_interrupt_status)
2636 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
2637 arcmsr_hbaC_doorbell_isr(pACB);
2638 /* MU post queue interrupts*/
2639 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
2640 arcmsr_hbaC_postqueue_isr(pACB);
2641 host_interrupt_status = readl(&phbcmu->host_int_status);
2642 } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2643 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2647 static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
2649 u32 host_interrupt_status;
2650 struct MessageUnit_D *pmu = pACB->pmuD;
2652 host_interrupt_status = readl(pmu->host_int_status) &
2653 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2654 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
2655 if (!host_interrupt_status)
2658 /* MU post queue interrupts*/
2659 if (host_interrupt_status &
2660 ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
2661 arcmsr_hbaD_postqueue_isr(pACB);
2662 if (host_interrupt_status &
2663 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
2664 arcmsr_hbaD_doorbell_isr(pACB);
2665 host_interrupt_status = readl(pmu->host_int_status);
2666 } while (host_interrupt_status &
2667 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2668 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
2672 static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
2674 uint32_t host_interrupt_status;
2675 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
2677 host_interrupt_status = readl(&pmu->host_int_status) &
2678 (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2679 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2680 if (!host_interrupt_status)
2683 /* MU ioctl transfer doorbell interrupts*/
2684 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
2685 arcmsr_hbaE_doorbell_isr(pACB);
2687 /* MU post queue interrupts*/
2688 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
2689 arcmsr_hbaE_postqueue_isr(pACB);
2691 host_interrupt_status = readl(&pmu->host_int_status);
2692 } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2693 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2697 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2699 switch (acb->adapter_type) {
2700 case ACB_ADAPTER_TYPE_A:
2701 return arcmsr_hbaA_handle_isr(acb);
2703 case ACB_ADAPTER_TYPE_B:
2704 return arcmsr_hbaB_handle_isr(acb);
2706 case ACB_ADAPTER_TYPE_C:
2707 return arcmsr_hbaC_handle_isr(acb);
2708 case ACB_ADAPTER_TYPE_D:
2709 return arcmsr_hbaD_handle_isr(acb);
2710 case ACB_ADAPTER_TYPE_E:
2711 return arcmsr_hbaE_handle_isr(acb);
2717 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2720 /* stop adapter background rebuild */
2721 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2722 uint32_t intmask_org;
2723 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2724 intmask_org = arcmsr_disable_outbound_ints(acb);
2725 arcmsr_stop_adapter_bgrb(acb);
2726 arcmsr_flush_adapter_cache(acb);
2727 arcmsr_enable_outbound_ints(acb, intmask_org);
2733 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2737 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2738 for (i = 0; i < 15; i++) {
2739 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2740 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2741 acb->rqbuf_getIndex = 0;
2742 acb->rqbuf_putIndex = 0;
2743 arcmsr_iop_message_read(acb);
2745 } else if (acb->rqbuf_getIndex !=
2746 acb->rqbuf_putIndex) {
2747 acb->rqbuf_getIndex = 0;
2748 acb->rqbuf_putIndex = 0;
2756 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
2757 struct scsi_cmnd *cmd)
2760 unsigned short use_sg;
2761 int retvalue = 0, transfer_len = 0;
2762 unsigned long flags;
2763 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2764 uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
2765 (uint32_t)cmd->cmnd[6] << 16 |
2766 (uint32_t)cmd->cmnd[7] << 8 |
2767 (uint32_t)cmd->cmnd[8];
2768 struct scatterlist *sg;
2770 use_sg = scsi_sg_count(cmd);
2771 sg = scsi_sglist(cmd);
2772 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2774 retvalue = ARCMSR_MESSAGE_FAIL;
2777 transfer_len += sg->length;
2778 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2779 retvalue = ARCMSR_MESSAGE_FAIL;
2780 pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
2783 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
2784 switch (controlcode) {
2785 case ARCMSR_MESSAGE_READ_RQBUFFER: {
2786 unsigned char *ver_addr;
2787 uint8_t *ptmpQbuffer;
2788 uint32_t allxfer_len = 0;
2789 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
2791 retvalue = ARCMSR_MESSAGE_FAIL;
2792 pr_info("%s: memory not enough!\n", __func__);
2795 ptmpQbuffer = ver_addr;
2796 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2797 if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
2798 unsigned int tail = acb->rqbuf_getIndex;
2799 unsigned int head = acb->rqbuf_putIndex;
2800 unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
2802 allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
2803 if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
2804 allxfer_len = ARCMSR_API_DATA_BUFLEN;
2806 if (allxfer_len <= cnt_to_end)
2807 memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
2809 memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
2810 memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
2812 acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
2814 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
2816 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2817 struct QBUFFER __iomem *prbuffer;
2818 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2819 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2820 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2821 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2823 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2825 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2826 if (acb->fw_flag == FW_DEADLOCK)
2827 pcmdmessagefld->cmdmessage.ReturnCode =
2828 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2830 pcmdmessagefld->cmdmessage.ReturnCode =
2831 ARCMSR_MESSAGE_RETURNCODE_OK;
2834 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2835 unsigned char *ver_addr;
2838 uint8_t *pQbuffer, *ptmpuserbuffer;
2840 user_len = pcmdmessagefld->cmdmessage.Length;
2841 if (user_len > ARCMSR_API_DATA_BUFLEN) {
2842 retvalue = ARCMSR_MESSAGE_FAIL;
2846 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
2848 retvalue = ARCMSR_MESSAGE_FAIL;
2851 ptmpuserbuffer = ver_addr;
2853 memcpy(ptmpuserbuffer,
2854 pcmdmessagefld->messagedatabuffer, user_len);
2855 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2856 if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
2857 struct SENSE_DATA *sensebuffer =
2858 (struct SENSE_DATA *)cmd->sense_buffer;
2859 arcmsr_write_ioctldata2iop(acb);
2860 /* has error report sensedata */
2861 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
2862 sensebuffer->SenseKey = ILLEGAL_REQUEST;
2863 sensebuffer->AdditionalSenseLength = 0x0A;
2864 sensebuffer->AdditionalSenseCode = 0x20;
2865 sensebuffer->Valid = 1;
2866 retvalue = ARCMSR_MESSAGE_FAIL;
2868 pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
2869 cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
2870 if (user_len > cnt2end) {
2871 memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
2872 ptmpuserbuffer += cnt2end;
2873 user_len -= cnt2end;
2874 acb->wqbuf_putIndex = 0;
2875 pQbuffer = acb->wqbuffer;
2877 memcpy(pQbuffer, ptmpuserbuffer, user_len);
2878 acb->wqbuf_putIndex += user_len;
2879 acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2880 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2882 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2883 arcmsr_write_ioctldata2iop(acb);
2886 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2888 if (acb->fw_flag == FW_DEADLOCK)
2889 pcmdmessagefld->cmdmessage.ReturnCode =
2890 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2892 pcmdmessagefld->cmdmessage.ReturnCode =
2893 ARCMSR_MESSAGE_RETURNCODE_OK;
2896 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2897 uint8_t *pQbuffer = acb->rqbuffer;
2899 arcmsr_clear_iop2drv_rqueue_buffer(acb);
2900 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2901 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2902 acb->rqbuf_getIndex = 0;
2903 acb->rqbuf_putIndex = 0;
2904 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2905 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2906 if (acb->fw_flag == FW_DEADLOCK)
2907 pcmdmessagefld->cmdmessage.ReturnCode =
2908 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2910 pcmdmessagefld->cmdmessage.ReturnCode =
2911 ARCMSR_MESSAGE_RETURNCODE_OK;
2914 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2915 uint8_t *pQbuffer = acb->wqbuffer;
2916 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2917 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2918 ACB_F_MESSAGE_WQBUFFER_READED);
2919 acb->wqbuf_getIndex = 0;
2920 acb->wqbuf_putIndex = 0;
2921 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2922 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2923 if (acb->fw_flag == FW_DEADLOCK)
2924 pcmdmessagefld->cmdmessage.ReturnCode =
2925 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2927 pcmdmessagefld->cmdmessage.ReturnCode =
2928 ARCMSR_MESSAGE_RETURNCODE_OK;
2931 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2933 arcmsr_clear_iop2drv_rqueue_buffer(acb);
2934 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2935 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2936 acb->rqbuf_getIndex = 0;
2937 acb->rqbuf_putIndex = 0;
2938 pQbuffer = acb->rqbuffer;
2939 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2940 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2941 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2942 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2943 ACB_F_MESSAGE_WQBUFFER_READED);
2944 acb->wqbuf_getIndex = 0;
2945 acb->wqbuf_putIndex = 0;
2946 pQbuffer = acb->wqbuffer;
2947 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2948 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2949 if (acb->fw_flag == FW_DEADLOCK)
2950 pcmdmessagefld->cmdmessage.ReturnCode =
2951 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2953 pcmdmessagefld->cmdmessage.ReturnCode =
2954 ARCMSR_MESSAGE_RETURNCODE_OK;
2957 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
2958 if (acb->fw_flag == FW_DEADLOCK)
2959 pcmdmessagefld->cmdmessage.ReturnCode =
2960 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2962 pcmdmessagefld->cmdmessage.ReturnCode =
2963 ARCMSR_MESSAGE_RETURNCODE_3F;
2966 case ARCMSR_MESSAGE_SAY_HELLO: {
2967 int8_t *hello_string = "Hello! I am ARCMSR";
2968 if (acb->fw_flag == FW_DEADLOCK)
2969 pcmdmessagefld->cmdmessage.ReturnCode =
2970 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2972 pcmdmessagefld->cmdmessage.ReturnCode =
2973 ARCMSR_MESSAGE_RETURNCODE_OK;
2974 memcpy(pcmdmessagefld->messagedatabuffer,
2975 hello_string, (int16_t)strlen(hello_string));
2978 case ARCMSR_MESSAGE_SAY_GOODBYE: {
2979 if (acb->fw_flag == FW_DEADLOCK)
2980 pcmdmessagefld->cmdmessage.ReturnCode =
2981 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2983 pcmdmessagefld->cmdmessage.ReturnCode =
2984 ARCMSR_MESSAGE_RETURNCODE_OK;
2985 arcmsr_iop_parking(acb);
2988 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
2989 if (acb->fw_flag == FW_DEADLOCK)
2990 pcmdmessagefld->cmdmessage.ReturnCode =
2991 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2993 pcmdmessagefld->cmdmessage.ReturnCode =
2994 ARCMSR_MESSAGE_RETURNCODE_OK;
2995 arcmsr_flush_adapter_cache(acb);
2999 retvalue = ARCMSR_MESSAGE_FAIL;
3000 pr_info("%s: unknown controlcode!\n", __func__);
3004 struct scatterlist *sg = scsi_sglist(cmd);
3005 kunmap_atomic(buffer - sg->offset);
3010 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3012 struct list_head *head = &acb->ccb_free_list;
3013 struct CommandControlBlock *ccb = NULL;
3014 unsigned long flags;
3015 spin_lock_irqsave(&acb->ccblist_lock, flags);
3016 if (!list_empty(head)) {
3017 ccb = list_entry(head->next, struct CommandControlBlock, list);
3018 list_del_init(&ccb->list);
3020 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3023 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3027 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
3028 struct scsi_cmnd *cmd)
3030 switch (cmd->cmnd[0]) {
3032 unsigned char inqdata[36];
3034 struct scatterlist *sg;
3036 if (cmd->device->lun) {
3037 cmd->result = (DID_TIME_OUT << 16);
3038 cmd->scsi_done(cmd);
3041 inqdata[0] = TYPE_PROCESSOR;
3042 /* Periph Qualifier & Periph Dev Type */
3044 /* rem media bit & Dev Type Modifier */
3046 /* ISO, ECMA, & ANSI versions */
3048 /* length of additional data */
3049 strncpy(&inqdata[8], "Areca ", 8);
3050 /* Vendor Identification */
3051 strncpy(&inqdata[16], "RAID controller ", 16);
3052 /* Product Identification */
3053 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
3055 sg = scsi_sglist(cmd);
3056 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
3058 memcpy(buffer, inqdata, sizeof(inqdata));
3059 sg = scsi_sglist(cmd);
3060 kunmap_atomic(buffer - sg->offset);
3062 cmd->scsi_done(cmd);
3067 if (arcmsr_iop_message_xfer(acb, cmd))
3068 cmd->result = (DID_ERROR << 16);
3069 cmd->scsi_done(cmd);
3073 cmd->scsi_done(cmd);
3077 static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
3078 void (* done)(struct scsi_cmnd *))
3080 struct Scsi_Host *host = cmd->device->host;
3081 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
3082 struct CommandControlBlock *ccb;
3083 int target = cmd->device->id;
3085 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
3086 cmd->result = (DID_NO_CONNECT << 16);
3087 cmd->scsi_done(cmd);
3090 cmd->scsi_done = done;
3091 cmd->host_scribble = NULL;
3094 /* virtual device for iop message transfer */
3095 arcmsr_handle_virtual_command(acb, cmd);
3098 ccb = arcmsr_get_freeccb(acb);
3100 return SCSI_MLQUEUE_HOST_BUSY;
3101 if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
3102 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
3103 cmd->scsi_done(cmd);
3106 arcmsr_post_ccb(acb, ccb);
3110 static DEF_SCSI_QCMD(arcmsr_queue_command)
3112 static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
3115 uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
3116 uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
3117 uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
3118 uint32_t *firm_model = &rwbuffer[15];
3119 uint32_t *firm_version = &rwbuffer[17];
3120 uint32_t *device_map = &rwbuffer[21];
3124 *acb_firm_model = readl(firm_model);
3131 *acb_firm_version = readl(firm_version);
3138 *acb_device_map = readl(device_map);
3143 pACB->signature = readl(&rwbuffer[0]);
3144 pACB->firm_request_len = readl(&rwbuffer[1]);
3145 pACB->firm_numbers_queue = readl(&rwbuffer[2]);
3146 pACB->firm_sdram_size = readl(&rwbuffer[3]);
3147 pACB->firm_hd_channels = readl(&rwbuffer[4]);
3148 pACB->firm_cfg_version = readl(&rwbuffer[25]);
3149 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
3150 pACB->host->host_no,
3152 pACB->firm_version);
3155 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
3157 struct MessageUnit_A __iomem *reg = acb->pmuA;
3159 arcmsr_wait_firmware_ready(acb);
3160 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3161 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3162 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3163 miscellaneous data' timeout \n", acb->host->host_no);
3166 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3169 static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
3171 struct MessageUnit_B *reg = acb->pmuB;
3173 arcmsr_wait_firmware_ready(acb);
3174 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
3175 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3176 printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
3179 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3180 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3181 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3182 miscellaneous data' timeout \n", acb->host->host_no);
3185 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3189 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
3191 uint32_t intmask_org;
3192 struct MessageUnit_C __iomem *reg = pACB->pmuC;
3194 /* disable all outbound interrupt */
3195 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3196 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
3197 /* wait firmware ready */
3198 arcmsr_wait_firmware_ready(pACB);
3199 /* post "get config" instruction */
3200 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3201 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3202 /* wait message ready */
3203 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3204 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3205 miscellaneous data' timeout \n", pACB->host->host_no);
3208 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3212 static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
3214 struct MessageUnit_D *reg = acb->pmuD;
3216 if (readl(acb->pmuD->outbound_doorbell) &
3217 ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
3218 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
3219 acb->pmuD->outbound_doorbell);/*clear interrupt*/
3221 arcmsr_wait_firmware_ready(acb);
3222 /* post "get config" instruction */
3223 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
3224 /* wait message ready */
3225 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3226 pr_notice("arcmsr%d: wait get adapter firmware "
3227 "miscellaneous data timeout\n", acb->host->host_no);
3230 arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
3234 static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
3236 struct MessageUnit_E __iomem *reg = pACB->pmuE;
3237 uint32_t intmask_org;
3239 /* disable all outbound interrupt */
3240 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3241 writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask);
3242 /* wait firmware ready */
3243 arcmsr_wait_firmware_ready(pACB);
3245 /* post "get config" instruction */
3246 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3248 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3249 writel(pACB->out_doorbell, ®->iobound_doorbell);
3250 /* wait message ready */
3251 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3252 pr_notice("arcmsr%d: wait get adapter firmware "
3253 "miscellaneous data timeout\n", pACB->host->host_no);
3256 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3260 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3264 switch (acb->adapter_type) {
3265 case ACB_ADAPTER_TYPE_A:
3266 rtn = arcmsr_hbaA_get_config(acb);
3268 case ACB_ADAPTER_TYPE_B:
3269 rtn = arcmsr_hbaB_get_config(acb);
3271 case ACB_ADAPTER_TYPE_C:
3272 rtn = arcmsr_hbaC_get_config(acb);
3274 case ACB_ADAPTER_TYPE_D:
3275 rtn = arcmsr_hbaD_get_config(acb);
3277 case ACB_ADAPTER_TYPE_E:
3278 rtn = arcmsr_hbaE_get_config(acb);
3283 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3284 if (acb->host->can_queue >= acb->firm_numbers_queue)
3285 acb->host->can_queue = acb->maxOutstanding;
3287 acb->maxOutstanding = acb->host->can_queue;
3288 acb->maxFreeCCB = acb->host->can_queue;
3289 if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
3290 acb->maxFreeCCB += 64;
3294 static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
3295 struct CommandControlBlock *poll_ccb)
3297 struct MessageUnit_A __iomem *reg = acb->pmuA;
3298 struct CommandControlBlock *ccb;
3299 struct ARCMSR_CDB *arcmsr_cdb;
3300 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
3303 unsigned long ccb_cdb_phy;
3305 polling_hba_ccb_retry:
3307 outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable;
3308 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
3310 if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) {
3316 if (poll_count > 100){
3320 goto polling_hba_ccb_retry;
3323 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3324 if (acb->cdb_phyadd_hipart)
3325 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3326 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3327 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3328 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3329 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3330 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3331 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3332 " poll command abort successfully \n"
3333 , acb->host->host_no
3334 , ccb->pcmd->device->id
3335 , (u32)ccb->pcmd->device->lun
3337 ccb->pcmd->result = DID_ABORT << 16;
3338 arcmsr_ccb_complete(ccb);
3341 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3342 " command done ccb = '0x%p'"
3343 "ccboutstandingcount = %d \n"
3344 , acb->host->host_no
3346 , atomic_read(&acb->ccboutstandingcount));
3349 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3350 arcmsr_report_ccb_state(acb, ccb, error);
3355 static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
3356 struct CommandControlBlock *poll_ccb)
3358 struct MessageUnit_B *reg = acb->pmuB;
3359 struct ARCMSR_CDB *arcmsr_cdb;
3360 struct CommandControlBlock *ccb;
3361 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
3364 unsigned long ccb_cdb_phy;
3366 polling_hbb_ccb_retry:
3368 /* clear doorbell interrupt */
3369 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3371 index = reg->doneq_index;
3372 flag_ccb = reg->done_qbuffer[index];
3373 if (flag_ccb == 0) {
3379 if (poll_count > 100){
3383 goto polling_hbb_ccb_retry;
3386 reg->done_qbuffer[index] = 0;
3388 /*if last index number set it to 0 */
3389 index %= ARCMSR_MAX_HBB_POSTQUEUE;
3390 reg->doneq_index = index;
3391 /* check if command done with no error*/
3392 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3393 if (acb->cdb_phyadd_hipart)
3394 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3395 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3396 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3397 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3398 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3399 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3400 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3401 " poll command abort successfully \n"
3403 ,ccb->pcmd->device->id
3404 ,(u32)ccb->pcmd->device->lun
3406 ccb->pcmd->result = DID_ABORT << 16;
3407 arcmsr_ccb_complete(ccb);
3410 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3411 " command done ccb = '0x%p'"
3412 "ccboutstandingcount = %d \n"
3413 , acb->host->host_no
3415 , atomic_read(&acb->ccboutstandingcount));
3418 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3419 arcmsr_report_ccb_state(acb, ccb, error);
3424 static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3425 struct CommandControlBlock *poll_ccb)
3427 struct MessageUnit_C __iomem *reg = acb->pmuC;
3429 struct ARCMSR_CDB *arcmsr_cdb;
3431 struct CommandControlBlock *pCCB;
3432 uint32_t poll_ccb_done = 0, poll_count = 0;
3434 unsigned long ccb_cdb_phy;
3436 polling_hbc_ccb_retry:
3439 if ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
3440 if (poll_ccb_done) {
3445 if (poll_count > 100) {
3449 goto polling_hbc_ccb_retry;
3452 flag_ccb = readl(®->outbound_queueport_low);
3453 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3454 if (acb->cdb_phyadd_hipart)
3455 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3456 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3457 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3458 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3459 /* check ifcommand done with no error*/
3460 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3461 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3462 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3463 " poll command abort successfully \n"
3464 , acb->host->host_no
3465 , pCCB->pcmd->device->id
3466 , (u32)pCCB->pcmd->device->lun
3468 pCCB->pcmd->result = DID_ABORT << 16;
3469 arcmsr_ccb_complete(pCCB);
3472 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3473 " command done ccb = '0x%p'"
3474 "ccboutstandingcount = %d \n"
3475 , acb->host->host_no
3477 , atomic_read(&acb->ccboutstandingcount));
3480 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3481 arcmsr_report_ccb_state(acb, pCCB, error);
3486 static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
3487 struct CommandControlBlock *poll_ccb)
3490 uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
3491 int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
3492 unsigned long flags, ccb_cdb_phy;
3493 struct ARCMSR_CDB *arcmsr_cdb;
3494 struct CommandControlBlock *pCCB;
3495 struct MessageUnit_D *pmu = acb->pmuD;
3497 polling_hbaD_ccb_retry:
3500 spin_lock_irqsave(&acb->doneq_lock, flags);
3501 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
3502 doneq_index = pmu->doneq_index;
3503 if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
3504 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3505 if (poll_ccb_done) {
3510 if (poll_count > 40) {
3514 goto polling_hbaD_ccb_retry;
3517 toggle = doneq_index & 0x4000;
3518 index_stripped = (doneq_index & 0xFFF) + 1;
3519 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
3520 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
3521 ((toggle ^ 0x4000) + 1);
3522 doneq_index = pmu->doneq_index;
3523 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3524 flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
3525 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3526 if (acb->cdb_phyadd_hipart)
3527 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3528 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
3530 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
3532 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3533 if ((pCCB->acb != acb) ||
3534 (pCCB->startdone != ARCMSR_CCB_START)) {
3535 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3536 pr_notice("arcmsr%d: scsi id = %d "
3537 "lun = %d ccb = '0x%p' poll command "
3538 "abort successfully\n"
3539 , acb->host->host_no
3540 , pCCB->pcmd->device->id
3541 , (u32)pCCB->pcmd->device->lun
3543 pCCB->pcmd->result = DID_ABORT << 16;
3544 arcmsr_ccb_complete(pCCB);
3547 pr_notice("arcmsr%d: polling an illegal "
3548 "ccb command done ccb = '0x%p' "
3549 "ccboutstandingcount = %d\n"
3550 , acb->host->host_no
3552 , atomic_read(&acb->ccboutstandingcount));
3555 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
3557 arcmsr_report_ccb_state(acb, pCCB, error);
3562 static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
3563 struct CommandControlBlock *poll_ccb)
3566 uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
3568 unsigned long flags;
3570 struct CommandControlBlock *pCCB;
3571 struct MessageUnit_E __iomem *reg = acb->pmuE;
3573 polling_hbaC_ccb_retry:
3576 spin_lock_irqsave(&acb->doneq_lock, flags);
3577 doneq_index = acb->doneq_index;
3578 if ((readl(®->reply_post_producer_index) & 0xFFFF) ==
3580 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3581 if (poll_ccb_done) {
3586 if (poll_count > 40) {
3590 goto polling_hbaC_ccb_retry;
3593 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3595 if (doneq_index >= acb->completionQ_entry)
3597 acb->doneq_index = doneq_index;
3598 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3599 pCCB = acb->pccb_pool[cmdSMID];
3600 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3601 /* check if command done with no error*/
3602 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3603 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3604 pr_notice("arcmsr%d: scsi id = %d "
3605 "lun = %d ccb = '0x%p' poll command "
3606 "abort successfully\n"
3607 , acb->host->host_no
3608 , pCCB->pcmd->device->id
3609 , (u32)pCCB->pcmd->device->lun
3611 pCCB->pcmd->result = DID_ABORT << 16;
3612 arcmsr_ccb_complete(pCCB);
3615 pr_notice("arcmsr%d: polling an illegal "
3616 "ccb command done ccb = '0x%p' "
3617 "ccboutstandingcount = %d\n"
3618 , acb->host->host_no
3620 , atomic_read(&acb->ccboutstandingcount));
3623 error = (acb->pCompletionQ[doneq_index].cmdFlag &
3624 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3625 arcmsr_report_ccb_state(acb, pCCB, error);
3627 writel(doneq_index, ®->reply_post_consumer_index);
3631 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3632 struct CommandControlBlock *poll_ccb)
3635 switch (acb->adapter_type) {
3637 case ACB_ADAPTER_TYPE_A: {
3638 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3642 case ACB_ADAPTER_TYPE_B: {
3643 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3646 case ACB_ADAPTER_TYPE_C: {
3647 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3650 case ACB_ADAPTER_TYPE_D:
3651 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3653 case ACB_ADAPTER_TYPE_E:
3654 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
3660 static void arcmsr_set_iop_datetime(struct timer_list *t)
3662 struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
3663 unsigned int next_time;
3677 uint32_t msg_time[2];
3681 time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
3683 datetime.a.signature = 0x55AA;
3684 datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
3685 datetime.a.month = tm.tm_mon;
3686 datetime.a.date = tm.tm_mday;
3687 datetime.a.hour = tm.tm_hour;
3688 datetime.a.minute = tm.tm_min;
3689 datetime.a.second = tm.tm_sec;
3691 switch (pacb->adapter_type) {
3692 case ACB_ADAPTER_TYPE_A: {
3693 struct MessageUnit_A __iomem *reg = pacb->pmuA;
3694 writel(datetime.b.msg_time[0], ®->message_rwbuffer[0]);
3695 writel(datetime.b.msg_time[1], ®->message_rwbuffer[1]);
3696 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3699 case ACB_ADAPTER_TYPE_B: {
3700 uint32_t __iomem *rwbuffer;
3701 struct MessageUnit_B *reg = pacb->pmuB;
3702 rwbuffer = reg->message_rwbuffer;
3703 writel(datetime.b.msg_time[0], rwbuffer++);
3704 writel(datetime.b.msg_time[1], rwbuffer++);
3705 writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
3708 case ACB_ADAPTER_TYPE_C: {
3709 struct MessageUnit_C __iomem *reg = pacb->pmuC;
3710 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]);
3711 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]);
3712 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3713 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3716 case ACB_ADAPTER_TYPE_D: {
3717 uint32_t __iomem *rwbuffer;
3718 struct MessageUnit_D *reg = pacb->pmuD;
3719 rwbuffer = reg->msgcode_rwbuffer;
3720 writel(datetime.b.msg_time[0], rwbuffer++);
3721 writel(datetime.b.msg_time[1], rwbuffer++);
3722 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
3725 case ACB_ADAPTER_TYPE_E: {
3726 struct MessageUnit_E __iomem *reg = pacb->pmuE;
3727 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]);
3728 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]);
3729 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3730 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3731 writel(pacb->out_doorbell, ®->iobound_doorbell);
3735 if (sys_tz.tz_minuteswest)
3736 next_time = ARCMSR_HOURS;
3738 next_time = ARCMSR_MINUTES;
3739 mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
3742 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3744 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3745 dma_addr_t dma_coherent_handle;
3748 ********************************************************************
3749 ** here we need to tell iop 331 our freeccb.HighPart
3750 ** if freeccb.HighPart is not zero
3751 ********************************************************************
3753 switch (acb->adapter_type) {
3754 case ACB_ADAPTER_TYPE_B:
3755 case ACB_ADAPTER_TYPE_D:
3756 dma_coherent_handle = acb->dma_coherent_handle2;
3758 case ACB_ADAPTER_TYPE_E:
3759 dma_coherent_handle = acb->dma_coherent_handle +
3760 offsetof(struct CommandControlBlock, arcmsr_cdb);
3763 dma_coherent_handle = acb->dma_coherent_handle;
3766 cdb_phyaddr = lower_32_bits(dma_coherent_handle);
3767 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
3768 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3769 acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
3771 ***********************************************************************
3772 ** if adapter type B, set window of "post command Q"
3773 ***********************************************************************
3775 switch (acb->adapter_type) {
3777 case ACB_ADAPTER_TYPE_A: {
3778 if (cdb_phyaddr_hi32 != 0) {
3779 struct MessageUnit_A __iomem *reg = acb->pmuA;
3780 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
3781 ®->message_rwbuffer[0]);
3782 writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]);
3783 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
3784 ®->inbound_msgaddr0);
3785 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3786 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
3787 part physical address timeout\n",
3788 acb->host->host_no);
3795 case ACB_ADAPTER_TYPE_B: {
3796 uint32_t __iomem *rwbuffer;
3798 struct MessageUnit_B *reg = acb->pmuB;
3799 reg->postq_index = 0;
3800 reg->doneq_index = 0;
3801 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
3802 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3803 printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
3804 acb->host->host_no);
3807 rwbuffer = reg->message_rwbuffer;
3808 /* driver "set config" signature */
3809 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3810 /* normal should be zero */
3811 writel(cdb_phyaddr_hi32, rwbuffer++);
3812 /* postQ size (256 + 8)*4 */
3813 writel(cdb_phyaddr, rwbuffer++);
3814 /* doneQ size (256 + 8)*4 */
3815 writel(cdb_phyaddr + 1056, rwbuffer++);
3816 /* ccb maxQ size must be --> [(256 + 8)*4]*/
3817 writel(1056, rwbuffer);
3819 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
3820 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3821 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
3822 timeout \n",acb->host->host_no);
3825 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
3826 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3827 pr_err("arcmsr%d: can't set driver mode.\n",
3828 acb->host->host_no);
3833 case ACB_ADAPTER_TYPE_C: {
3834 struct MessageUnit_C __iomem *reg = acb->pmuC;
3836 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
3837 acb->adapter_index, cdb_phyaddr_hi32);
3838 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
3839 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]);
3840 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
3841 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3842 if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
3843 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
3844 timeout \n", acb->host->host_no);
3849 case ACB_ADAPTER_TYPE_D: {
3850 uint32_t __iomem *rwbuffer;
3851 struct MessageUnit_D *reg = acb->pmuD;
3852 reg->postq_index = 0;
3853 reg->doneq_index = 0;
3854 rwbuffer = reg->msgcode_rwbuffer;
3855 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3856 writel(cdb_phyaddr_hi32, rwbuffer++);
3857 writel(cdb_phyaddr, rwbuffer++);
3858 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
3859 sizeof(struct InBound_SRB)), rwbuffer++);
3860 writel(0x100, rwbuffer);
3861 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
3862 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3863 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
3864 acb->host->host_no);
3869 case ACB_ADAPTER_TYPE_E: {
3870 struct MessageUnit_E __iomem *reg = acb->pmuE;
3871 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
3872 writel(ARCMSR_SIGNATURE_1884, ®->msgcode_rwbuffer[1]);
3873 writel(cdb_phyaddr, ®->msgcode_rwbuffer[2]);
3874 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[3]);
3875 writel(acb->ccbsize, ®->msgcode_rwbuffer[4]);
3876 dma_coherent_handle = acb->dma_coherent_handle2;
3877 cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
3878 cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
3879 writel(cdb_phyaddr, ®->msgcode_rwbuffer[5]);
3880 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[6]);
3881 writel(acb->ioqueue_size, ®->msgcode_rwbuffer[7]);
3882 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
3883 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3884 writel(acb->out_doorbell, ®->iobound_doorbell);
3885 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
3886 pr_notice("arcmsr%d: 'set command Q window' timeout \n",
3887 acb->host->host_no);
3896 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
3898 uint32_t firmware_state = 0;
3899 switch (acb->adapter_type) {
3901 case ACB_ADAPTER_TYPE_A: {
3902 struct MessageUnit_A __iomem *reg = acb->pmuA;
3904 if (!(acb->acb_flags & ACB_F_IOP_INITED))
3906 firmware_state = readl(®->outbound_msgaddr1);
3907 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
3911 case ACB_ADAPTER_TYPE_B: {
3912 struct MessageUnit_B *reg = acb->pmuB;
3914 if (!(acb->acb_flags & ACB_F_IOP_INITED))
3916 firmware_state = readl(reg->iop2drv_doorbell);
3917 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
3918 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
3921 case ACB_ADAPTER_TYPE_C: {
3922 struct MessageUnit_C __iomem *reg = acb->pmuC;
3924 if (!(acb->acb_flags & ACB_F_IOP_INITED))
3926 firmware_state = readl(®->outbound_msgaddr1);
3927 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
3930 case ACB_ADAPTER_TYPE_D: {
3931 struct MessageUnit_D *reg = acb->pmuD;
3933 if (!(acb->acb_flags & ACB_F_IOP_INITED))
3935 firmware_state = readl(reg->outbound_msgaddr1);
3936 } while ((firmware_state &
3937 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
3940 case ACB_ADAPTER_TYPE_E: {
3941 struct MessageUnit_E __iomem *reg = acb->pmuE;
3943 if (!(acb->acb_flags & ACB_F_IOP_INITED))
3945 firmware_state = readl(®->outbound_msgaddr1);
3946 } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
3952 static void arcmsr_request_device_map(struct timer_list *t)
3954 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
3955 if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
3956 (acb->acb_flags & ACB_F_BUS_RESET) ||
3957 (acb->acb_flags & ACB_F_ABORT)) {
3958 mod_timer(&acb->eternal_timer,
3959 jiffies + msecs_to_jiffies(6 * HZ));
3961 acb->fw_flag = FW_NORMAL;
3962 if (atomic_read(&acb->ante_token_value) ==
3963 atomic_read(&acb->rq_map_token)) {
3964 atomic_set(&acb->rq_map_token, 16);
3966 atomic_set(&acb->ante_token_value,
3967 atomic_read(&acb->rq_map_token));
3968 if (atomic_dec_and_test(&acb->rq_map_token)) {
3969 mod_timer(&acb->eternal_timer, jiffies +
3970 msecs_to_jiffies(6 * HZ));
3973 switch (acb->adapter_type) {
3974 case ACB_ADAPTER_TYPE_A: {
3975 struct MessageUnit_A __iomem *reg = acb->pmuA;
3976 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3979 case ACB_ADAPTER_TYPE_B: {
3980 struct MessageUnit_B *reg = acb->pmuB;
3981 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3984 case ACB_ADAPTER_TYPE_C: {
3985 struct MessageUnit_C __iomem *reg = acb->pmuC;
3986 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3987 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3990 case ACB_ADAPTER_TYPE_D: {
3991 struct MessageUnit_D *reg = acb->pmuD;
3992 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
3995 case ACB_ADAPTER_TYPE_E: {
3996 struct MessageUnit_E __iomem *reg = acb->pmuE;
3997 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3998 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3999 writel(acb->out_doorbell, ®->iobound_doorbell);
4005 acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
4006 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4010 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
4012 struct MessageUnit_A __iomem *reg = acb->pmuA;
4013 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4014 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0);
4015 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
4016 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4017 rebuild' timeout \n", acb->host->host_no);
4021 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
4023 struct MessageUnit_B *reg = acb->pmuB;
4024 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4025 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
4026 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4027 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4028 rebuild' timeout \n",acb->host->host_no);
4032 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
4034 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
4035 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4036 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
4037 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
4038 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
4039 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4040 rebuild' timeout \n", pACB->host->host_no);
4045 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
4047 struct MessageUnit_D *pmu = pACB->pmuD;
4049 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4050 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
4051 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
4052 pr_notice("arcmsr%d: wait 'start adapter "
4053 "background rebuild' timeout\n", pACB->host->host_no);
4057 static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
4059 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
4061 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4062 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
4063 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4064 writel(pACB->out_doorbell, &pmu->iobound_doorbell);
4065 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
4066 pr_notice("arcmsr%d: wait 'start adapter "
4067 "background rebuild' timeout \n", pACB->host->host_no);
4071 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
4073 switch (acb->adapter_type) {
4074 case ACB_ADAPTER_TYPE_A:
4075 arcmsr_hbaA_start_bgrb(acb);
4077 case ACB_ADAPTER_TYPE_B:
4078 arcmsr_hbaB_start_bgrb(acb);
4080 case ACB_ADAPTER_TYPE_C:
4081 arcmsr_hbaC_start_bgrb(acb);
4083 case ACB_ADAPTER_TYPE_D:
4084 arcmsr_hbaD_start_bgrb(acb);
4086 case ACB_ADAPTER_TYPE_E:
4087 arcmsr_hbaE_start_bgrb(acb);
4092 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
4094 switch (acb->adapter_type) {
4095 case ACB_ADAPTER_TYPE_A: {
4096 struct MessageUnit_A __iomem *reg = acb->pmuA;
4097 uint32_t outbound_doorbell;
4098 /* empty doorbell Qbuffer if door bell ringed */
4099 outbound_doorbell = readl(®->outbound_doorbell);
4100 /*clear doorbell interrupt */
4101 writel(outbound_doorbell, ®->outbound_doorbell);
4102 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
4106 case ACB_ADAPTER_TYPE_B: {
4107 struct MessageUnit_B *reg = acb->pmuB;
4108 uint32_t outbound_doorbell, i;
4109 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4110 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4111 /* let IOP know data has been read */
4112 for(i=0; i < 200; i++) {
4114 outbound_doorbell = readl(reg->iop2drv_doorbell);
4115 if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
4116 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4117 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4123 case ACB_ADAPTER_TYPE_C: {
4124 struct MessageUnit_C __iomem *reg = acb->pmuC;
4125 uint32_t outbound_doorbell, i;
4126 /* empty doorbell Qbuffer if door bell ringed */
4127 outbound_doorbell = readl(®->outbound_doorbell);
4128 writel(outbound_doorbell, ®->outbound_doorbell_clear);
4129 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
4130 for (i = 0; i < 200; i++) {
4132 outbound_doorbell = readl(®->outbound_doorbell);
4133 if (outbound_doorbell &
4134 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
4135 writel(outbound_doorbell,
4136 ®->outbound_doorbell_clear);
4137 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
4138 ®->inbound_doorbell);
4144 case ACB_ADAPTER_TYPE_D: {
4145 struct MessageUnit_D *reg = acb->pmuD;
4146 uint32_t outbound_doorbell, i;
4147 /* empty doorbell Qbuffer if door bell ringed */
4148 outbound_doorbell = readl(reg->outbound_doorbell);
4149 writel(outbound_doorbell, reg->outbound_doorbell);
4150 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4151 reg->inbound_doorbell);
4152 for (i = 0; i < 200; i++) {
4154 outbound_doorbell = readl(reg->outbound_doorbell);
4155 if (outbound_doorbell &
4156 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
4157 writel(outbound_doorbell,
4158 reg->outbound_doorbell);
4159 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4160 reg->inbound_doorbell);
4166 case ACB_ADAPTER_TYPE_E: {
4167 struct MessageUnit_E __iomem *reg = acb->pmuE;
4170 acb->in_doorbell = readl(®->iobound_doorbell);
4171 writel(0, ®->host_int_status); /*clear interrupt*/
4172 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4173 writel(acb->out_doorbell, ®->iobound_doorbell);
4174 for(i=0; i < 200; i++) {
4176 tmp = acb->in_doorbell;
4177 acb->in_doorbell = readl(®->iobound_doorbell);
4178 if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
4179 writel(0, ®->host_int_status); /*clear interrupt*/
4180 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4181 writel(acb->out_doorbell, ®->iobound_doorbell);
4190 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4192 switch (acb->adapter_type) {
4193 case ACB_ADAPTER_TYPE_A:
4195 case ACB_ADAPTER_TYPE_B:
4197 struct MessageUnit_B *reg = acb->pmuB;
4198 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
4199 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4200 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
4205 case ACB_ADAPTER_TYPE_C:
4211 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
4215 struct MessageUnit_A __iomem *pmuA = acb->pmuA;
4216 struct MessageUnit_C __iomem *pmuC = acb->pmuC;
4217 struct MessageUnit_D *pmuD = acb->pmuD;
4219 /* backup pci config data */
4220 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
4221 for (i = 0; i < 64; i++) {
4222 pci_read_config_byte(acb->pdev, i, &value[i]);
4224 /* hardware reset signal */
4225 if (acb->dev_id == 0x1680) {
4226 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
4227 } else if (acb->dev_id == 0x1880) {
4230 writel(0xF, &pmuC->write_sequence);
4231 writel(0x4, &pmuC->write_sequence);
4232 writel(0xB, &pmuC->write_sequence);
4233 writel(0x2, &pmuC->write_sequence);
4234 writel(0x7, &pmuC->write_sequence);
4235 writel(0xD, &pmuC->write_sequence);
4236 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
4237 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
4238 } else if (acb->dev_id == 0x1884) {
4239 struct MessageUnit_E __iomem *pmuE = acb->pmuE;
4242 writel(0x4, &pmuE->write_sequence_3xxx);
4243 writel(0xB, &pmuE->write_sequence_3xxx);
4244 writel(0x2, &pmuE->write_sequence_3xxx);
4245 writel(0x7, &pmuE->write_sequence_3xxx);
4246 writel(0xD, &pmuE->write_sequence_3xxx);
4248 } while (((readl(&pmuE->host_diagnostic_3xxx) &
4249 ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
4250 writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
4251 } else if (acb->dev_id == 0x1214) {
4252 writel(0x20, pmuD->reset_request);
4254 pci_write_config_byte(acb->pdev, 0x84, 0x20);
4257 /* write back pci config data */
4258 for (i = 0; i < 64; i++) {
4259 pci_write_config_byte(acb->pdev, i, value[i]);
4265 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4269 switch(acb->adapter_type) {
4270 case ACB_ADAPTER_TYPE_A:{
4271 struct MessageUnit_A __iomem *reg = acb->pmuA;
4272 rtn = ((readl(®->outbound_msgaddr1) &
4273 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
4276 case ACB_ADAPTER_TYPE_B:{
4277 struct MessageUnit_B *reg = acb->pmuB;
4278 rtn = ((readl(reg->iop2drv_doorbell) &
4279 ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
4282 case ACB_ADAPTER_TYPE_C:{
4283 struct MessageUnit_C __iomem *reg = acb->pmuC;
4284 rtn = (readl(®->host_diagnostic) & 0x04) ? true : false;
4287 case ACB_ADAPTER_TYPE_D:{
4288 struct MessageUnit_D *reg = acb->pmuD;
4289 rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
4293 case ACB_ADAPTER_TYPE_E:{
4294 struct MessageUnit_E __iomem *reg = acb->pmuE;
4295 rtn = (readl(®->host_diagnostic_3xxx) &
4296 ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
4303 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
4305 uint32_t intmask_org;
4306 /* disable all outbound interrupt */
4307 intmask_org = arcmsr_disable_outbound_ints(acb);
4308 arcmsr_wait_firmware_ready(acb);
4309 arcmsr_iop_confirm(acb);
4310 /*start background rebuild*/
4311 arcmsr_start_adapter_bgrb(acb);
4312 /* empty doorbell Qbuffer if door bell ringed */
4313 arcmsr_clear_doorbell_queue_buffer(acb);
4314 arcmsr_enable_eoi_mode(acb);
4315 /* enable outbound Post Queue,outbound doorbell Interrupt */
4316 arcmsr_enable_outbound_ints(acb, intmask_org);
4317 acb->acb_flags |= ACB_F_IOP_INITED;
4320 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
4322 struct CommandControlBlock *ccb;
4323 uint32_t intmask_org;
4324 uint8_t rtnval = 0x00;
4326 unsigned long flags;
4328 if (atomic_read(&acb->ccboutstandingcount) != 0) {
4329 /* disable all outbound interrupt */
4330 intmask_org = arcmsr_disable_outbound_ints(acb);
4331 /* talk to iop 331 outstanding command aborted */
4332 rtnval = arcmsr_abort_allcmd(acb);
4333 /* clear all outbound posted Q */
4334 arcmsr_done4abort_postqueue(acb);
4335 for (i = 0; i < acb->maxFreeCCB; i++) {
4336 ccb = acb->pccb_pool[i];
4337 if (ccb->startdone == ARCMSR_CCB_START) {
4338 scsi_dma_unmap(ccb->pcmd);
4339 ccb->startdone = ARCMSR_CCB_DONE;
4341 spin_lock_irqsave(&acb->ccblist_lock, flags);
4342 list_add_tail(&ccb->list, &acb->ccb_free_list);
4343 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
4346 atomic_set(&acb->ccboutstandingcount, 0);
4347 /* enable all outbound interrupt */
4348 arcmsr_enable_outbound_ints(acb, intmask_org);
4354 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
4356 struct AdapterControlBlock *acb;
4357 int retry_count = 0;
4359 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
4360 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4362 pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
4363 " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
4366 if (acb->acb_flags & ACB_F_BUS_RESET) {
4368 pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
4369 timeout = wait_event_timeout(wait_q, (acb->acb_flags
4370 & ACB_F_BUS_RESET) == 0, 220 * HZ);
4374 acb->acb_flags |= ACB_F_BUS_RESET;
4375 if (!arcmsr_iop_reset(acb)) {
4376 arcmsr_hardware_reset(acb);
4377 acb->acb_flags &= ~ACB_F_IOP_INITED;
4379 ssleep(ARCMSR_SLEEPTIME);
4380 if (arcmsr_reset_in_progress(acb)) {
4381 if (retry_count > ARCMSR_RETRYCOUNT) {
4382 acb->fw_flag = FW_DEADLOCK;
4383 pr_notice("arcmsr%d: waiting for hw bus reset"
4384 " return, RETRY TERMINATED!!\n",
4385 acb->host->host_no);
4389 goto wait_reset_done;
4391 arcmsr_iop_init(acb);
4392 atomic_set(&acb->rq_map_token, 16);
4393 atomic_set(&acb->ante_token_value, 16);
4394 acb->fw_flag = FW_NORMAL;
4395 mod_timer(&acb->eternal_timer, jiffies +
4396 msecs_to_jiffies(6 * HZ));
4397 acb->acb_flags &= ~ACB_F_BUS_RESET;
4399 pr_notice("arcmsr: scsi bus reset eh returns with success\n");
4401 acb->acb_flags &= ~ACB_F_BUS_RESET;
4402 atomic_set(&acb->rq_map_token, 16);
4403 atomic_set(&acb->ante_token_value, 16);
4404 acb->fw_flag = FW_NORMAL;
4405 mod_timer(&acb->eternal_timer, jiffies +
4406 msecs_to_jiffies(6 * HZ));
/*
 * arcmsr_abort_one_cmd - abort a single CCB by polling it to completion.
 *
 * Thin wrapper around arcmsr_polling_ccbdone(); returns its result
 * (SUCCESS/FAILED as used by the abort handler).
 */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	int rtn;

	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}
4420 static int arcmsr_abort(struct scsi_cmnd *cmd)
4422 struct AdapterControlBlock *acb =
4423 (struct AdapterControlBlock *)cmd->device->host->hostdata;
4426 uint32_t intmask_org;
4428 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4431 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
4432 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
4433 acb->acb_flags |= ACB_F_ABORT;
4436 ************************************************
4437 ** the all interrupt service routine is locked
4438 ** we need to handle it as soon as possible and exit
4439 ************************************************
4441 if (!atomic_read(&acb->ccboutstandingcount)) {
4442 acb->acb_flags &= ~ACB_F_ABORT;
4446 intmask_org = arcmsr_disable_outbound_ints(acb);
4447 for (i = 0; i < acb->maxFreeCCB; i++) {
4448 struct CommandControlBlock *ccb = acb->pccb_pool[i];
4449 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
4450 ccb->startdone = ARCMSR_CCB_ABORTED;
4451 rtn = arcmsr_abort_one_cmd(acb, ccb);
4455 acb->acb_flags &= ~ACB_F_ABORT;
4456 arcmsr_enable_outbound_ints(acb, intmask_org);
4460 static const char *arcmsr_info(struct Scsi_Host *host)
4462 struct AdapterControlBlock *acb =
4463 (struct AdapterControlBlock *) host->hostdata;
4464 static char buf[256];
4467 switch (acb->pdev->device) {
4468 case PCI_DEVICE_ID_ARECA_1110:
4469 case PCI_DEVICE_ID_ARECA_1200:
4470 case PCI_DEVICE_ID_ARECA_1202:
4471 case PCI_DEVICE_ID_ARECA_1210:
4474 case PCI_DEVICE_ID_ARECA_1120:
4475 case PCI_DEVICE_ID_ARECA_1130:
4476 case PCI_DEVICE_ID_ARECA_1160:
4477 case PCI_DEVICE_ID_ARECA_1170:
4478 case PCI_DEVICE_ID_ARECA_1201:
4479 case PCI_DEVICE_ID_ARECA_1203:
4480 case PCI_DEVICE_ID_ARECA_1220:
4481 case PCI_DEVICE_ID_ARECA_1230:
4482 case PCI_DEVICE_ID_ARECA_1260:
4483 case PCI_DEVICE_ID_ARECA_1270:
4484 case PCI_DEVICE_ID_ARECA_1280:
4487 case PCI_DEVICE_ID_ARECA_1214:
4488 case PCI_DEVICE_ID_ARECA_1380:
4489 case PCI_DEVICE_ID_ARECA_1381:
4490 case PCI_DEVICE_ID_ARECA_1680:
4491 case PCI_DEVICE_ID_ARECA_1681:
4492 case PCI_DEVICE_ID_ARECA_1880:
4493 case PCI_DEVICE_ID_ARECA_1884:
4501 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4502 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);