Merge tag 'for-linus-20170904' of git://git.infradead.org/linux-mtd
[linux-2.6-microblaze.git] / drivers / scsi / ipr.c
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *      - Ultra 320 SCSI controller
38  *      - PCI-X host interface
39  *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *      - Non-Volatile Write Cache
41  *      - Supports attachment of non-RAID disks, tape, and optical devices
42  *      - RAID Levels 0, 5, 10
43  *      - Hot spare
44  *      - Background Parity Checking
45  *      - Background Data Scrubbing
46  *      - Ability to increase the capacity of an existing RAID 5 disk array
47  *              by adding disks
48  *
49  * Driver Features:
50  *      - Tagged command queuing
51  *      - Adapter microcode download
52  *      - PCI hot plug
53  *      - SCSI device hot plug
54  *
55  */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90  *   Global Data
91  */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 16;
102 static unsigned int ipr_fast_reboot;
103 static DEFINE_SPINLOCK(ipr_driver_lock);
104
105 /* This table describes the differences between DMA controller chips */
106 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
107         { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
108                 .mailbox = 0x0042C,
109                 .max_cmds = 100,
110                 .cache_line_size = 0x20,
111                 .clear_isr = 1,
112                 .iopoll_weight = 0,
113                 {
114                         .set_interrupt_mask_reg = 0x0022C,
115                         .clr_interrupt_mask_reg = 0x00230,
116                         .clr_interrupt_mask_reg32 = 0x00230,
117                         .sense_interrupt_mask_reg = 0x0022C,
118                         .sense_interrupt_mask_reg32 = 0x0022C,
119                         .clr_interrupt_reg = 0x00228,
120                         .clr_interrupt_reg32 = 0x00228,
121                         .sense_interrupt_reg = 0x00224,
122                         .sense_interrupt_reg32 = 0x00224,
123                         .ioarrin_reg = 0x00404,
124                         .sense_uproc_interrupt_reg = 0x00214,
125                         .sense_uproc_interrupt_reg32 = 0x00214,
126                         .set_uproc_interrupt_reg = 0x00214,
127                         .set_uproc_interrupt_reg32 = 0x00214,
128                         .clr_uproc_interrupt_reg = 0x00218,
129                         .clr_uproc_interrupt_reg32 = 0x00218
130                 }
131         },
132         { /* Snipe and Scamp */
133                 .mailbox = 0x0052C,
134                 .max_cmds = 100,
135                 .cache_line_size = 0x20,
136                 .clear_isr = 1,
137                 .iopoll_weight = 0,
138                 {
139                         .set_interrupt_mask_reg = 0x00288,
140                         .clr_interrupt_mask_reg = 0x0028C,
141                         .clr_interrupt_mask_reg32 = 0x0028C,
142                         .sense_interrupt_mask_reg = 0x00288,
143                         .sense_interrupt_mask_reg32 = 0x00288,
144                         .clr_interrupt_reg = 0x00284,
145                         .clr_interrupt_reg32 = 0x00284,
146                         .sense_interrupt_reg = 0x00280,
147                         .sense_interrupt_reg32 = 0x00280,
148                         .ioarrin_reg = 0x00504,
149                         .sense_uproc_interrupt_reg = 0x00290,
150                         .sense_uproc_interrupt_reg32 = 0x00290,
151                         .set_uproc_interrupt_reg = 0x00290,
152                         .set_uproc_interrupt_reg32 = 0x00290,
153                         .clr_uproc_interrupt_reg = 0x00294,
154                         .clr_uproc_interrupt_reg32 = 0x00294
155                 }
156         },
157         { /* CRoC */
158                 .mailbox = 0x00044,
159                 .max_cmds = 1000,
160                 .cache_line_size = 0x20,
161                 .clear_isr = 0,
162                 .iopoll_weight = 64,
163                 {
164                         .set_interrupt_mask_reg = 0x00010,
165                         .clr_interrupt_mask_reg = 0x00018,
166                         .clr_interrupt_mask_reg32 = 0x0001C,
167                         .sense_interrupt_mask_reg = 0x00010,
168                         .sense_interrupt_mask_reg32 = 0x00014,
169                         .clr_interrupt_reg = 0x00008,
170                         .clr_interrupt_reg32 = 0x0000C,
171                         .sense_interrupt_reg = 0x00000,
172                         .sense_interrupt_reg32 = 0x00004,
173                         .ioarrin_reg = 0x00070,
174                         .sense_uproc_interrupt_reg = 0x00020,
175                         .sense_uproc_interrupt_reg32 = 0x00024,
176                         .set_uproc_interrupt_reg = 0x00020,
177                         .set_uproc_interrupt_reg32 = 0x00024,
178                         .clr_uproc_interrupt_reg = 0x00028,
179                         .clr_uproc_interrupt_reg32 = 0x0002C,
180                         .init_feedback_reg = 0x0005C,
181                         .dump_addr_reg = 0x00064,
182                         .dump_data_reg = 0x00068,
183                         .endian_swap_reg = 0x00084
184                 }
185         },
186 };
187
188 static const struct ipr_chip_t ipr_chip[] = {
189         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
197         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
198         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
199 };
200
201 static int ipr_max_bus_speeds[] = {
202         IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
203 };
204
205 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
206 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
207 module_param_named(max_speed, ipr_max_speed, uint, 0);
208 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
209 module_param_named(log_level, ipr_log_level, uint, 0);
210 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
211 module_param_named(testmode, ipr_testmode, int, 0);
212 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
213 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
214 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
215 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
216 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
217 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
218 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
219 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
220 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
221 module_param_named(max_devs, ipr_max_devs, int, 0);
222 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
223                  "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
224 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
225 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
226 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
227 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
228 MODULE_LICENSE("GPL");
229 MODULE_VERSION(IPR_DRIVER_VERSION);
230
231 /*  A constant array of IOASCs/URCs/Error Messages */
232 static const
233 struct ipr_error_table_t ipr_error_table[] = {
234         {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
235         "8155: An unknown error was received"},
236         {0x00330000, 0, 0,
237         "Soft underlength error"},
238         {0x005A0000, 0, 0,
239         "Command to be cancelled not found"},
240         {0x00808000, 0, 0,
241         "Qualified success"},
242         {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
243         "FFFE: Soft device bus error recovered by the IOA"},
244         {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
245         "4101: Soft device bus fabric error"},
246         {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
247         "FFFC: Logical block guard error recovered by the device"},
248         {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
249         "FFFC: Logical block reference tag error recovered by the device"},
250         {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
251         "4171: Recovered scatter list tag / sequence number error"},
252         {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
253         "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254         {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
255         "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256         {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
257         "FFFD: Recovered logical block reference tag error detected by the IOA"},
258         {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
259         "FFFD: Logical block guard error recovered by the IOA"},
260         {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
261         "FFF9: Device sector reassign successful"},
262         {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
263         "FFF7: Media error recovered by device rewrite procedures"},
264         {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
265         "7001: IOA sector reassignment successful"},
266         {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
267         "FFF9: Soft media error. Sector reassignment recommended"},
268         {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
269         "FFF7: Media error recovered by IOA rewrite procedures"},
270         {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
271         "FF3D: Soft PCI bus error recovered by the IOA"},
272         {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
273         "FFF6: Device hardware error recovered by the IOA"},
274         {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
275         "FFF6: Device hardware error recovered by the device"},
276         {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
277         "FF3D: Soft IOA error recovered by the IOA"},
278         {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
279         "FFFA: Undefined device response recovered by the IOA"},
280         {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
281         "FFF6: Device bus error, message or command phase"},
282         {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
283         "FFFE: Task Management Function failed"},
284         {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
285         "FFF6: Failure prediction threshold exceeded"},
286         {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
287         "8009: Impending cache battery pack failure"},
288         {0x02040100, 0, 0,
289         "Logical Unit in process of becoming ready"},
290         {0x02040200, 0, 0,
291         "Initializing command required"},
292         {0x02040400, 0, 0,
293         "34FF: Disk device format in progress"},
294         {0x02040C00, 0, 0,
295         "Logical unit not accessible, target port in unavailable state"},
296         {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
297         "9070: IOA requested reset"},
298         {0x023F0000, 0, 0,
299         "Synchronization required"},
300         {0x02408500, 0, 0,
301         "IOA microcode download required"},
302         {0x02408600, 0, 0,
303         "Device bus connection is prohibited by host"},
304         {0x024E0000, 0, 0,
305         "No ready, IOA shutdown"},
306         {0x025A0000, 0, 0,
307         "Not ready, IOA has been shutdown"},
308         {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
309         "3020: Storage subsystem configuration error"},
310         {0x03110B00, 0, 0,
311         "FFF5: Medium error, data unreadable, recommend reassign"},
312         {0x03110C00, 0, 0,
313         "7000: Medium error, data unreadable, do not reassign"},
314         {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
315         "FFF3: Disk media format bad"},
316         {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
317         "3002: Addressed device failed to respond to selection"},
318         {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
319         "3100: Device bus error"},
320         {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
321         "3109: IOA timed out a device command"},
322         {0x04088000, 0, 0,
323         "3120: SCSI bus is not operational"},
324         {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
325         "4100: Hard device bus fabric error"},
326         {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
327         "310C: Logical block guard error detected by the device"},
328         {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
329         "310C: Logical block reference tag error detected by the device"},
330         {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
331         "4170: Scatter list tag / sequence number error"},
332         {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
333         "8150: Logical block CRC error on IOA to Host transfer"},
334         {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
335         "4170: Logical block sequence number error on IOA to Host transfer"},
336         {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
337         "310D: Logical block reference tag error detected by the IOA"},
338         {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
339         "310D: Logical block guard error detected by the IOA"},
340         {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
341         "9000: IOA reserved area data check"},
342         {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
343         "9001: IOA reserved area invalid data pattern"},
344         {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
345         "9002: IOA reserved area LRC error"},
346         {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
347         "Hardware Error, IOA metadata access error"},
348         {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
349         "102E: Out of alternate sectors for disk storage"},
350         {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
351         "FFF4: Data transfer underlength error"},
352         {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
353         "FFF4: Data transfer overlength error"},
354         {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
355         "3400: Logical unit failure"},
356         {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
357         "FFF4: Device microcode is corrupt"},
358         {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
359         "8150: PCI bus error"},
360         {0x04430000, 1, 0,
361         "Unsupported device bus message received"},
362         {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
363         "FFF4: Disk device problem"},
364         {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
365         "8150: Permanent IOA failure"},
366         {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
367         "3010: Disk device returned wrong response to IOA"},
368         {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
369         "8151: IOA microcode error"},
370         {0x04448500, 0, 0,
371         "Device bus status error"},
372         {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
373         "8157: IOA error requiring IOA reset to recover"},
374         {0x04448700, 0, 0,
375         "ATA device status error"},
376         {0x04490000, 0, 0,
377         "Message reject received from the device"},
378         {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
379         "8008: A permanent cache battery pack failure occurred"},
380         {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
381         "9090: Disk unit has been modified after the last known status"},
382         {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
383         "9081: IOA detected device error"},
384         {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
385         "9082: IOA detected device error"},
386         {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
387         "3110: Device bus error, message or command phase"},
388         {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
389         "3110: SAS Command / Task Management Function failed"},
390         {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
391         "9091: Incorrect hardware configuration change has been detected"},
392         {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
393         "9073: Invalid multi-adapter configuration"},
394         {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
395         "4010: Incorrect connection between cascaded expanders"},
396         {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
397         "4020: Connections exceed IOA design limits"},
398         {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
399         "4030: Incorrect multipath connection"},
400         {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
401         "4110: Unsupported enclosure function"},
402         {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
403         "4120: SAS cable VPD cannot be read"},
404         {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
405         "FFF4: Command to logical unit failed"},
406         {0x05240000, 1, 0,
407         "Illegal request, invalid request type or request packet"},
408         {0x05250000, 0, 0,
409         "Illegal request, invalid resource handle"},
410         {0x05258000, 0, 0,
411         "Illegal request, commands not allowed to this device"},
412         {0x05258100, 0, 0,
413         "Illegal request, command not allowed to a secondary adapter"},
414         {0x05258200, 0, 0,
415         "Illegal request, command not allowed to a non-optimized resource"},
416         {0x05260000, 0, 0,
417         "Illegal request, invalid field in parameter list"},
418         {0x05260100, 0, 0,
419         "Illegal request, parameter not supported"},
420         {0x05260200, 0, 0,
421         "Illegal request, parameter value invalid"},
422         {0x052C0000, 0, 0,
423         "Illegal request, command sequence error"},
424         {0x052C8000, 1, 0,
425         "Illegal request, dual adapter support not enabled"},
426         {0x052C8100, 1, 0,
427         "Illegal request, another cable connector was physically disabled"},
428         {0x054E8000, 1, 0,
429         "Illegal request, inconsistent group id/group count"},
430         {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
431         "9031: Array protection temporarily suspended, protection resuming"},
432         {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
433         "9040: Array protection temporarily suspended, protection resuming"},
434         {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
435         "4080: IOA exceeded maximum operating temperature"},
436         {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
437         "4085: Service required"},
438         {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
439         "3140: Device bus not ready to ready transition"},
440         {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
441         "FFFB: SCSI bus was reset"},
442         {0x06290500, 0, 0,
443         "FFFE: SCSI bus transition to single ended"},
444         {0x06290600, 0, 0,
445         "FFFE: SCSI bus transition to LVD"},
446         {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
447         "FFFB: SCSI bus was reset by another initiator"},
448         {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
449         "3029: A device replacement has occurred"},
450         {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
451         "4102: Device bus fabric performance degradation"},
452         {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
453         "9051: IOA cache data exists for a missing or failed device"},
454         {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
455         "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
456         {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
457         "9025: Disk unit is not supported at its physical location"},
458         {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
459         "3020: IOA detected a SCSI bus configuration error"},
460         {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
461         "3150: SCSI bus configuration error"},
462         {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
463         "9074: Asymmetric advanced function disk configuration"},
464         {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
465         "4040: Incomplete multipath connection between IOA and enclosure"},
466         {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
467         "4041: Incomplete multipath connection between enclosure and device"},
468         {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
469         "9075: Incomplete multipath connection between IOA and remote IOA"},
470         {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
471         "9076: Configuration error, missing remote IOA"},
472         {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
473         "4050: Enclosure does not support a required multipath function"},
474         {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
475         "4121: Configuration error, required cable is missing"},
476         {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
477         "4122: Cable is not plugged into the correct location on remote IOA"},
478         {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
479         "4123: Configuration error, invalid cable vital product data"},
480         {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
481         "4124: Configuration error, both cable ends are plugged into the same IOA"},
482         {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
483         "4070: Logically bad block written on device"},
484         {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
485         "9041: Array protection temporarily suspended"},
486         {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
487         "9042: Corrupt array parity detected on specified device"},
488         {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
489         "9030: Array no longer protected due to missing or failed disk unit"},
490         {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
491         "9071: Link operational transition"},
492         {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
493         "9072: Link not operational transition"},
494         {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
495         "9032: Array exposed but still protected"},
496         {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
497         "70DD: Device forced failed by disrupt device command"},
498         {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
499         "4061: Multipath redundancy level got better"},
500         {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
501         "4060: Multipath redundancy level got worse"},
502         {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
503         "9083: Device raw mode enabled"},
504         {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
505         "9084: Device raw mode disabled"},
506         {0x07270000, 0, 0,
507         "Failure due to other device"},
508         {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
509         "9008: IOA does not support functions expected by devices"},
510         {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
511         "9010: Cache data associated with attached devices cannot be found"},
512         {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
513         "9011: Cache data belongs to devices other than those attached"},
514         {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
515         "9020: Array missing 2 or more devices with only 1 device present"},
516         {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
517         "9021: Array missing 2 or more devices with 2 or more devices present"},
518         {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
519         "9022: Exposed array is missing a required device"},
520         {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
521         "9023: Array member(s) not at required physical locations"},
522         {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
523         "9024: Array not functional due to present hardware configuration"},
524         {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
525         "9026: Array not functional due to present hardware configuration"},
526         {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
527         "9027: Array is missing a device and parity is out of sync"},
528         {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
529         "9028: Maximum number of arrays already exist"},
530         {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
531         "9050: Required cache data cannot be located for a disk unit"},
532         {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
533         "9052: Cache data exists for a device that has been modified"},
534         {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
535         "9054: IOA resources not available due to previous problems"},
536         {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
537         "9092: Disk unit requires initialization before use"},
538         {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
539         "9029: Incorrect hardware configuration change has been detected"},
540         {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
541         "9060: One or more disk pairs are missing from an array"},
542         {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
543         "9061: One or more disks are missing from an array"},
544         {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
545         "9062: One or more disks are missing from an array"},
546         {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
547         "9063: Maximum number of functional arrays has been exceeded"},
548         {0x07279A00, 0, 0,
549         "Data protect, other volume set problem"},
550         {0x0B260000, 0, 0,
551         "Aborted command, invalid descriptor"},
552         {0x0B3F9000, 0, 0,
553         "Target operating conditions have changed, dual adapter takeover"},
554         {0x0B530200, 0, 0,
555         "Aborted command, medium removal prevented"},
556         {0x0B5A0000, 0, 0,
557         "Command terminated by host"},
558         {0x0B5B8000, 0, 0,
559         "Aborted command, command terminated by host"}
560 };
561
562 static const struct ipr_ses_table_entry ipr_ses_table[] = {
563         { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
564         { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
565         { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
566         { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
567         { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
568         { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
569         { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
570         { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
571         { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
572         { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
573         { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
574         { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
575         { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
576 };
577
578 /*
579  *  Function Prototypes
580  */
581 static int ipr_reset_alert(struct ipr_cmnd *);
582 static void ipr_process_ccn(struct ipr_cmnd *);
583 static void ipr_process_error(struct ipr_cmnd *);
584 static void ipr_reset_ioa_job(struct ipr_cmnd *);
585 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
586                                    enum ipr_shutdown_type);
587
588 #ifdef CONFIG_SCSI_IPR_TRACE
589 /**
590  * ipr_trc_hook - Add a trace entry to the driver trace
591  * @ipr_cmd:    ipr command struct
592  * @type:               trace type
593  * @add_data:   additional data
594  *
595  * Return value:
596  *      none
597  **/
598 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599                          u8 type, u32 add_data)
600 {
601         struct ipr_trace_entry *trace_entry;
602         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
603         unsigned int trace_index;
604
605         trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
606         trace_entry = &ioa_cfg->trace[trace_index];
607         trace_entry->time = jiffies;
608         trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
609         trace_entry->type = type;
610         if (ipr_cmd->ioa_cfg->sis64)
611                 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
612         else
613                 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
614         trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
615         trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
616         trace_entry->u.add_data = add_data;
617         wmb();
618 }
619 #else
620 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
621 #endif
622
623 /**
624  * ipr_lock_and_done - Acquire lock and complete command
625  * @ipr_cmd:    ipr command struct
626  *
627  * Return value:
628  *      none
629  **/
630 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
631 {
632         unsigned long lock_flags;
633         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
634
635         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
636         ipr_cmd->done(ipr_cmd);
637         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
638 }
639
640 /**
641  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
642  * @ipr_cmd:    ipr command struct
643  *
644  * Return value:
645  *      none
646  **/
647 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
648 {
649         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
650         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
651         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
652         dma_addr_t dma_addr = ipr_cmd->dma_addr;
653         int hrrq_id;
654
655         hrrq_id = ioarcb->cmd_pkt.hrrq_id;
656         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
657         ioarcb->cmd_pkt.hrrq_id = hrrq_id;
658         ioarcb->data_transfer_length = 0;
659         ioarcb->read_data_transfer_length = 0;
660         ioarcb->ioadl_len = 0;
661         ioarcb->read_ioadl_len = 0;
662
663         if (ipr_cmd->ioa_cfg->sis64) {
664                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
665                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
666                 ioasa64->u.gata.status = 0;
667         } else {
668                 ioarcb->write_ioadl_addr =
669                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
670                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
671                 ioasa->u.gata.status = 0;
672         }
673
674         ioasa->hdr.ioasc = 0;
675         ioasa->hdr.residual_data_len = 0;
676         ipr_cmd->scsi_cmd = NULL;
677         ipr_cmd->qc = NULL;
678         ipr_cmd->sense_buffer[0] = 0;
679         ipr_cmd->dma_use_sg = 0;
680 }
681
682 /**
683  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
684  * @ipr_cmd:    ipr command struct
685  *
686  * Return value:
687  *      none
688  **/
689 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
690                               void (*fast_done) (struct ipr_cmnd *))
691 {
692         ipr_reinit_ipr_cmnd(ipr_cmd);
693         ipr_cmd->u.scratch = 0;
694         ipr_cmd->sibling = NULL;
695         ipr_cmd->eh_comp = NULL;
696         ipr_cmd->fast_done = fast_done;
697         init_timer(&ipr_cmd->timer);
698 }
699
700 /**
701  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
702  * @ioa_cfg:    ioa config struct
703  *
704  * Return value:
705  *      pointer to ipr command struct
706  **/
707 static
708 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
709 {
710         struct ipr_cmnd *ipr_cmd = NULL;
711
712         if (likely(!list_empty(&hrrq->hrrq_free_q))) {
713                 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
714                         struct ipr_cmnd, queue);
715                 list_del(&ipr_cmd->queue);
716         }
717
718
719         return ipr_cmd;
720 }
721
722 /**
723  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
724  * @ioa_cfg:    ioa config struct
725  *
726  * Return value:
727  *      pointer to ipr command struct
728  **/
729 static
730 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
731 {
732         struct ipr_cmnd *ipr_cmd =
733                 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
734         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
735         return ipr_cmd;
736 }
737
738 /**
739  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
740  * @ioa_cfg:    ioa config struct
741  * @clr_ints:     interrupts to clear
742  *
743  * This function masks all interrupts on the adapter, then clears the
744  * interrupts specified in the mask
745  *
746  * Return value:
747  *      none
748  **/
749 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
750                                           u32 clr_ints)
751 {
752         volatile u32 int_reg;
753         int i;
754
755         /* Stop new interrupts */
756         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
757                 spin_lock(&ioa_cfg->hrrq[i]._lock);
758                 ioa_cfg->hrrq[i].allow_interrupts = 0;
759                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
760         }
761         wmb();
762
763         /* Set interrupt mask to stop all new interrupts */
764         if (ioa_cfg->sis64)
765                 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
766         else
767                 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
768
769         /* Clear any pending interrupts */
770         if (ioa_cfg->sis64)
771                 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
772         writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
773         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
774 }
775
776 /**
777  * ipr_save_pcix_cmd_reg - Save PCI-X command register
778  * @ioa_cfg:    ioa config struct
779  *
780  * Return value:
781  *      0 on success / -EIO on failure
782  **/
783 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
784 {
785         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
786
787         if (pcix_cmd_reg == 0)
788                 return 0;
789
790         if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791                                  &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
792                 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
793                 return -EIO;
794         }
795
796         ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
797         return 0;
798 }
799
800 /**
801  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
802  * @ioa_cfg:    ioa config struct
803  *
804  * Return value:
805  *      0 on success / -EIO on failure
806  **/
807 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
808 {
809         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
810
811         if (pcix_cmd_reg) {
812                 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
813                                           ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
814                         dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
815                         return -EIO;
816                 }
817         }
818
819         return 0;
820 }
821
822 /**
823  * __ipr_sata_eh_done - done function for aborted SATA commands
824  * @ipr_cmd:    ipr command struct
825  *
826  * This function is invoked for ops generated to SATA
827  * devices which are being aborted.
828  *
829  * Return value:
830  *      none
831  **/
832 static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
833 {
834         struct ata_queued_cmd *qc = ipr_cmd->qc;
835         struct ipr_sata_port *sata_port = qc->ap->private_data;
836
837         qc->err_mask |= AC_ERR_OTHER;
838         sata_port->ioasa.status |= ATA_BUSY;
839         ata_qc_complete(qc);
840         if (ipr_cmd->eh_comp)
841                 complete(ipr_cmd->eh_comp);
842         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
843 }
844
845 /**
846  * ipr_sata_eh_done - done function for aborted SATA commands
847  * @ipr_cmd:    ipr command struct
848  *
849  * This function is invoked for ops generated to SATA
850  * devices which are being aborted.
851  *
852  * Return value:
853  *      none
854  **/
855 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
856 {
857         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
858         unsigned long hrrq_flags;
859
860         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
861         __ipr_sata_eh_done(ipr_cmd);
862         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
863 }
864
865 /**
866  * __ipr_scsi_eh_done - mid-layer done function for aborted ops
867  * @ipr_cmd:    ipr command struct
868  *
869  * This function is invoked by the interrupt handler for
870  * ops generated by the SCSI mid-layer which are being aborted.
871  *
872  * Return value:
873  *      none
874  **/
875 static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
876 {
877         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
878
879         scsi_cmd->result |= (DID_ERROR << 16);
880
881         scsi_dma_unmap(ipr_cmd->scsi_cmd);
882         scsi_cmd->scsi_done(scsi_cmd);
883         if (ipr_cmd->eh_comp)
884                 complete(ipr_cmd->eh_comp);
885         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
886 }
887
888 /**
889  * ipr_scsi_eh_done - mid-layer done function for aborted ops
890  * @ipr_cmd:    ipr command struct
891  *
892  * This function is invoked by the interrupt handler for
893  * ops generated by the SCSI mid-layer which are being aborted.
894  *
895  * Return value:
896  *      none
897  **/
898 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
899 {
900         unsigned long hrrq_flags;
901         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
902
903         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
904         __ipr_scsi_eh_done(ipr_cmd);
905         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
906 }
907
908 /**
909  * ipr_fail_all_ops - Fails all outstanding ops.
910  * @ioa_cfg:    ioa config struct
911  *
912  * This function fails all outstanding ops.
913  *
914  * Return value:
915  *      none
916  **/
917 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
918 {
919         struct ipr_cmnd *ipr_cmd, *temp;
920         struct ipr_hrr_queue *hrrq;
921
922         ENTER;
923         for_each_hrrq(hrrq, ioa_cfg) {
924                 spin_lock(&hrrq->_lock);
925                 list_for_each_entry_safe(ipr_cmd,
926                                         temp, &hrrq->hrrq_pending_q, queue) {
927                         list_del(&ipr_cmd->queue);
928
929                         ipr_cmd->s.ioasa.hdr.ioasc =
930                                 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
931                         ipr_cmd->s.ioasa.hdr.ilid =
932                                 cpu_to_be32(IPR_DRIVER_ILID);
933
934                         if (ipr_cmd->scsi_cmd)
935                                 ipr_cmd->done = __ipr_scsi_eh_done;
936                         else if (ipr_cmd->qc)
937                                 ipr_cmd->done = __ipr_sata_eh_done;
938
939                         ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
940                                      IPR_IOASC_IOA_WAS_RESET);
941                         del_timer(&ipr_cmd->timer);
942                         ipr_cmd->done(ipr_cmd);
943                 }
944                 spin_unlock(&hrrq->_lock);
945         }
946         LEAVE;
947 }
948
949 /**
950  * ipr_send_command -  Send driver initiated requests.
951  * @ipr_cmd:            ipr command struct
952  *
953  * This function sends a command to the adapter using the correct write call.
954  * In the case of sis64, calculate the ioarcb size required. Then or in the
955  * appropriate bits.
956  *
957  * Return value:
958  *      none
959  **/
960 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
961 {
962         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
963         dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
964
965         if (ioa_cfg->sis64) {
966                 /* The default size is 256 bytes */
967                 send_dma_addr |= 0x1;
968
969                 /* If the number of ioadls * size of ioadl > 128 bytes,
970                    then use a 512 byte ioarcb */
971                 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
972                         send_dma_addr |= 0x4;
973                 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
974         } else
975                 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
976 }
977
978 /**
979  * ipr_do_req -  Send driver initiated requests.
980  * @ipr_cmd:            ipr command struct
981  * @done:                       done function
982  * @timeout_func:       timeout function
983  * @timeout:            timeout value
984  *
985  * This function sends the specified command to the adapter with the
986  * timeout given. The done function is invoked on command completion.
987  *
988  * Return value:
989  *      none
990  **/
991 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
992                        void (*done) (struct ipr_cmnd *),
993                        void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
994 {
995         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
996
997         ipr_cmd->done = done;
998
999         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
1000         ipr_cmd->timer.expires = jiffies + timeout;
1001         ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
1002
1003         add_timer(&ipr_cmd->timer);
1004
1005         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1006
1007         ipr_send_command(ipr_cmd);
1008 }
1009
1010 /**
1011  * ipr_internal_cmd_done - Op done function for an internally generated op.
1012  * @ipr_cmd:    ipr command struct
1013  *
1014  * This function is the op done function for an internally generated,
1015  * blocking op. It simply wakes the sleeping thread.
1016  *
1017  * Return value:
1018  *      none
1019  **/
1020 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1021 {
1022         if (ipr_cmd->sibling)
1023                 ipr_cmd->sibling = NULL;
1024         else
1025                 complete(&ipr_cmd->completion);
1026 }
1027
1028 /**
1029  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030  * @ipr_cmd:    ipr command struct
1031  * @dma_addr:   dma address
1032  * @len:        transfer length
1033  * @flags:      ioadl flag value
1034  *
1035  * This function initializes an ioadl in the case where there is only a single
1036  * descriptor.
1037  *
1038  * Return value:
1039  *      nothing
1040  **/
1041 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1042                            u32 len, int flags)
1043 {
1044         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1045         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1046
1047         ipr_cmd->dma_use_sg = 1;
1048
1049         if (ipr_cmd->ioa_cfg->sis64) {
1050                 ioadl64->flags = cpu_to_be32(flags);
1051                 ioadl64->data_len = cpu_to_be32(len);
1052                 ioadl64->address = cpu_to_be64(dma_addr);
1053
1054                 ipr_cmd->ioarcb.ioadl_len =
1055                         cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1056                 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1057         } else {
1058                 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1059                 ioadl->address = cpu_to_be32(dma_addr);
1060
1061                 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1062                         ipr_cmd->ioarcb.read_ioadl_len =
1063                                 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1064                         ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1065                 } else {
1066                         ipr_cmd->ioarcb.ioadl_len =
1067                                 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1068                         ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1069                 }
1070         }
1071 }
1072
1073 /**
1074  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075  * @ipr_cmd:    ipr command struct
1076  * @timeout_func:       function to invoke if command times out
1077  * @timeout:    timeout
1078  *
1079  * Return value:
1080  *      none
1081  **/
1082 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1083                                   void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1084                                   u32 timeout)
1085 {
1086         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1087
1088         init_completion(&ipr_cmd->completion);
1089         ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1090
1091         spin_unlock_irq(ioa_cfg->host->host_lock);
1092         wait_for_completion(&ipr_cmd->completion);
1093         spin_lock_irq(ioa_cfg->host->host_lock);
1094 }
1095
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1097 {
1098         unsigned int hrrq;
1099
1100         if (ioa_cfg->hrrq_num == 1)
1101                 hrrq = 0;
1102         else {
1103                 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104                 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1105         }
1106         return hrrq;
1107 }
1108
1109 /**
1110  * ipr_send_hcam - Send an HCAM to the adapter.
1111  * @ioa_cfg:    ioa config struct
1112  * @type:               HCAM type
1113  * @hostrcb:    hostrcb struct
1114  *
1115  * This function will send a Host Controlled Async command to the adapter.
1116  * If HCAMs are currently not allowed to be issued to the adapter, it will
1117  * place the hostrcb on the free queue.
1118  *
1119  * Return value:
1120  *      none
1121  **/
1122 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123                           struct ipr_hostrcb *hostrcb)
1124 {
1125         struct ipr_cmnd *ipr_cmd;
1126         struct ipr_ioarcb *ioarcb;
1127
1128         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1129                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1130                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1131                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1132
1133                 ipr_cmd->u.hostrcb = hostrcb;
1134                 ioarcb = &ipr_cmd->ioarcb;
1135
1136                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138                 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139                 ioarcb->cmd_pkt.cdb[1] = type;
1140                 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141                 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1142
1143                 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144                                sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1145
1146                 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147                         ipr_cmd->done = ipr_process_ccn;
1148                 else
1149                         ipr_cmd->done = ipr_process_error;
1150
1151                 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1152
1153                 ipr_send_command(ipr_cmd);
1154         } else {
1155                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1156         }
1157 }
1158
1159 /**
1160  * ipr_update_ata_class - Update the ata class in the resource entry
1161  * @res:        resource entry struct
1162  * @proto:      cfgte device bus protocol value
1163  *
1164  * Return value:
1165  *      none
1166  **/
1167 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1168 {
1169         switch (proto) {
1170         case IPR_PROTO_SATA:
1171         case IPR_PROTO_SAS_STP:
1172                 res->ata_class = ATA_DEV_ATA;
1173                 break;
1174         case IPR_PROTO_SATA_ATAPI:
1175         case IPR_PROTO_SAS_STP_ATAPI:
1176                 res->ata_class = ATA_DEV_ATAPI;
1177                 break;
1178         default:
1179                 res->ata_class = ATA_DEV_UNKNOWN;
1180                 break;
1181         };
1182 }
1183
1184 /**
1185  * ipr_init_res_entry - Initialize a resource entry struct.
1186  * @res:        resource entry struct
1187  * @cfgtew:     config table entry wrapper struct
1188  *
1189  * Return value:
1190  *      none
1191  **/
1192 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193                                struct ipr_config_table_entry_wrapper *cfgtew)
1194 {
1195         int found = 0;
1196         unsigned int proto;
1197         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198         struct ipr_resource_entry *gscsi_res = NULL;
1199
1200         res->needs_sync_complete = 0;
1201         res->in_erp = 0;
1202         res->add_to_ml = 0;
1203         res->del_from_ml = 0;
1204         res->resetting_device = 0;
1205         res->reset_occurred = 0;
1206         res->sdev = NULL;
1207         res->sata_port = NULL;
1208
1209         if (ioa_cfg->sis64) {
1210                 proto = cfgtew->u.cfgte64->proto;
1211                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1213                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1214                 res->type = cfgtew->u.cfgte64->res_type;
1215
1216                 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217                         sizeof(res->res_path));
1218
1219                 res->bus = 0;
1220                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221                         sizeof(res->dev_lun.scsi_lun));
1222                 res->lun = scsilun_to_int(&res->dev_lun);
1223
1224                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1227                                         found = 1;
1228                                         res->target = gscsi_res->target;
1229                                         break;
1230                                 }
1231                         }
1232                         if (!found) {
1233                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234                                                                   ioa_cfg->max_devs_supported);
1235                                 set_bit(res->target, ioa_cfg->target_ids);
1236                         }
1237                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1239                         res->target = 0;
1240                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243                                                           ioa_cfg->max_devs_supported);
1244                         set_bit(res->target, ioa_cfg->array_ids);
1245                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246                         res->bus = IPR_VSET_VIRTUAL_BUS;
1247                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248                                                           ioa_cfg->max_devs_supported);
1249                         set_bit(res->target, ioa_cfg->vset_ids);
1250                 } else {
1251                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252                                                           ioa_cfg->max_devs_supported);
1253                         set_bit(res->target, ioa_cfg->target_ids);
1254                 }
1255         } else {
1256                 proto = cfgtew->u.cfgte->proto;
1257                 res->qmodel = IPR_QUEUEING_MODEL(res);
1258                 res->flags = cfgtew->u.cfgte->flags;
1259                 if (res->flags & IPR_IS_IOA_RESOURCE)
1260                         res->type = IPR_RES_TYPE_IOAFP;
1261                 else
1262                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1263
1264                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265                 res->target = cfgtew->u.cfgte->res_addr.target;
1266                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1267                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1268         }
1269
1270         ipr_update_ata_class(res, proto);
1271 }
1272
1273 /**
1274  * ipr_is_same_device - Determine if two devices are the same.
1275  * @res:        resource entry struct
1276  * @cfgtew:     config table entry wrapper struct
1277  *
1278  * Return value:
1279  *      1 if the devices are the same / 0 otherwise
1280  **/
1281 static int ipr_is_same_device(struct ipr_resource_entry *res,
1282                               struct ipr_config_table_entry_wrapper *cfgtew)
1283 {
1284         if (res->ioa_cfg->sis64) {
1285                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1287                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1288                                         sizeof(cfgtew->u.cfgte64->lun))) {
1289                         return 1;
1290                 }
1291         } else {
1292                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293                     res->target == cfgtew->u.cfgte->res_addr.target &&
1294                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1295                         return 1;
1296         }
1297
1298         return 0;
1299 }
1300
1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buf:        buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buf:        buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (buffer - p));
1341         return buffer;
1342 }
1343
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1408  * @cfgtew:     config table entry wrapper struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1449
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration
1507  * change notification host controlled async from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535  * @i:          index into buffer
1536  * @buf:                string to modify
1537  *
1538  * This function will strip all trailing whitespace, pad the end
1539  * of the string with a single space, and NULL terminate the string.
1540  *
1541  * Return value:
1542  *      new length of string
1543  **/
1544 static int strip_and_pad_whitespace(int i, char *buf)
1545 {
1546         while (i && buf[i] == ' ')
1547                 i--;
1548         buf[i+1] = ' ';
1549         buf[i+2] = '\0';
1550         return i + 2;
1551 }
1552
1553 /**
1554  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                 struct ipr_vpd *vpd)
1564 {
1565         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566         int i = 0;
1567
1568         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578 }
1579
1580 /**
1581  * ipr_log_vpd - Log the passed VPD to the error log.
1582  * @vpd:                vendor/product id/sn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_vpd(struct ipr_vpd *vpd)
1588 {
1589         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590                     + IPR_SERIAL_NUM_LEN];
1591
1592         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1594                IPR_PROD_ID_LEN);
1595         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596         ipr_err("Vendor/Product ID: %s\n", buffer);
1597
1598         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1599         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600         ipr_err("    Serial Number: %s\n", buffer);
1601 }
1602
1603 /**
1604  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605  * @prefix:             string to print at start of printk
1606  * @hostrcb:    hostrcb pointer
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613                                     struct ipr_ext_vpd *vpd)
1614 {
1615         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618 }
1619
1620 /**
1621  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622  * @vpd:                vendor/product id/sn/wwn struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628 {
1629         ipr_log_vpd(&vpd->vpd);
1630         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631                 be32_to_cpu(vpd->wwid[1]));
1632 }
1633
1634 /**
1635  * ipr_log_enhanced_cache_error - Log a cache error.
1636  * @ioa_cfg:    ioa config struct
1637  * @hostrcb:    hostrcb struct
1638  *
1639  * Return value:
1640  *      none
1641  **/
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643                                          struct ipr_hostrcb *hostrcb)
1644 {
1645         struct ipr_hostrcb_type_12_error *error;
1646
1647         if (ioa_cfg->sis64)
1648                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649         else
1650                 error = &hostrcb->hcam.u.error.u.type_12_error;
1651
1652         ipr_err("-----Current Configuration-----\n");
1653         ipr_err("Cache Directory Card Information:\n");
1654         ipr_log_ext_vpd(&error->ioa_vpd);
1655         ipr_err("Adapter Card Information:\n");
1656         ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658         ipr_err("-----Expected Configuration-----\n");
1659         ipr_err("Cache Directory Card Information:\n");
1660         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661         ipr_err("Adapter Card Information:\n");
1662         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665                      be32_to_cpu(error->ioa_data[0]),
1666                      be32_to_cpu(error->ioa_data[1]),
1667                      be32_to_cpu(error->ioa_data[2]));
1668 }
1669
1670 /**
1671  * ipr_log_cache_error - Log a cache error.
1672  * @ioa_cfg:    ioa config struct
1673  * @hostrcb:    hostrcb struct
1674  *
1675  * Return value:
1676  *      none
1677  **/
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679                                 struct ipr_hostrcb *hostrcb)
1680 {
1681         struct ipr_hostrcb_type_02_error *error =
1682                 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684         ipr_err("-----Current Configuration-----\n");
1685         ipr_err("Cache Directory Card Information:\n");
1686         ipr_log_vpd(&error->ioa_vpd);
1687         ipr_err("Adapter Card Information:\n");
1688         ipr_log_vpd(&error->cfc_vpd);
1689
1690         ipr_err("-----Expected Configuration-----\n");
1691         ipr_err("Cache Directory Card Information:\n");
1692         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1693         ipr_err("Adapter Card Information:\n");
1694         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1695
1696         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697                      be32_to_cpu(error->ioa_data[0]),
1698                      be32_to_cpu(error->ioa_data[1]),
1699                      be32_to_cpu(error->ioa_data[2]));
1700 }
1701
1702 /**
1703  * ipr_log_enhanced_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                           struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_13_error *error;
1716
1717         error = &hostrcb->hcam.u.error.u.type_13_error;
1718         errors_logged = be32_to_cpu(error->errors_logged);
1719
1720         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721                 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723         dev_entry = error->dev;
1724
1725         for (i = 0; i < errors_logged; i++, dev_entry++) {
1726                 ipr_err_separator;
1727
1728                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_sis64_config_error - Log a device error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                        struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755         struct ipr_hostrcb_type_23_error *error;
1756         char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758         error = &hostrcb->hcam.u.error64.u.type_23_error;
1759         errors_logged = be32_to_cpu(error->errors_logged);
1760
1761         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762                 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764         dev_entry = error->dev;
1765
1766         for (i = 0; i < errors_logged; i++, dev_entry++) {
1767                 ipr_err_separator;
1768
1769                 ipr_err("Device %d : %s", i + 1,
1770                         __ipr_format_res_path(dev_entry->res_path,
1771                                               buffer, sizeof(buffer)));
1772                 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782         }
1783 }
1784
1785 /**
1786  * ipr_log_config_error - Log a configuration error.
1787  * @ioa_cfg:    ioa config struct
1788  * @hostrcb:    hostrcb struct
1789  *
1790  * Return value:
1791  *      none
1792  **/
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794                                  struct ipr_hostrcb *hostrcb)
1795 {
1796         int errors_logged, i;
1797         struct ipr_hostrcb_device_data_entry *dev_entry;
1798         struct ipr_hostrcb_type_03_error *error;
1799
1800         error = &hostrcb->hcam.u.error.u.type_03_error;
1801         errors_logged = be32_to_cpu(error->errors_logged);
1802
1803         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804                 be32_to_cpu(error->errors_detected), errors_logged);
1805
1806         dev_entry = error->dev;
1807
1808         for (i = 0; i < errors_logged; i++, dev_entry++) {
1809                 ipr_err_separator;
1810
1811                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1812                 ipr_log_vpd(&dev_entry->vpd);
1813
1814                 ipr_err("-----New Device Information-----\n");
1815                 ipr_log_vpd(&dev_entry->new_vpd);
1816
1817                 ipr_err("Cache Directory Card Information:\n");
1818                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1819
1820                 ipr_err("Adapter Card Information:\n");
1821                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1822
1823                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824                         be32_to_cpu(dev_entry->ioa_data[0]),
1825                         be32_to_cpu(dev_entry->ioa_data[1]),
1826                         be32_to_cpu(dev_entry->ioa_data[2]),
1827                         be32_to_cpu(dev_entry->ioa_data[3]),
1828                         be32_to_cpu(dev_entry->ioa_data[4]));
1829         }
1830 }
1831
1832 /**
1833  * ipr_log_enhanced_array_error - Log an array configuration error.
1834  * @ioa_cfg:    ioa config struct
1835  * @hostrcb:    hostrcb struct
1836  *
1837  * Return value:
1838  *      none
1839  **/
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841                                          struct ipr_hostrcb *hostrcb)
1842 {
1843         int i, num_entries;
1844         struct ipr_hostrcb_type_14_error *error;
1845         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848         error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850         ipr_err_separator;
1851
1852         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853                 error->protection_level,
1854                 ioa_cfg->host->host_no,
1855                 error->last_func_vset_res_addr.bus,
1856                 error->last_func_vset_res_addr.target,
1857                 error->last_func_vset_res_addr.lun);
1858
1859         ipr_err_separator;
1860
1861         array_entry = error->array_member;
1862         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1863                             ARRAY_SIZE(error->array_member));
1864
1865         for (i = 0; i < num_entries; i++, array_entry++) {
1866                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867                         continue;
1868
1869                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870                         ipr_err("Exposed Array Member %d:\n", i);
1871                 else
1872                         ipr_err("Array Member %d:\n", i);
1873
1874                 ipr_log_ext_vpd(&array_entry->vpd);
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880         }
1881 }
1882
1883 /**
1884  * ipr_log_array_error - Log an array configuration error.
1885  * @ioa_cfg:    ioa config struct
1886  * @hostrcb:    hostrcb struct
1887  *
1888  * Return value:
1889  *      none
1890  **/
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892                                 struct ipr_hostrcb *hostrcb)
1893 {
1894         int i;
1895         struct ipr_hostrcb_type_04_error *error;
1896         struct ipr_hostrcb_array_data_entry *array_entry;
1897         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899         error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901         ipr_err_separator;
1902
1903         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904                 error->protection_level,
1905                 ioa_cfg->host->host_no,
1906                 error->last_func_vset_res_addr.bus,
1907                 error->last_func_vset_res_addr.target,
1908                 error->last_func_vset_res_addr.lun);
1909
1910         ipr_err_separator;
1911
1912         array_entry = error->array_member;
1913
1914         for (i = 0; i < 18; i++) {
1915                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1916                         continue;
1917
1918                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1919                         ipr_err("Exposed Array Member %d:\n", i);
1920                 else
1921                         ipr_err("Array Member %d:\n", i);
1922
1923                 ipr_log_vpd(&array_entry->vpd);
1924
1925                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927                                  "Expected Location");
1928
1929                 ipr_err_separator;
1930
1931                 if (i == 9)
1932                         array_entry = error->array_member2;
1933                 else
1934                         array_entry++;
1935         }
1936 }
1937
1938 /**
1939  * ipr_log_hex_data - Log additional hex IOA error data.
1940  * @ioa_cfg:    ioa config struct
1941  * @data:               IOA error data
1942  * @len:                data length
1943  *
1944  * Return value:
1945  *      none
1946  **/
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1948 {
1949         int i;
1950
1951         if (len == 0)
1952                 return;
1953
1954         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
1957         for (i = 0; i < len / 4; i += 4) {
1958                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959                         be32_to_cpu(data[i]),
1960                         be32_to_cpu(data[i+1]),
1961                         be32_to_cpu(data[i+2]),
1962                         be32_to_cpu(data[i+3]));
1963         }
1964 }
1965
1966 /**
1967  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968  * @ioa_cfg:    ioa config struct
1969  * @hostrcb:    hostrcb struct
1970  *
1971  * Return value:
1972  *      none
1973  **/
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975                                             struct ipr_hostrcb *hostrcb)
1976 {
1977         struct ipr_hostrcb_type_17_error *error;
1978
1979         if (ioa_cfg->sis64)
1980                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981         else
1982                 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
1984         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1985         strim(error->failure_reason);
1986
1987         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1989         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1990         ipr_log_hex_data(ioa_cfg, error->data,
1991                          be32_to_cpu(hostrcb->hcam.length) -
1992                          (offsetof(struct ipr_hostrcb_error, u) +
1993                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1994 }
1995
1996 /**
1997  * ipr_log_dual_ioa_error - Log a dual adapter error.
1998  * @ioa_cfg:    ioa config struct
1999  * @hostrcb:    hostrcb struct
2000  *
2001  * Return value:
2002  *      none
2003  **/
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005                                    struct ipr_hostrcb *hostrcb)
2006 {
2007         struct ipr_hostrcb_type_07_error *error;
2008
2009         error = &hostrcb->hcam.u.error.u.type_07_error;
2010         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2011         strim(error->failure_reason);
2012
2013         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2015         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2016         ipr_log_hex_data(ioa_cfg, error->data,
2017                          be32_to_cpu(hostrcb->hcam.length) -
2018                          (offsetof(struct ipr_hostrcb_error, u) +
2019                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2020 }
2021
2022 static const struct {
2023         u8 active;
2024         char *desc;
2025 } path_active_desc[] = {
2026         { IPR_PATH_NO_INFO, "Path" },
2027         { IPR_PATH_ACTIVE, "Active path" },
2028         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029 };
2030
2031 static const struct {
2032         u8 state;
2033         char *desc;
2034 } path_state_desc[] = {
2035         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036         { IPR_PATH_HEALTHY, "is healthy" },
2037         { IPR_PATH_DEGRADED, "is degraded" },
2038         { IPR_PATH_FAILED, "is failed" }
2039 };
2040
2041 /**
2042  * ipr_log_fabric_path - Log a fabric path error
2043  * @hostrcb:    hostrcb struct
2044  * @fabric:             fabric descriptor
2045  *
2046  * Return value:
2047  *      none
2048  **/
2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050                                 struct ipr_hostrcb_fabric_desc *fabric)
2051 {
2052         int i, j;
2053         u8 path_state = fabric->path_state;
2054         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055         u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067                                              path_active_desc[i].desc, path_state_desc[j].desc,
2068                                              fabric->ioa_port);
2069                         } else if (fabric->cascaded_expander == 0xff) {
2070                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071                                              path_active_desc[i].desc, path_state_desc[j].desc,
2072                                              fabric->ioa_port, fabric->phy);
2073                         } else if (fabric->phy == 0xff) {
2074                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075                                              path_active_desc[i].desc, path_state_desc[j].desc,
2076                                              fabric->ioa_port, fabric->cascaded_expander);
2077                         } else {
2078                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079                                              path_active_desc[i].desc, path_state_desc[j].desc,
2080                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081                         }
2082                         return;
2083                 }
2084         }
2085
2086         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088 }
2089
2090 /**
2091  * ipr_log64_fabric_path - Log a fabric path error
2092  * @hostrcb:    hostrcb struct
2093  * @fabric:             fabric descriptor
2094  *
2095  * Return value:
2096  *      none
2097  **/
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099                                   struct ipr_hostrcb64_fabric_desc *fabric)
2100 {
2101         int i, j;
2102         u8 path_state = fabric->path_state;
2103         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104         u8 state = path_state & IPR_PATH_STATE_MASK;
2105         char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108                 if (path_active_desc[i].active != active)
2109                         continue;
2110
2111                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112                         if (path_state_desc[j].state != state)
2113                                 continue;
2114
2115                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116                                      path_active_desc[i].desc, path_state_desc[j].desc,
2117                                      ipr_format_res_path(hostrcb->ioa_cfg,
2118                                                 fabric->res_path,
2119                                                 buffer, sizeof(buffer)));
2120                         return;
2121                 }
2122         }
2123
2124         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2125                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126                                     buffer, sizeof(buffer)));
2127 }
2128
2129 static const struct {
2130         u8 type;
2131         char *desc;
2132 } path_type_desc[] = {
2133         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137 };
2138
2139 static const struct {
2140         u8 status;
2141         char *desc;
2142 } path_status_desc[] = {
2143         { IPR_PATH_CFG_NO_PROB, "Functional" },
2144         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145         { IPR_PATH_CFG_FAILED, "Failed" },
2146         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147         { IPR_PATH_NOT_DETECTED, "Missing" },
2148         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149 };
2150
2151 static const char *link_rate[] = {
2152         "unknown",
2153         "disabled",
2154         "phy reset problem",
2155         "spinup hold",
2156         "port selector",
2157         "unknown",
2158         "unknown",
2159         "unknown",
2160         "1.5Gbps",
2161         "3.0Gbps",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown"
2168 };
2169
2170 /**
2171  * ipr_log_path_elem - Log a fabric path element.
2172  * @hostrcb:    hostrcb struct
2173  * @cfg:                fabric path element struct
2174  *
2175  * Return value:
2176  *      none
2177  **/
2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179                               struct ipr_hostrcb_config_element *cfg)
2180 {
2181         int i, j;
2182         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185         if (type == IPR_PATH_CFG_NOT_EXIST)
2186                 return;
2187
2188         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189                 if (path_type_desc[i].type != type)
2190                         continue;
2191
2192                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193                         if (path_status_desc[j].status != status)
2194                                 continue;
2195
2196                         if (type == IPR_PATH_CFG_IOA_PORT) {
2197                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198                                              path_status_desc[j].desc, path_type_desc[i].desc,
2199                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201                         } else {
2202                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2205                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207                                 } else if (cfg->cascaded_expander == 0xff) {
2208                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2210                                                      path_type_desc[i].desc, cfg->phy,
2211                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213                                 } else if (cfg->phy == 0xff) {
2214                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2216                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2217                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219                                 } else {
2220                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2222                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225                                 }
2226                         }
2227                         return;
2228                 }
2229         }
2230
2231         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235 }
2236
2237 /**
2238  * ipr_log64_path_elem - Log a fabric path element.
2239  * @hostrcb:    hostrcb struct
2240  * @cfg:                fabric path element struct
2241  *
2242  * Return value:
2243  *      none
2244  **/
2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246                                 struct ipr_hostrcb64_config_element *cfg)
2247 {
2248         int i, j;
2249         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252         char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255                 return;
2256
2257         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258                 if (path_type_desc[i].type != type)
2259                         continue;
2260
2261                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262                         if (path_status_desc[j].status != status)
2263                                 continue;
2264
2265                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266                                      path_status_desc[j].desc, path_type_desc[i].desc,
2267                                      ipr_format_res_path(hostrcb->ioa_cfg,
2268                                         cfg->res_path, buffer, sizeof(buffer)),
2269                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270                                         be32_to_cpu(cfg->wwid[0]),
2271                                         be32_to_cpu(cfg->wwid[1]));
2272                         return;
2273                 }
2274         }
2275         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276                      "WWN=%08X%08X\n", cfg->type_status,
2277                      ipr_format_res_path(hostrcb->ioa_cfg,
2278                         cfg->res_path, buffer, sizeof(buffer)),
2279                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2281 }
2282
2283 /**
2284  * ipr_log_fabric_error - Log a fabric error.
2285  * @ioa_cfg:    ioa config struct
2286  * @hostrcb:    hostrcb struct
2287  *
2288  * Return value:
2289  *      none
2290  **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292                                  struct ipr_hostrcb *hostrcb)
2293 {
2294         struct ipr_hostrcb_type_20_error *error;
2295         struct ipr_hostrcb_fabric_desc *fabric;
2296         struct ipr_hostrcb_config_element *cfg;
2297         int i, add_len;
2298
2299         error = &hostrcb->hcam.u.error.u.type_20_error;
2300         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303         add_len = be32_to_cpu(hostrcb->hcam.length) -
2304                 (offsetof(struct ipr_hostrcb_error, u) +
2305                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308                 ipr_log_fabric_path(hostrcb, fabric);
2309                 for_each_fabric_cfg(fabric, cfg)
2310                         ipr_log_path_elem(hostrcb, cfg);
2311
2312                 add_len -= be16_to_cpu(fabric->length);
2313                 fabric = (struct ipr_hostrcb_fabric_desc *)
2314                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315         }
2316
2317         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
2319
2320 /**
2321  * ipr_log_sis64_array_error - Log a sis64 array error.
2322  * @ioa_cfg:    ioa config struct
2323  * @hostrcb:    hostrcb struct
2324  *
2325  * Return value:
2326  *      none
2327  **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329                                       struct ipr_hostrcb *hostrcb)
2330 {
2331         int i, num_entries;
2332         struct ipr_hostrcb_type_24_error *error;
2333         struct ipr_hostrcb64_array_data_entry *array_entry;
2334         char buffer[IPR_MAX_RES_PATH_LENGTH];
2335         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337         error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339         ipr_err_separator;
2340
2341         ipr_err("RAID %s Array Configuration: %s\n",
2342                 error->protection_level,
2343                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344                         buffer, sizeof(buffer)));
2345
2346         ipr_err_separator;
2347
2348         array_entry = error->array_member;
2349         num_entries = min_t(u32, error->num_entries,
2350                             ARRAY_SIZE(error->array_member));
2351
2352         for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355                         continue;
2356
2357                 if (error->exposed_mode_adn == i)
2358                         ipr_err("Exposed Array Member %d:\n", i);
2359                 else
2360                         ipr_err("Array Member %d:\n", i);
2361
2362                 ipr_err("Array Member %d:\n", i);
2363                 ipr_log_ext_vpd(&array_entry->vpd);
2364                 ipr_err("Current Location: %s\n",
2365                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366                                 buffer, sizeof(buffer)));
2367                 ipr_err("Expected Location: %s\n",
2368                          ipr_format_res_path(ioa_cfg,
2369                                 array_entry->expected_res_path,
2370                                 buffer, sizeof(buffer)));
2371
2372                 ipr_err_separator;
2373         }
2374 }
2375
2376 /**
2377  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378  * @ioa_cfg:    ioa config struct
2379  * @hostrcb:    hostrcb struct
2380  *
2381  * Return value:
2382  *      none
2383  **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385                                        struct ipr_hostrcb *hostrcb)
2386 {
2387         struct ipr_hostrcb_type_30_error *error;
2388         struct ipr_hostrcb64_fabric_desc *fabric;
2389         struct ipr_hostrcb64_config_element *cfg;
2390         int i, add_len;
2391
2392         error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397         add_len = be32_to_cpu(hostrcb->hcam.length) -
2398                 (offsetof(struct ipr_hostrcb64_error, u) +
2399                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402                 ipr_log64_fabric_path(hostrcb, fabric);
2403                 for_each_fabric_cfg(fabric, cfg)
2404                         ipr_log64_path_elem(hostrcb, cfg);
2405
2406                 add_len -= be16_to_cpu(fabric->length);
2407                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409         }
2410
2411         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415  * ipr_log_generic_error - Log an adapter error.
2416  * @ioa_cfg:    ioa config struct
2417  * @hostrcb:    hostrcb struct
2418  *
2419  * Return value:
2420  *      none
2421  **/
2422 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2423                                   struct ipr_hostrcb *hostrcb)
2424 {
2425         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2426                          be32_to_cpu(hostrcb->hcam.length));
2427 }
2428
2429 /**
2430  * ipr_log_sis64_device_error - Log a cache error.
2431  * @ioa_cfg:    ioa config struct
2432  * @hostrcb:    hostrcb struct
2433  *
2434  * Return value:
2435  *      none
2436  **/
2437 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2438                                          struct ipr_hostrcb *hostrcb)
2439 {
2440         struct ipr_hostrcb_type_21_error *error;
2441         char buffer[IPR_MAX_RES_PATH_LENGTH];
2442
2443         error = &hostrcb->hcam.u.error64.u.type_21_error;
2444
2445         ipr_err("-----Failing Device Information-----\n");
2446         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2447                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2448                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2449         ipr_err("Device Resource Path: %s\n",
2450                 __ipr_format_res_path(error->res_path,
2451                                       buffer, sizeof(buffer)));
2452         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2453         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2454         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2455         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2456         ipr_err("SCSI Sense Data:\n");
2457         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2458         ipr_err("SCSI Command Descriptor Block: \n");
2459         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2460
2461         ipr_err("Additional IOA Data:\n");
2462         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2463 }
2464
2465 /**
2466  * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2467  * @ioasc:      IOASC
2468  *
2469  * This function will return the index of into the ipr_error_table
2470  * for the specified IOASC. If the IOASC is not in the table,
2471  * 0 will be returned, which points to the entry used for unknown errors.
2472  *
2473  * Return value:
2474  *      index into the ipr_error_table
2475  **/
2476 static u32 ipr_get_error(u32 ioasc)
2477 {
2478         int i;
2479
2480         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2481                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2482                         return i;
2483
2484         return 0;
2485 }
2486
2487 /**
2488  * ipr_handle_log_data - Log an adapter error.
2489  * @ioa_cfg:    ioa config struct
2490  * @hostrcb:    hostrcb struct
2491  *
2492  * This function logs an adapter error to the system.
2493  *
2494  * Return value:
2495  *      none
2496  **/
2497 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2498                                 struct ipr_hostrcb *hostrcb)
2499 {
2500         u32 ioasc;
2501         int error_index;
2502         struct ipr_hostrcb_type_21_error *error;
2503
2504         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2505                 return;
2506
2507         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2508                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2509
2510         if (ioa_cfg->sis64)
2511                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2512         else
2513                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2514
2515         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2516             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2517                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2518                 scsi_report_bus_reset(ioa_cfg->host,
2519                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2520         }
2521
2522         error_index = ipr_get_error(ioasc);
2523
2524         if (!ipr_error_table[error_index].log_hcam)
2525                 return;
2526
2527         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2528             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2529                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2530
2531                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2532                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2533                                 return;
2534         }
2535
2536         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2537
2538         /* Set indication we have logged an error */
2539         ioa_cfg->errors_logged++;
2540
2541         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2542                 return;
2543         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2544                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2545
2546         switch (hostrcb->hcam.overlay_id) {
2547         case IPR_HOST_RCB_OVERLAY_ID_2:
2548                 ipr_log_cache_error(ioa_cfg, hostrcb);
2549                 break;
2550         case IPR_HOST_RCB_OVERLAY_ID_3:
2551                 ipr_log_config_error(ioa_cfg, hostrcb);
2552                 break;
2553         case IPR_HOST_RCB_OVERLAY_ID_4:
2554         case IPR_HOST_RCB_OVERLAY_ID_6:
2555                 ipr_log_array_error(ioa_cfg, hostrcb);
2556                 break;
2557         case IPR_HOST_RCB_OVERLAY_ID_7:
2558                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2559                 break;
2560         case IPR_HOST_RCB_OVERLAY_ID_12:
2561                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2562                 break;
2563         case IPR_HOST_RCB_OVERLAY_ID_13:
2564                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2565                 break;
2566         case IPR_HOST_RCB_OVERLAY_ID_14:
2567         case IPR_HOST_RCB_OVERLAY_ID_16:
2568                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2569                 break;
2570         case IPR_HOST_RCB_OVERLAY_ID_17:
2571                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2572                 break;
2573         case IPR_HOST_RCB_OVERLAY_ID_20:
2574                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2575                 break;
2576         case IPR_HOST_RCB_OVERLAY_ID_21:
2577                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_23:
2580                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_24:
2583         case IPR_HOST_RCB_OVERLAY_ID_26:
2584                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2585                 break;
2586         case IPR_HOST_RCB_OVERLAY_ID_30:
2587                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2588                 break;
2589         case IPR_HOST_RCB_OVERLAY_ID_1:
2590         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2591         default:
2592                 ipr_log_generic_error(ioa_cfg, hostrcb);
2593                 break;
2594         }
2595 }
2596
2597 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2598 {
2599         struct ipr_hostrcb *hostrcb;
2600
2601         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2602                                         struct ipr_hostrcb, queue);
2603
2604         if (unlikely(!hostrcb)) {
2605                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2606                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2607                                                 struct ipr_hostrcb, queue);
2608         }
2609
2610         list_del_init(&hostrcb->queue);
2611         return hostrcb;
2612 }
2613
2614 /**
2615  * ipr_process_error - Op done function for an adapter error log.
2616  * @ipr_cmd:    ipr command struct
2617  *
2618  * This function is the op done function for an error log host
2619  * controlled async from the adapter. It will log the error and
2620  * send the HCAM back to the adapter.
2621  *
2622  * Return value:
2623  *      none
2624  **/
2625 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2626 {
2627         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2629         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2630         u32 fd_ioasc;
2631
2632         if (ioa_cfg->sis64)
2633                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2634         else
2635                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2636
2637         list_del_init(&hostrcb->queue);
2638         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2639
2640         if (!ioasc) {
2641                 ipr_handle_log_data(ioa_cfg, hostrcb);
2642                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2643                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2644         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2645                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2646                 dev_err(&ioa_cfg->pdev->dev,
2647                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2648         }
2649
2650         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2651         schedule_work(&ioa_cfg->work_q);
2652         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2653
2654         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2655 }
2656
2657 /**
2658  * ipr_timeout -  An internally generated op has timed out.
2659  * @ipr_cmd:    ipr command struct
2660  *
2661  * This function blocks host requests and initiates an
2662  * adapter reset.
2663  *
2664  * Return value:
2665  *      none
2666  **/
2667 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2668 {
2669         unsigned long lock_flags = 0;
2670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672         ENTER;
2673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675         ioa_cfg->errors_logged++;
2676         dev_err(&ioa_cfg->pdev->dev,
2677                 "Adapter being reset due to command timeout.\n");
2678
2679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680                 ioa_cfg->sdt_state = GET_DUMP;
2681
2682         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686         LEAVE;
2687 }
2688
2689 /**
2690  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2691  * @ipr_cmd:    ipr command struct
2692  *
2693  * This function blocks host requests and initiates an
2694  * adapter reset.
2695  *
2696  * Return value:
2697  *      none
2698  **/
2699 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2700 {
2701         unsigned long lock_flags = 0;
2702         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2703
2704         ENTER;
2705         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2706
2707         ioa_cfg->errors_logged++;
2708         dev_err(&ioa_cfg->pdev->dev,
2709                 "Adapter timed out transitioning to operational.\n");
2710
2711         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2712                 ioa_cfg->sdt_state = GET_DUMP;
2713
2714         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2715                 if (ipr_fastfail)
2716                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2717                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2718         }
2719
2720         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721         LEAVE;
2722 }
2723
2724 /**
2725  * ipr_find_ses_entry - Find matching SES in SES table
2726  * @res:        resource entry struct of SES
2727  *
2728  * Return value:
2729  *      pointer to SES table entry / NULL on failure
2730  **/
2731 static const struct ipr_ses_table_entry *
2732 ipr_find_ses_entry(struct ipr_resource_entry *res)
2733 {
2734         int i, j, matches;
2735         struct ipr_std_inq_vpids *vpids;
2736         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2737
2738         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2739                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2740                         if (ste->compare_product_id_byte[j] == 'X') {
2741                                 vpids = &res->std_inq_data.vpids;
2742                                 if (vpids->product_id[j] == ste->product_id[j])
2743                                         matches++;
2744                                 else
2745                                         break;
2746                         } else
2747                                 matches++;
2748                 }
2749
2750                 if (matches == IPR_PROD_ID_LEN)
2751                         return ste;
2752         }
2753
2754         return NULL;
2755 }
2756
2757 /**
2758  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2759  * @ioa_cfg:    ioa config struct
2760  * @bus:                SCSI bus
2761  * @bus_width:  bus width
2762  *
2763  * Return value:
2764  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2765  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2766  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2767  *      max 160MHz = max 320MB/sec).
2768  **/
2769 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2770 {
2771         struct ipr_resource_entry *res;
2772         const struct ipr_ses_table_entry *ste;
2773         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2774
2775         /* Loop through each config table entry in the config table buffer */
2776         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2777                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2778                         continue;
2779
2780                 if (bus != res->bus)
2781                         continue;
2782
2783                 if (!(ste = ipr_find_ses_entry(res)))
2784                         continue;
2785
2786                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2787         }
2788
2789         return max_xfer_rate;
2790 }
2791
2792 /**
2793  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2794  * @ioa_cfg:            ioa config struct
2795  * @max_delay:          max delay in micro-seconds to wait
2796  *
2797  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2798  *
2799  * Return value:
2800  *      0 on success / other on failure
2801  **/
2802 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2803 {
2804         volatile u32 pcii_reg;
2805         int delay = 1;
2806
2807         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2808         while (delay < max_delay) {
2809                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2810
2811                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2812                         return 0;
2813
2814                 /* udelay cannot be used if delay is more than a few milliseconds */
2815                 if ((delay / 1000) > MAX_UDELAY_MS)
2816                         mdelay(delay / 1000);
2817                 else
2818                         udelay(delay);
2819
2820                 delay += delay;
2821         }
2822         return -EIO;
2823 }
2824
2825 /**
2826  * ipr_get_sis64_dump_data_section - Dump IOA memory
2827  * @ioa_cfg:                    ioa config struct
2828  * @start_addr:                 adapter address to dump
2829  * @dest:                       destination kernel buffer
2830  * @length_in_words:            length to dump in 4 byte words
2831  *
2832  * Return value:
2833  *      0 on success
2834  **/
2835 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2836                                            u32 start_addr,
2837                                            __be32 *dest, u32 length_in_words)
2838 {
2839         int i;
2840
2841         for (i = 0; i < length_in_words; i++) {
2842                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2843                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2844                 dest++;
2845         }
2846
2847         return 0;
2848 }
2849
2850 /**
2851  * ipr_get_ldump_data_section - Dump IOA memory
2852  * @ioa_cfg:                    ioa config struct
2853  * @start_addr:                 adapter address to dump
2854  * @dest:                               destination kernel buffer
2855  * @length_in_words:    length to dump in 4 byte words
2856  *
2857  * Return value:
2858  *      0 on success / -EIO on failure
2859  **/
2860 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2861                                       u32 start_addr,
2862                                       __be32 *dest, u32 length_in_words)
2863 {
2864         volatile u32 temp_pcii_reg;
2865         int i, delay = 0;
2866
2867         if (ioa_cfg->sis64)
2868                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2869                                                        dest, length_in_words);
2870
2871         /* Write IOA interrupt reg starting LDUMP state  */
2872         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2873                ioa_cfg->regs.set_uproc_interrupt_reg32);
2874
2875         /* Wait for IO debug acknowledge */
2876         if (ipr_wait_iodbg_ack(ioa_cfg,
2877                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2878                 dev_err(&ioa_cfg->pdev->dev,
2879                         "IOA dump long data transfer timeout\n");
2880                 return -EIO;
2881         }
2882
2883         /* Signal LDUMP interlocked - clear IO debug ack */
2884         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2885                ioa_cfg->regs.clr_interrupt_reg);
2886
2887         /* Write Mailbox with starting address */
2888         writel(start_addr, ioa_cfg->ioa_mailbox);
2889
2890         /* Signal address valid - clear IOA Reset alert */
2891         writel(IPR_UPROCI_RESET_ALERT,
2892                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2893
2894         for (i = 0; i < length_in_words; i++) {
2895                 /* Wait for IO debug acknowledge */
2896                 if (ipr_wait_iodbg_ack(ioa_cfg,
2897                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2898                         dev_err(&ioa_cfg->pdev->dev,
2899                                 "IOA dump short data transfer timeout\n");
2900                         return -EIO;
2901                 }
2902
2903                 /* Read data from mailbox and increment destination pointer */
2904                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2905                 dest++;
2906
2907                 /* For all but the last word of data, signal data received */
2908                 if (i < (length_in_words - 1)) {
2909                         /* Signal dump data received - Clear IO debug Ack */
2910                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2911                                ioa_cfg->regs.clr_interrupt_reg);
2912                 }
2913         }
2914
2915         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2916         writel(IPR_UPROCI_RESET_ALERT,
2917                ioa_cfg->regs.set_uproc_interrupt_reg32);
2918
2919         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2920                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2921
2922         /* Signal dump data received - Clear IO debug Ack */
2923         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924                ioa_cfg->regs.clr_interrupt_reg);
2925
2926         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2927         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2928                 temp_pcii_reg =
2929                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2930
2931                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2932                         return 0;
2933
2934                 udelay(10);
2935                 delay += 10;
2936         }
2937
2938         return 0;
2939 }
2940
2941 #ifdef CONFIG_SCSI_IPR_DUMP
2942 /**
2943  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2944  * @ioa_cfg:            ioa config struct
2945  * @pci_address:        adapter address
2946  * @length:                     length of data to copy
2947  *
2948  * Copy data from PCI adapter to kernel buffer.
2949  * Note: length MUST be a 4 byte multiple
2950  * Return value:
2951  *      0 on success / other on failure
2952  **/
2953 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2954                         unsigned long pci_address, u32 length)
2955 {
2956         int bytes_copied = 0;
2957         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2958         __be32 *page;
2959         unsigned long lock_flags = 0;
2960         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2961
2962         if (ioa_cfg->sis64)
2963                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2964         else
2965                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2966
2967         while (bytes_copied < length &&
2968                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2969                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2970                     ioa_dump->page_offset == 0) {
2971                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2972
2973                         if (!page) {
2974                                 ipr_trace;
2975                                 return bytes_copied;
2976                         }
2977
2978                         ioa_dump->page_offset = 0;
2979                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2980                         ioa_dump->next_page_index++;
2981                 } else
2982                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2983
2984                 rem_len = length - bytes_copied;
2985                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2986                 cur_len = min(rem_len, rem_page_len);
2987
2988                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2990                         rc = -EIO;
2991                 } else {
2992                         rc = ipr_get_ldump_data_section(ioa_cfg,
2993                                                         pci_address + bytes_copied,
2994                                                         &page[ioa_dump->page_offset / 4],
2995                                                         (cur_len / sizeof(u32)));
2996                 }
2997                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998
2999                 if (!rc) {
3000                         ioa_dump->page_offset += cur_len;
3001                         bytes_copied += cur_len;
3002                 } else {
3003                         ipr_trace;
3004                         break;
3005                 }
3006                 schedule();
3007         }
3008
3009         return bytes_copied;
3010 }
3011
3012 /**
3013  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3014  * @hdr:        dump entry header struct
3015  *
3016  * Return value:
3017  *      nothing
3018  **/
3019 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3020 {
3021         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3022         hdr->num_elems = 1;
3023         hdr->offset = sizeof(*hdr);
3024         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3025 }
3026
3027 /**
3028  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3029  * @ioa_cfg:    ioa config struct
3030  * @driver_dump:        driver dump struct
3031  *
3032  * Return value:
3033  *      nothing
3034  **/
3035 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3036                                    struct ipr_driver_dump *driver_dump)
3037 {
3038         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3039
3040         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3041         driver_dump->ioa_type_entry.hdr.len =
3042                 sizeof(struct ipr_dump_ioa_type_entry) -
3043                 sizeof(struct ipr_dump_entry_header);
3044         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3046         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3047         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3048                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3049                 ucode_vpd->minor_release[1];
3050         driver_dump->hdr.num_entries++;
3051 }
3052
3053 /**
3054  * ipr_dump_version_data - Fill in the driver version in the dump.
3055  * @ioa_cfg:    ioa config struct
3056  * @driver_dump:        driver dump struct
3057  *
3058  * Return value:
3059  *      nothing
3060  **/
3061 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3062                                   struct ipr_driver_dump *driver_dump)
3063 {
3064         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3065         driver_dump->version_entry.hdr.len =
3066                 sizeof(struct ipr_dump_version_entry) -
3067                 sizeof(struct ipr_dump_entry_header);
3068         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3069         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3070         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3071         driver_dump->hdr.num_entries++;
3072 }
3073
3074 /**
3075  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3076  * @ioa_cfg:    ioa config struct
3077  * @driver_dump:        driver dump struct
3078  *
3079  * Return value:
3080  *      nothing
3081  **/
3082 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3083                                    struct ipr_driver_dump *driver_dump)
3084 {
3085         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3086         driver_dump->trace_entry.hdr.len =
3087                 sizeof(struct ipr_dump_trace_entry) -
3088                 sizeof(struct ipr_dump_entry_header);
3089         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3090         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3091         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3092         driver_dump->hdr.num_entries++;
3093 }
3094
3095 /**
3096  * ipr_dump_location_data - Fill in the IOA location in the dump.
3097  * @ioa_cfg:    ioa config struct
3098  * @driver_dump:        driver dump struct
3099  *
3100  * Return value:
3101  *      nothing
3102  **/
3103 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3104                                    struct ipr_driver_dump *driver_dump)
3105 {
3106         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3107         driver_dump->location_entry.hdr.len =
3108                 sizeof(struct ipr_dump_location_entry) -
3109                 sizeof(struct ipr_dump_entry_header);
3110         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3111         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3112         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3113         driver_dump->hdr.num_entries++;
3114 }
3115
3116 /**
3117  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3118  * @ioa_cfg:    ioa config struct
3119  * @dump:               dump struct
3120  *
3121  * Return value:
3122  *      nothing
3123  **/
3124 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3125 {
3126         unsigned long start_addr, sdt_word;
3127         unsigned long lock_flags = 0;
3128         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3129         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3130         u32 num_entries, max_num_entries, start_off, end_off;
3131         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3132         struct ipr_sdt *sdt;
3133         int valid = 1;
3134         int i;
3135
3136         ENTER;
3137
3138         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139
3140         if (ioa_cfg->sdt_state != READ_DUMP) {
3141                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142                 return;
3143         }
3144
3145         if (ioa_cfg->sis64) {
3146                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147                 ssleep(IPR_DUMP_DELAY_SECONDS);
3148                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3149         }
3150
3151         start_addr = readl(ioa_cfg->ioa_mailbox);
3152
3153         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3154                 dev_err(&ioa_cfg->pdev->dev,
3155                         "Invalid dump table format: %lx\n", start_addr);
3156                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157                 return;
3158         }
3159
3160         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3161
3162         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3163
3164         /* Initialize the overall dump header */
3165         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3166         driver_dump->hdr.num_entries = 1;
3167         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3168         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3169         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3170         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3171
3172         ipr_dump_version_data(ioa_cfg, driver_dump);
3173         ipr_dump_location_data(ioa_cfg, driver_dump);
3174         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3175         ipr_dump_trace_data(ioa_cfg, driver_dump);
3176
3177         /* Update dump_header */
3178         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3179
3180         /* IOA Dump entry */
3181         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3182         ioa_dump->hdr.len = 0;
3183         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3184         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3185
3186         /* First entries in sdt are actually a list of dump addresses and
3187          lengths to gather the real dump data.  sdt represents the pointer
3188          to the ioa generated dump table.  Dump data will be extracted based
3189          on entries in this table */
3190         sdt = &ioa_dump->sdt;
3191
3192         if (ioa_cfg->sis64) {
3193                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3194                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3195         } else {
3196                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3197                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3198         }
3199
3200         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3201                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3202         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3203                                         bytes_to_copy / sizeof(__be32));
3204
3205         /* Smart Dump table is ready to use and the first entry is valid */
3206         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3207             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3208                 dev_err(&ioa_cfg->pdev->dev,
3209                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3210                         rc, be32_to_cpu(sdt->hdr.state));
3211                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3212                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3213                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214                 return;
3215         }
3216
3217         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3218
3219         if (num_entries > max_num_entries)
3220                 num_entries = max_num_entries;
3221
3222         /* Update dump length to the actual data to be copied */
3223         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3224         if (ioa_cfg->sis64)
3225                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3226         else
3227                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3228
3229         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231         for (i = 0; i < num_entries; i++) {
3232                 if (ioa_dump->hdr.len > max_dump_size) {
3233                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3234                         break;
3235                 }
3236
3237                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3238                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3239                         if (ioa_cfg->sis64)
3240                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3241                         else {
3242                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3243                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3244
3245                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3246                                         bytes_to_copy = end_off - start_off;
3247                                 else
3248                                         valid = 0;
3249                         }
3250                         if (valid) {
3251                                 if (bytes_to_copy > max_dump_size) {
3252                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3253                                         continue;
3254                                 }
3255
3256                                 /* Copy data from adapter to driver buffers */
3257                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3258                                                             bytes_to_copy);
3259
3260                                 ioa_dump->hdr.len += bytes_copied;
3261
3262                                 if (bytes_copied != bytes_to_copy) {
3263                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3264                                         break;
3265                                 }
3266                         }
3267                 }
3268         }
3269
3270         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3271
3272         /* Update dump_header */
3273         driver_dump->hdr.len += ioa_dump->hdr.len;
3274         wmb();
3275         ioa_cfg->sdt_state = DUMP_OBTAINED;
3276         LEAVE;
3277 }
3278
3279 #else
3280 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3281 #endif
3282
3283 /**
3284  * ipr_release_dump - Free adapter dump memory
3285  * @kref:       kref struct
3286  *
3287  * Return value:
3288  *      nothing
3289  **/
3290 static void ipr_release_dump(struct kref *kref)
3291 {
3292         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3293         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3294         unsigned long lock_flags = 0;
3295         int i;
3296
3297         ENTER;
3298         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299         ioa_cfg->dump = NULL;
3300         ioa_cfg->sdt_state = INACTIVE;
3301         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302
3303         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3304                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3305
3306         vfree(dump->ioa_dump.ioa_data);
3307         kfree(dump);
3308         LEAVE;
3309 }
3310
3311 /**
3312  * ipr_worker_thread - Worker thread
3313  * @work:               ioa config struct
3314  *
3315  * Called at task level from a work thread. This function takes care
3316  * of adding and removing device from the mid-layer as configuration
3317  * changes are detected by the adapter.
3318  *
3319  * Return value:
3320  *      nothing
3321  **/
3322 static void ipr_worker_thread(struct work_struct *work)
3323 {
3324         unsigned long lock_flags;
3325         struct ipr_resource_entry *res;
3326         struct scsi_device *sdev;
3327         struct ipr_dump *dump;
3328         struct ipr_ioa_cfg *ioa_cfg =
3329                 container_of(work, struct ipr_ioa_cfg, work_q);
3330         u8 bus, target, lun;
3331         int did_work;
3332
3333         ENTER;
3334         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335
3336         if (ioa_cfg->sdt_state == READ_DUMP) {
3337                 dump = ioa_cfg->dump;
3338                 if (!dump) {
3339                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340                         return;
3341                 }
3342                 kref_get(&dump->kref);
3343                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3344                 ipr_get_ioa_dump(ioa_cfg, dump);
3345                 kref_put(&dump->kref, ipr_release_dump);
3346
3347                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3349                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3350                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3351                 return;
3352         }
3353
3354         if (ioa_cfg->scsi_unblock) {
3355                 ioa_cfg->scsi_unblock = 0;
3356                 ioa_cfg->scsi_blocked = 0;
3357                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3358                 scsi_unblock_requests(ioa_cfg->host);
3359                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3360                 if (ioa_cfg->scsi_blocked)
3361                         scsi_block_requests(ioa_cfg->host);
3362         }
3363
3364         if (!ioa_cfg->scan_enabled) {
3365                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3366                 return;
3367         }
3368
3369 restart:
3370         do {
3371                 did_work = 0;
3372                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3373                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374                         return;
3375                 }
3376
3377                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3378                         if (res->del_from_ml && res->sdev) {
3379                                 did_work = 1;
3380                                 sdev = res->sdev;
3381                                 if (!scsi_device_get(sdev)) {
3382                                         if (!res->add_to_ml)
3383                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3384                                         else
3385                                                 res->del_from_ml = 0;
3386                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3387                                         scsi_remove_device(sdev);
3388                                         scsi_device_put(sdev);
3389                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3390                                 }
3391                                 break;
3392                         }
3393                 }
3394         } while (did_work);
3395
3396         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3397                 if (res->add_to_ml) {
3398                         bus = res->bus;
3399                         target = res->target;
3400                         lun = res->lun;
3401                         res->add_to_ml = 0;
3402                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3403                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3404                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3405                         goto restart;
3406                 }
3407         }
3408
3409         ioa_cfg->scan_done = 1;
3410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3412         LEAVE;
3413 }
3414
3415 #ifdef CONFIG_SCSI_IPR_TRACE
3416 /**
3417  * ipr_read_trace - Dump the adapter trace
3418  * @filp:               open sysfs file
3419  * @kobj:               kobject struct
3420  * @bin_attr:           bin_attribute struct
3421  * @buf:                buffer
3422  * @off:                offset
3423  * @count:              buffer size
3424  *
3425  * Return value:
3426  *      number of bytes printed to buffer
3427  **/
3428 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3429                               struct bin_attribute *bin_attr,
3430                               char *buf, loff_t off, size_t count)
3431 {
3432         struct device *dev = container_of(kobj, struct device, kobj);
3433         struct Scsi_Host *shost = class_to_shost(dev);
3434         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3435         unsigned long lock_flags = 0;
3436         ssize_t ret;
3437
3438         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3439         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3440                                 IPR_TRACE_SIZE);
3441         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3442
3443         return ret;
3444 }
3445
3446 static struct bin_attribute ipr_trace_attr = {
3447         .attr = {
3448                 .name = "trace",
3449                 .mode = S_IRUGO,
3450         },
3451         .size = 0,
3452         .read = ipr_read_trace,
3453 };
3454 #endif
3455
3456 /**
3457  * ipr_show_fw_version - Show the firmware version
3458  * @dev:        class device struct
3459  * @buf:        buffer
3460  *
3461  * Return value:
3462  *      number of bytes printed to buffer
3463  **/
3464 static ssize_t ipr_show_fw_version(struct device *dev,
3465                                    struct device_attribute *attr, char *buf)
3466 {
3467         struct Scsi_Host *shost = class_to_shost(dev);
3468         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3469         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3470         unsigned long lock_flags = 0;
3471         int len;
3472
3473         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3474         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3475                        ucode_vpd->major_release, ucode_vpd->card_type,
3476                        ucode_vpd->minor_release[0],
3477                        ucode_vpd->minor_release[1]);
3478         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3479         return len;
3480 }
3481
3482 static struct device_attribute ipr_fw_version_attr = {
3483         .attr = {
3484                 .name =         "fw_version",
3485                 .mode =         S_IRUGO,
3486         },
3487         .show = ipr_show_fw_version,
3488 };
3489
3490 /**
3491  * ipr_show_log_level - Show the adapter's error logging level
3492  * @dev:        class device struct
3493  * @buf:        buffer
3494  *
3495  * Return value:
3496  *      number of bytes printed to buffer
3497  **/
3498 static ssize_t ipr_show_log_level(struct device *dev,
3499                                    struct device_attribute *attr, char *buf)
3500 {
3501         struct Scsi_Host *shost = class_to_shost(dev);
3502         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3503         unsigned long lock_flags = 0;
3504         int len;
3505
3506         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3507         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3508         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3509         return len;
3510 }
3511
3512 /**
3513  * ipr_store_log_level - Change the adapter's error logging level
3514  * @dev:        class device struct
3515  * @buf:        buffer
3516  *
3517  * Return value:
3518  *      number of bytes printed to buffer
3519  **/
3520 static ssize_t ipr_store_log_level(struct device *dev,
3521                                    struct device_attribute *attr,
3522                                    const char *buf, size_t count)
3523 {
3524         struct Scsi_Host *shost = class_to_shost(dev);
3525         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3526         unsigned long lock_flags = 0;
3527
3528         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3529         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3530         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3531         return strlen(buf);
3532 }
3533
3534 static struct device_attribute ipr_log_level_attr = {
3535         .attr = {
3536                 .name =         "log_level",
3537                 .mode =         S_IRUGO | S_IWUSR,
3538         },
3539         .show = ipr_show_log_level,
3540         .store = ipr_store_log_level
3541 };
3542
3543 /**
3544  * ipr_store_diagnostics - IOA Diagnostics interface
3545  * @dev:        device struct
3546  * @buf:        buffer
3547  * @count:      buffer size
3548  *
3549  * This function will reset the adapter and wait a reasonable
3550  * amount of time for any errors that the adapter might log.
3551  *
3552  * Return value:
3553  *      count on success / other on failure
3554  **/
3555 static ssize_t ipr_store_diagnostics(struct device *dev,
3556                                      struct device_attribute *attr,
3557                                      const char *buf, size_t count)
3558 {
3559         struct Scsi_Host *shost = class_to_shost(dev);
3560         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3561         unsigned long lock_flags = 0;
3562         int rc = count;
3563
3564         if (!capable(CAP_SYS_ADMIN))
3565                 return -EACCES;
3566
3567         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3568         while (ioa_cfg->in_reset_reload) {
3569                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3572         }
3573
3574         ioa_cfg->errors_logged = 0;
3575         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3576
3577         if (ioa_cfg->in_reset_reload) {
3578                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3579                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3580
3581                 /* Wait for a second for any errors to be logged */
3582                 msleep(1000);
3583         } else {
3584                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3585                 return -EIO;
3586         }
3587
3588         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3589         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3590                 rc = -EIO;
3591         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3592
3593         return rc;
3594 }
3595
3596 static struct device_attribute ipr_diagnostics_attr = {
3597         .attr = {
3598                 .name =         "run_diagnostics",
3599                 .mode =         S_IWUSR,
3600         },
3601         .store = ipr_store_diagnostics
3602 };
3603
3604 /**
3605  * ipr_show_adapter_state - Show the adapter's state
3606  * @class_dev:  device struct
3607  * @buf:        buffer
3608  *
3609  * Return value:
3610  *      number of bytes printed to buffer
3611  **/
3612 static ssize_t ipr_show_adapter_state(struct device *dev,
3613                                       struct device_attribute *attr, char *buf)
3614 {
3615         struct Scsi_Host *shost = class_to_shost(dev);
3616         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3617         unsigned long lock_flags = 0;
3618         int len;
3619
3620         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3621         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3622                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3623         else
3624                 len = snprintf(buf, PAGE_SIZE, "online\n");
3625         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3626         return len;
3627 }
3628
3629 /**
3630  * ipr_store_adapter_state - Change adapter state
3631  * @dev:        device struct
3632  * @buf:        buffer
3633  * @count:      buffer size
3634  *
3635  * This function will change the adapter's state.
3636  *
3637  * Return value:
3638  *      count on success / other on failure
3639  **/
3640 static ssize_t ipr_store_adapter_state(struct device *dev,
3641                                        struct device_attribute *attr,
3642                                        const char *buf, size_t count)
3643 {
3644         struct Scsi_Host *shost = class_to_shost(dev);
3645         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3646         unsigned long lock_flags;
3647         int result = count, i;
3648
3649         if (!capable(CAP_SYS_ADMIN))
3650                 return -EACCES;
3651
3652         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3653         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3654             !strncmp(buf, "online", 6)) {
3655                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3656                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3657                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3658                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3659                 }
3660                 wmb();
3661                 ioa_cfg->reset_retries = 0;
3662                 ioa_cfg->in_ioa_bringdown = 0;
3663                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3664         }
3665         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3666         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3667
3668         return result;
3669 }
3670
3671 static struct device_attribute ipr_ioa_state_attr = {
3672         .attr = {
3673                 .name =         "online_state",
3674                 .mode =         S_IRUGO | S_IWUSR,
3675         },
3676         .show = ipr_show_adapter_state,
3677         .store = ipr_store_adapter_state
3678 };
3679
3680 /**
3681  * ipr_store_reset_adapter - Reset the adapter
3682  * @dev:        device struct
3683  * @buf:        buffer
3684  * @count:      buffer size
3685  *
3686  * This function will reset the adapter.
3687  *
3688  * Return value:
3689  *      count on success / other on failure
3690  **/
3691 static ssize_t ipr_store_reset_adapter(struct device *dev,
3692                                        struct device_attribute *attr,
3693                                        const char *buf, size_t count)
3694 {
3695         struct Scsi_Host *shost = class_to_shost(dev);
3696         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3697         unsigned long lock_flags;
3698         int result = count;
3699
3700         if (!capable(CAP_SYS_ADMIN))
3701                 return -EACCES;
3702
3703         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3704         if (!ioa_cfg->in_reset_reload)
3705                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3706         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3707         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3708
3709         return result;
3710 }
3711
3712 static struct device_attribute ipr_ioa_reset_attr = {
3713         .attr = {
3714                 .name =         "reset_host",
3715                 .mode =         S_IWUSR,
3716         },
3717         .store = ipr_store_reset_adapter
3718 };
3719
3720 static int ipr_iopoll(struct irq_poll *iop, int budget);
3721  /**
3722  * ipr_show_iopoll_weight - Show ipr polling mode
3723  * @dev:        class device struct
3724  * @buf:        buffer
3725  *
3726  * Return value:
3727  *      number of bytes printed to buffer
3728  **/
3729 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3730                                    struct device_attribute *attr, char *buf)
3731 {
3732         struct Scsi_Host *shost = class_to_shost(dev);
3733         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3734         unsigned long lock_flags = 0;
3735         int len;
3736
3737         spin_lock_irqsave(shost->host_lock, lock_flags);
3738         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3739         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3740
3741         return len;
3742 }
3743
3744 /**
3745  * ipr_store_iopoll_weight - Change the adapter's polling mode
3746  * @dev:        class device struct
3747  * @buf:        buffer
3748  *
3749  * Return value:
3750  *      number of bytes printed to buffer
3751  **/
3752 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3753                                         struct device_attribute *attr,
3754                                         const char *buf, size_t count)
3755 {
3756         struct Scsi_Host *shost = class_to_shost(dev);
3757         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3758         unsigned long user_iopoll_weight;
3759         unsigned long lock_flags = 0;
3760         int i;
3761
3762         if (!ioa_cfg->sis64) {
3763                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3764                 return -EINVAL;
3765         }
3766         if (kstrtoul(buf, 10, &user_iopoll_weight))
3767                 return -EINVAL;
3768
3769         if (user_iopoll_weight > 256) {
3770                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3771                 return -EINVAL;
3772         }
3773
3774         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3775                 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
3776                 return strlen(buf);
3777         }
3778
3779         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3780                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3781                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3782         }
3783
3784         spin_lock_irqsave(shost->host_lock, lock_flags);
3785         ioa_cfg->iopoll_weight = user_iopoll_weight;
3786         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3787                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3788                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3789                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3790                 }
3791         }
3792         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3793
3794         return strlen(buf);
3795 }
3796
3797 static struct device_attribute ipr_iopoll_weight_attr = {
3798         .attr = {
3799                 .name =         "iopoll_weight",
3800                 .mode =         S_IRUGO | S_IWUSR,
3801         },
3802         .show = ipr_show_iopoll_weight,
3803         .store = ipr_store_iopoll_weight
3804 };
3805
3806 /**
3807  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3808  * @buf_len:            buffer length
3809  *
3810  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3811  * list to use for microcode download
3812  *
3813  * Return value:
3814  *      pointer to sglist / NULL on failure
3815  **/
3816 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3817 {
3818         int sg_size, order, bsize_elem, num_elem, i, j;
3819         struct ipr_sglist *sglist;
3820         struct scatterlist *scatterlist;
3821         struct page *page;
3822
3823         /* Get the minimum size per scatter/gather element */
3824         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3825
3826         /* Get the actual size per element */
3827         order = get_order(sg_size);
3828
3829         /* Determine the actual number of bytes per element */
3830         bsize_elem = PAGE_SIZE * (1 << order);
3831
3832         /* Determine the actual number of sg entries needed */
3833         if (buf_len % bsize_elem)
3834                 num_elem = (buf_len / bsize_elem) + 1;
3835         else
3836                 num_elem = buf_len / bsize_elem;
3837
3838         /* Allocate a scatter/gather list for the DMA */
3839         sglist = kzalloc(sizeof(struct ipr_sglist) +
3840                          (sizeof(struct scatterlist) * (num_elem - 1)),
3841                          GFP_KERNEL);
3842
3843         if (sglist == NULL) {
3844                 ipr_trace;
3845                 return NULL;
3846         }
3847
3848         scatterlist = sglist->scatterlist;
3849         sg_init_table(scatterlist, num_elem);
3850
3851         sglist->order = order;
3852         sglist->num_sg = num_elem;
3853
3854         /* Allocate a bunch of sg elements */
3855         for (i = 0; i < num_elem; i++) {
3856                 page = alloc_pages(GFP_KERNEL, order);
3857                 if (!page) {
3858                         ipr_trace;
3859
3860                         /* Free up what we already allocated */
3861                         for (j = i - 1; j >= 0; j--)
3862                                 __free_pages(sg_page(&scatterlist[j]), order);
3863                         kfree(sglist);
3864                         return NULL;
3865                 }
3866
3867                 sg_set_page(&scatterlist[i], page, 0, 0);
3868         }
3869
3870         return sglist;
3871 }
3872
3873 /**
3874  * ipr_free_ucode_buffer - Frees a microcode download buffer
3875  * @p_dnld:             scatter/gather list pointer
3876  *
3877  * Free a DMA'able ucode download buffer previously allocated with
3878  * ipr_alloc_ucode_buffer
3879  *
3880  * Return value:
3881  *      nothing
3882  **/
3883 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3884 {
3885         int i;
3886
3887         for (i = 0; i < sglist->num_sg; i++)
3888                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3889
3890         kfree(sglist);
3891 }
3892
3893 /**
3894  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3895  * @sglist:             scatter/gather list pointer
3896  * @buffer:             buffer pointer
3897  * @len:                buffer length
3898  *
3899  * Copy a microcode image from a user buffer into a buffer allocated by
3900  * ipr_alloc_ucode_buffer
3901  *
3902  * Return value:
3903  *      0 on success / other on failure
3904  **/
3905 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3906                                  u8 *buffer, u32 len)
3907 {
3908         int bsize_elem, i, result = 0;
3909         struct scatterlist *scatterlist;
3910         void *kaddr;
3911
3912         /* Determine the actual number of bytes per element */
3913         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3914
3915         scatterlist = sglist->scatterlist;
3916
3917         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3918                 struct page *page = sg_page(&scatterlist[i]);
3919
3920                 kaddr = kmap(page);
3921                 memcpy(kaddr, buffer, bsize_elem);
3922                 kunmap(page);
3923
3924                 scatterlist[i].length = bsize_elem;
3925
3926                 if (result != 0) {
3927                         ipr_trace;
3928                         return result;
3929                 }
3930         }
3931
3932         if (len % bsize_elem) {
3933                 struct page *page = sg_page(&scatterlist[i]);
3934
3935                 kaddr = kmap(page);
3936                 memcpy(kaddr, buffer, len % bsize_elem);
3937                 kunmap(page);
3938
3939                 scatterlist[i].length = len % bsize_elem;
3940         }
3941
3942         sglist->buffer_len = len;
3943         return result;
3944 }
3945
3946 /**
3947  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3948  * @ipr_cmd:            ipr command struct
3949  * @sglist:             scatter/gather list
3950  *
3951  * Builds a microcode download IOA data list (IOADL).
3952  *
3953  **/
3954 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3955                                     struct ipr_sglist *sglist)
3956 {
3957         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3958         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3959         struct scatterlist *scatterlist = sglist->scatterlist;
3960         int i;
3961
3962         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3963         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3964         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3965
3966         ioarcb->ioadl_len =
3967                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3968         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3969                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3970                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3971                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3972         }
3973
3974         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3975 }
3976
3977 /**
3978  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3979  * @ipr_cmd:    ipr command struct
3980  * @sglist:             scatter/gather list
3981  *
3982  * Builds a microcode download IOA data list (IOADL).
3983  *
3984  **/
3985 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3986                                   struct ipr_sglist *sglist)
3987 {
3988         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3989         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3990         struct scatterlist *scatterlist = sglist->scatterlist;
3991         int i;
3992
3993         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3994         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3995         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3996
3997         ioarcb->ioadl_len =
3998                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3999
4000         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4001                 ioadl[i].flags_and_data_len =
4002                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4003                 ioadl[i].address =
4004                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4005         }
4006
4007         ioadl[i-1].flags_and_data_len |=
4008                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4009 }
4010
4011 /**
4012  * ipr_update_ioa_ucode - Update IOA's microcode
4013  * @ioa_cfg:    ioa config struct
4014  * @sglist:             scatter/gather list
4015  *
4016  * Initiate an adapter reset to update the IOA's microcode
4017  *
4018  * Return value:
4019  *      0 on success / -EIO on failure
4020  **/
4021 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4022                                 struct ipr_sglist *sglist)
4023 {
4024         unsigned long lock_flags;
4025
4026         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027         while (ioa_cfg->in_reset_reload) {
4028                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4029                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4030                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4031         }
4032
4033         if (ioa_cfg->ucode_sglist) {
4034                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4035                 dev_err(&ioa_cfg->pdev->dev,
4036                         "Microcode download already in progress\n");
4037                 return -EIO;
4038         }
4039
4040         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4041                                         sglist->scatterlist, sglist->num_sg,
4042                                         DMA_TO_DEVICE);
4043
4044         if (!sglist->num_dma_sg) {
4045                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4046                 dev_err(&ioa_cfg->pdev->dev,
4047                         "Failed to map microcode download buffer!\n");
4048                 return -EIO;
4049         }
4050
4051         ioa_cfg->ucode_sglist = sglist;
4052         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4053         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4054         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4055
4056         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057         ioa_cfg->ucode_sglist = NULL;
4058         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4059         return 0;
4060 }
4061
4062 /**
4063  * ipr_store_update_fw - Update the firmware on the adapter
4064  * @class_dev:  device struct
4065  * @buf:        buffer
4066  * @count:      buffer size
4067  *
4068  * This function will update the firmware on the adapter.
4069  *
4070  * Return value:
4071  *      count on success / other on failure
4072  **/
4073 static ssize_t ipr_store_update_fw(struct device *dev,
4074                                    struct device_attribute *attr,
4075                                    const char *buf, size_t count)
4076 {
4077         struct Scsi_Host *shost = class_to_shost(dev);
4078         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4079         struct ipr_ucode_image_header *image_hdr;
4080         const struct firmware *fw_entry;
4081         struct ipr_sglist *sglist;
4082         char fname[100];
4083         char *src;
4084         char *endline;
4085         int result, dnld_size;
4086
4087         if (!capable(CAP_SYS_ADMIN))
4088                 return -EACCES;
4089
4090         snprintf(fname, sizeof(fname), "%s", buf);
4091
4092         endline = strchr(fname, '\n');
4093         if (endline)
4094                 *endline = '\0';
4095
4096         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4097                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4098                 return -EIO;
4099         }
4100
4101         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4102
4103         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4104         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4105         sglist = ipr_alloc_ucode_buffer(dnld_size);
4106
4107         if (!sglist) {
4108                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4109                 release_firmware(fw_entry);
4110                 return -ENOMEM;
4111         }
4112
4113         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4114
4115         if (result) {
4116                 dev_err(&ioa_cfg->pdev->dev,
4117                         "Microcode buffer copy to DMA buffer failed\n");
4118                 goto out;
4119         }
4120
4121         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4122
4123         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4124
4125         if (!result)
4126                 result = count;
4127 out:
4128         ipr_free_ucode_buffer(sglist);
4129         release_firmware(fw_entry);
4130         return result;
4131 }
4132
4133 static struct device_attribute ipr_update_fw_attr = {
4134         .attr = {
4135                 .name =         "update_fw",
4136                 .mode =         S_IWUSR,
4137         },
4138         .store = ipr_store_update_fw
4139 };
4140
4141 /**
4142  * ipr_show_fw_type - Show the adapter's firmware type.
4143  * @dev:        class device struct
4144  * @buf:        buffer
4145  *
4146  * Return value:
4147  *      number of bytes printed to buffer
4148  **/
4149 static ssize_t ipr_show_fw_type(struct device *dev,
4150                                 struct device_attribute *attr, char *buf)
4151 {
4152         struct Scsi_Host *shost = class_to_shost(dev);
4153         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4154         unsigned long lock_flags = 0;
4155         int len;
4156
4157         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4158         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4159         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160         return len;
4161 }
4162
4163 static struct device_attribute ipr_ioa_fw_type_attr = {
4164         .attr = {
4165                 .name =         "fw_type",
4166                 .mode =         S_IRUGO,
4167         },
4168         .show = ipr_show_fw_type
4169 };
4170
4171 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4172                                 struct bin_attribute *bin_attr, char *buf,
4173                                 loff_t off, size_t count)
4174 {
4175         struct device *cdev = container_of(kobj, struct device, kobj);
4176         struct Scsi_Host *shost = class_to_shost(cdev);
4177         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4178         struct ipr_hostrcb *hostrcb;
4179         unsigned long lock_flags = 0;
4180         int ret;
4181
4182         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4183         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4184                                         struct ipr_hostrcb, queue);
4185         if (!hostrcb) {
4186                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4187                 return 0;
4188         }
4189         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4190                                 sizeof(hostrcb->hcam));
4191         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4192         return ret;
4193 }
4194
4195 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4196                                 struct bin_attribute *bin_attr, char *buf,
4197                                 loff_t off, size_t count)
4198 {
4199         struct device *cdev = container_of(kobj, struct device, kobj);
4200         struct Scsi_Host *shost = class_to_shost(cdev);
4201         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4202         struct ipr_hostrcb *hostrcb;
4203         unsigned long lock_flags = 0;
4204
4205         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4206         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4207                                         struct ipr_hostrcb, queue);
4208         if (!hostrcb) {
4209                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4210                 return count;
4211         }
4212
4213         /* Reclaim hostrcb before exit */
4214         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4215         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4216         return count;
4217 }
4218
4219 static struct bin_attribute ipr_ioa_async_err_log = {
4220         .attr = {
4221                 .name =         "async_err_log",
4222                 .mode =         S_IRUGO | S_IWUSR,
4223         },
4224         .size = 0,
4225         .read = ipr_read_async_err_log,
4226         .write = ipr_next_async_err_log
4227 };
4228
4229 static struct device_attribute *ipr_ioa_attrs[] = {
4230         &ipr_fw_version_attr,
4231         &ipr_log_level_attr,
4232         &ipr_diagnostics_attr,
4233         &ipr_ioa_state_attr,
4234         &ipr_ioa_reset_attr,
4235         &ipr_update_fw_attr,
4236         &ipr_ioa_fw_type_attr,
4237         &ipr_iopoll_weight_attr,
4238         NULL,
4239 };
4240
4241 #ifdef CONFIG_SCSI_IPR_DUMP
4242 /**
4243  * ipr_read_dump - Dump the adapter
4244  * @filp:               open sysfs file
4245  * @kobj:               kobject struct
4246  * @bin_attr:           bin_attribute struct
4247  * @buf:                buffer
4248  * @off:                offset
4249  * @count:              buffer size
4250  *
4251  * Return value:
4252  *      number of bytes printed to buffer
4253  **/
4254 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4255                              struct bin_attribute *bin_attr,
4256                              char *buf, loff_t off, size_t count)
4257 {
4258         struct device *cdev = container_of(kobj, struct device, kobj);
4259         struct Scsi_Host *shost = class_to_shost(cdev);
4260         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4261         struct ipr_dump *dump;
4262         unsigned long lock_flags = 0;
4263         char *src;
4264         int len, sdt_end;
4265         size_t rc = count;
4266
4267         if (!capable(CAP_SYS_ADMIN))
4268                 return -EACCES;
4269
4270         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4271         dump = ioa_cfg->dump;
4272
4273         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4274                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4275                 return 0;
4276         }
4277         kref_get(&dump->kref);
4278         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4279
4280         if (off > dump->driver_dump.hdr.len) {
4281                 kref_put(&dump->kref, ipr_release_dump);
4282                 return 0;
4283         }
4284
4285         if (off + count > dump->driver_dump.hdr.len) {
4286                 count = dump->driver_dump.hdr.len - off;
4287                 rc = count;
4288         }
4289
4290         if (count && off < sizeof(dump->driver_dump)) {
4291                 if (off + count > sizeof(dump->driver_dump))
4292                         len = sizeof(dump->driver_dump) - off;
4293                 else
4294                         len = count;
4295                 src = (u8 *)&dump->driver_dump + off;
4296                 memcpy(buf, src, len);
4297                 buf += len;
4298                 off += len;
4299                 count -= len;
4300         }
4301
4302         off -= sizeof(dump->driver_dump);
4303
4304         if (ioa_cfg->sis64)
4305                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4306                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4307                            sizeof(struct ipr_sdt_entry));
4308         else
4309                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4310                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4311
4312         if (count && off < sdt_end) {
4313                 if (off + count > sdt_end)
4314                         len = sdt_end - off;
4315                 else
4316                         len = count;
4317                 src = (u8 *)&dump->ioa_dump + off;
4318                 memcpy(buf, src, len);
4319                 buf += len;
4320                 off += len;
4321                 count -= len;
4322         }
4323
4324         off -= sdt_end;
4325
4326         while (count) {
4327                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4328                         len = PAGE_ALIGN(off) - off;
4329                 else
4330                         len = count;
4331                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4332                 src += off & ~PAGE_MASK;
4333                 memcpy(buf, src, len);
4334                 buf += len;
4335                 off += len;
4336                 count -= len;
4337         }
4338
4339         kref_put(&dump->kref, ipr_release_dump);
4340         return rc;
4341 }
4342
4343 /**
4344  * ipr_alloc_dump - Prepare for adapter dump
4345  * @ioa_cfg:    ioa config struct
4346  *
4347  * Return value:
4348  *      0 on success / other on failure
4349  **/
4350 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4351 {
4352         struct ipr_dump *dump;
4353         __be32 **ioa_data;
4354         unsigned long lock_flags = 0;
4355
4356         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4357
4358         if (!dump) {
4359                 ipr_err("Dump memory allocation failed\n");
4360                 return -ENOMEM;
4361         }
4362
4363         if (ioa_cfg->sis64)
4364                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4365         else
4366                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4367
4368         if (!ioa_data) {
4369                 ipr_err("Dump memory allocation failed\n");
4370                 kfree(dump);
4371                 return -ENOMEM;
4372         }
4373
4374         dump->ioa_dump.ioa_data = ioa_data;
4375
4376         kref_init(&dump->kref);
4377         dump->ioa_cfg = ioa_cfg;
4378
4379         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4380
4381         if (INACTIVE != ioa_cfg->sdt_state) {
4382                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4383                 vfree(dump->ioa_dump.ioa_data);
4384                 kfree(dump);
4385                 return 0;
4386         }
4387
4388         ioa_cfg->dump = dump;
4389         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4390         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4391                 ioa_cfg->dump_taken = 1;
4392                 schedule_work(&ioa_cfg->work_q);
4393         }
4394         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4395
4396         return 0;
4397 }
4398
4399 /**
4400  * ipr_free_dump - Free adapter dump memory
4401  * @ioa_cfg:    ioa config struct
4402  *
4403  * Return value:
4404  *      0 on success / other on failure
4405  **/
4406 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4407 {
4408         struct ipr_dump *dump;
4409         unsigned long lock_flags = 0;
4410
4411         ENTER;
4412
4413         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4414         dump = ioa_cfg->dump;
4415         if (!dump) {
4416                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417                 return 0;
4418         }
4419
4420         ioa_cfg->dump = NULL;
4421         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422
4423         kref_put(&dump->kref, ipr_release_dump);
4424
4425         LEAVE;
4426         return 0;
4427 }
4428
4429 /**
4430  * ipr_write_dump - Setup dump state of adapter
4431  * @filp:               open sysfs file
4432  * @kobj:               kobject struct
4433  * @bin_attr:           bin_attribute struct
4434  * @buf:                buffer
4435  * @off:                offset
4436  * @count:              buffer size
4437  *
4438  * Return value:
4439  *      number of bytes printed to buffer
4440  **/
4441 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4442                               struct bin_attribute *bin_attr,
4443                               char *buf, loff_t off, size_t count)
4444 {
4445         struct device *cdev = container_of(kobj, struct device, kobj);
4446         struct Scsi_Host *shost = class_to_shost(cdev);
4447         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4448         int rc;
4449
4450         if (!capable(CAP_SYS_ADMIN))
4451                 return -EACCES;
4452
4453         if (buf[0] == '1')
4454                 rc = ipr_alloc_dump(ioa_cfg);
4455         else if (buf[0] == '0')
4456                 rc = ipr_free_dump(ioa_cfg);
4457         else
4458                 return -EINVAL;
4459
4460         if (rc)
4461                 return rc;
4462         else
4463                 return count;
4464 }
4465
4466 static struct bin_attribute ipr_dump_attr = {
4467         .attr = {
4468                 .name = "dump",
4469                 .mode = S_IRUSR | S_IWUSR,
4470         },
4471         .size = 0,
4472         .read = ipr_read_dump,
4473         .write = ipr_write_dump
4474 };
4475 #else
4476 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4477 #endif
4478
4479 /**
4480  * ipr_change_queue_depth - Change the device's queue depth
4481  * @sdev:       scsi device struct
4482  * @qdepth:     depth to set
4483  * @reason:     calling context
4484  *
4485  * Return value:
4486  *      actual depth set
4487  **/
4488 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4489 {
4490         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4491         struct ipr_resource_entry *res;
4492         unsigned long lock_flags = 0;
4493
4494         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4495         res = (struct ipr_resource_entry *)sdev->hostdata;
4496
4497         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4498                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4499         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4500
4501         scsi_change_queue_depth(sdev, qdepth);
4502         return sdev->queue_depth;
4503 }
4504
4505 /**
4506  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4507  * @dev:        device struct
4508  * @attr:       device attribute structure
4509  * @buf:        buffer
4510  *
4511  * Return value:
4512  *      number of bytes printed to buffer
4513  **/
4514 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4515 {
4516         struct scsi_device *sdev = to_scsi_device(dev);
4517         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4518         struct ipr_resource_entry *res;
4519         unsigned long lock_flags = 0;
4520         ssize_t len = -ENXIO;
4521
4522         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4523         res = (struct ipr_resource_entry *)sdev->hostdata;
4524         if (res)
4525                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4526         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4527         return len;
4528 }
4529
4530 static struct device_attribute ipr_adapter_handle_attr = {
4531         .attr = {
4532                 .name =         "adapter_handle",
4533                 .mode =         S_IRUSR,
4534         },
4535         .show = ipr_show_adapter_handle
4536 };
4537
4538 /**
4539  * ipr_show_resource_path - Show the resource path or the resource address for
4540  *                          this device.
4541  * @dev:        device struct
4542  * @attr:       device attribute structure
4543  * @buf:        buffer
4544  *
4545  * Return value:
4546  *      number of bytes printed to buffer
4547  **/
4548 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4549 {
4550         struct scsi_device *sdev = to_scsi_device(dev);
4551         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4552         struct ipr_resource_entry *res;
4553         unsigned long lock_flags = 0;
4554         ssize_t len = -ENXIO;
4555         char buffer[IPR_MAX_RES_PATH_LENGTH];
4556
4557         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4558         res = (struct ipr_resource_entry *)sdev->hostdata;
4559         if (res && ioa_cfg->sis64)
4560                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4561                                __ipr_format_res_path(res->res_path, buffer,
4562                                                      sizeof(buffer)));
4563         else if (res)
4564                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4565                                res->bus, res->target, res->lun);
4566
4567         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4568         return len;
4569 }
4570
4571 static struct device_attribute ipr_resource_path_attr = {
4572         .attr = {
4573                 .name =         "resource_path",
4574                 .mode =         S_IRUGO,
4575         },
4576         .show = ipr_show_resource_path
4577 };
4578
4579 /**
4580  * ipr_show_device_id - Show the device_id for this device.
4581  * @dev:        device struct
4582  * @attr:       device attribute structure
4583  * @buf:        buffer
4584  *
4585  * Return value:
4586  *      number of bytes printed to buffer
4587  **/
4588 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4589 {
4590         struct scsi_device *sdev = to_scsi_device(dev);
4591         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4592         struct ipr_resource_entry *res;
4593         unsigned long lock_flags = 0;
4594         ssize_t len = -ENXIO;
4595
4596         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4597         res = (struct ipr_resource_entry *)sdev->hostdata;
4598         if (res && ioa_cfg->sis64)
4599                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4600         else if (res)
4601                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4602
4603         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604         return len;
4605 }
4606
4607 static struct device_attribute ipr_device_id_attr = {
4608         .attr = {
4609                 .name =         "device_id",
4610                 .mode =         S_IRUGO,
4611         },
4612         .show = ipr_show_device_id
4613 };
4614
4615 /**
4616  * ipr_show_resource_type - Show the resource type for this device.
4617  * @dev:        device struct
4618  * @attr:       device attribute structure
4619  * @buf:        buffer
4620  *
4621  * Return value:
4622  *      number of bytes printed to buffer
4623  **/
4624 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4625 {
4626         struct scsi_device *sdev = to_scsi_device(dev);
4627         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4628         struct ipr_resource_entry *res;
4629         unsigned long lock_flags = 0;
4630         ssize_t len = -ENXIO;
4631
4632         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4633         res = (struct ipr_resource_entry *)sdev->hostdata;
4634
4635         if (res)
4636                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4637
4638         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4639         return len;
4640 }
4641
4642 static struct device_attribute ipr_resource_type_attr = {
4643         .attr = {
4644                 .name =         "resource_type",
4645                 .mode =         S_IRUGO,
4646         },
4647         .show = ipr_show_resource_type
4648 };
4649
4650 /**
4651  * ipr_show_raw_mode - Show the adapter's raw mode
4652  * @dev:        class device struct
4653  * @buf:        buffer
4654  *
4655  * Return value:
4656  *      number of bytes printed to buffer
4657  **/
4658 static ssize_t ipr_show_raw_mode(struct device *dev,
4659                                  struct device_attribute *attr, char *buf)
4660 {
4661         struct scsi_device *sdev = to_scsi_device(dev);
4662         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4663         struct ipr_resource_entry *res;
4664         unsigned long lock_flags = 0;
4665         ssize_t len;
4666
4667         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4668         res = (struct ipr_resource_entry *)sdev->hostdata;
4669         if (res)
4670                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4671         else
4672                 len = -ENXIO;
4673         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674         return len;
4675 }
4676
4677 /**
4678  * ipr_store_raw_mode - Change the adapter's raw mode
4679  * @dev:        class device struct
4680  * @buf:        buffer
4681  *
4682  * Return value:
4683  *      number of bytes printed to buffer
4684  **/
4685 static ssize_t ipr_store_raw_mode(struct device *dev,
4686                                   struct device_attribute *attr,
4687                                   const char *buf, size_t count)
4688 {
4689         struct scsi_device *sdev = to_scsi_device(dev);
4690         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4691         struct ipr_resource_entry *res;
4692         unsigned long lock_flags = 0;
4693         ssize_t len;
4694
4695         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4696         res = (struct ipr_resource_entry *)sdev->hostdata;
4697         if (res) {
4698                 if (ipr_is_af_dasd_device(res)) {
4699                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4700                         len = strlen(buf);
4701                         if (res->sdev)
4702                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4703                                         res->raw_mode ? "enabled" : "disabled");
4704                 } else
4705                         len = -EINVAL;
4706         } else
4707                 len = -ENXIO;
4708         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4709         return len;
4710 }
4711
4712 static struct device_attribute ipr_raw_mode_attr = {
4713         .attr = {
4714                 .name =         "raw_mode",
4715                 .mode =         S_IRUGO | S_IWUSR,
4716         },
4717         .show = ipr_show_raw_mode,
4718         .store = ipr_store_raw_mode
4719 };
4720
4721 static struct device_attribute *ipr_dev_attrs[] = {
4722         &ipr_adapter_handle_attr,
4723         &ipr_resource_path_attr,
4724         &ipr_device_id_attr,
4725         &ipr_resource_type_attr,
4726         &ipr_raw_mode_attr,
4727         NULL,
4728 };
4729
4730 /**
4731  * ipr_biosparam - Return the HSC mapping
4732  * @sdev:                       scsi device struct
4733  * @block_device:       block device pointer
4734  * @capacity:           capacity of the device
4735  * @parm:                       Array containing returned HSC values.
4736  *
4737  * This function generates the HSC parms that fdisk uses.
4738  * We want to make sure we return something that places partitions
4739  * on 4k boundaries for best performance with the IOA.
4740  *
4741  * Return value:
4742  *      0 on success
4743  **/
4744 static int ipr_biosparam(struct scsi_device *sdev,
4745                          struct block_device *block_device,
4746                          sector_t capacity, int *parm)
4747 {
4748         int heads, sectors;
4749         sector_t cylinders;
4750
4751         heads = 128;
4752         sectors = 32;
4753
4754         cylinders = capacity;
4755         sector_div(cylinders, (128 * 32));
4756
4757         /* return result */
4758         parm[0] = heads;
4759         parm[1] = sectors;
4760         parm[2] = cylinders;
4761
4762         return 0;
4763 }
4764
4765 /**
4766  * ipr_find_starget - Find target based on bus/target.
4767  * @starget:    scsi target struct
4768  *
4769  * Return value:
4770  *      resource entry pointer if found / NULL if not found
4771  **/
4772 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4773 {
4774         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776         struct ipr_resource_entry *res;
4777
4778         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4779                 if ((res->bus == starget->channel) &&
4780                     (res->target == starget->id)) {
4781                         return res;
4782                 }
4783         }
4784
4785         return NULL;
4786 }
4787
4788 static struct ata_port_info sata_port_info;
4789
4790 /**
4791  * ipr_target_alloc - Prepare for commands to a SCSI target
4792  * @starget:    scsi target struct
4793  *
4794  * If the device is a SATA device, this function allocates an
4795  * ATA port with libata, else it does nothing.
4796  *
4797  * Return value:
4798  *      0 on success / non-0 on failure
4799  **/
4800 static int ipr_target_alloc(struct scsi_target *starget)
4801 {
4802         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4803         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4804         struct ipr_sata_port *sata_port;
4805         struct ata_port *ap;
4806         struct ipr_resource_entry *res;
4807         unsigned long lock_flags;
4808
4809         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4810         res = ipr_find_starget(starget);
4811         starget->hostdata = NULL;
4812
4813         if (res && ipr_is_gata(res)) {
4814                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4815                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4816                 if (!sata_port)
4817                         return -ENOMEM;
4818
4819                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4820                 if (ap) {
4821                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822                         sata_port->ioa_cfg = ioa_cfg;
4823                         sata_port->ap = ap;
4824                         sata_port->res = res;
4825
4826                         res->sata_port = sata_port;
4827                         ap->private_data = sata_port;
4828                         starget->hostdata = sata_port;
4829                 } else {
4830                         kfree(sata_port);
4831                         return -ENOMEM;
4832                 }
4833         }
4834         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4835
4836         return 0;
4837 }
4838
4839 /**
4840  * ipr_target_destroy - Destroy a SCSI target
4841  * @starget:    scsi target struct
4842  *
4843  * If the device was a SATA device, this function frees the libata
4844  * ATA port, else it does nothing.
4845  *
4846  **/
4847 static void ipr_target_destroy(struct scsi_target *starget)
4848 {
4849         struct ipr_sata_port *sata_port = starget->hostdata;
4850         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4851         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4852
4853         if (ioa_cfg->sis64) {
4854                 if (!ipr_find_starget(starget)) {
4855                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4856                                 clear_bit(starget->id, ioa_cfg->array_ids);
4857                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4858                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4859                         else if (starget->channel == 0)
4860                                 clear_bit(starget->id, ioa_cfg->target_ids);
4861                 }
4862         }
4863
4864         if (sata_port) {
4865                 starget->hostdata = NULL;
4866                 ata_sas_port_destroy(sata_port->ap);
4867                 kfree(sata_port);
4868         }
4869 }
4870
4871 /**
4872  * ipr_find_sdev - Find device based on bus/target/lun.
4873  * @sdev:       scsi device struct
4874  *
4875  * Return value:
4876  *      resource entry pointer if found / NULL if not found
4877  **/
4878 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4879 {
4880         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4881         struct ipr_resource_entry *res;
4882
4883         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4884                 if ((res->bus == sdev->channel) &&
4885                     (res->target == sdev->id) &&
4886                     (res->lun == sdev->lun))
4887                         return res;
4888         }
4889
4890         return NULL;
4891 }
4892
4893 /**
4894  * ipr_slave_destroy - Unconfigure a SCSI device
4895  * @sdev:       scsi device struct
4896  *
4897  * Return value:
4898  *      nothing
4899  **/
4900 static void ipr_slave_destroy(struct scsi_device *sdev)
4901 {
4902         struct ipr_resource_entry *res;
4903         struct ipr_ioa_cfg *ioa_cfg;
4904         unsigned long lock_flags = 0;
4905
4906         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4907
4908         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4909         res = (struct ipr_resource_entry *) sdev->hostdata;
4910         if (res) {
4911                 if (res->sata_port)
4912                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4913                 sdev->hostdata = NULL;
4914                 res->sdev = NULL;
4915                 res->sata_port = NULL;
4916         }
4917         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918 }
4919
4920 /**
4921  * ipr_slave_configure - Configure a SCSI device
4922  * @sdev:       scsi device struct
4923  *
4924  * This function configures the specified scsi device.
4925  *
4926  * Return value:
4927  *      0 on success
4928  **/
4929 static int ipr_slave_configure(struct scsi_device *sdev)
4930 {
4931         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4932         struct ipr_resource_entry *res;
4933         struct ata_port *ap = NULL;
4934         unsigned long lock_flags = 0;
4935         char buffer[IPR_MAX_RES_PATH_LENGTH];
4936
4937         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4938         res = sdev->hostdata;
4939         if (res) {
4940                 if (ipr_is_af_dasd_device(res))
4941                         sdev->type = TYPE_RAID;
4942                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4943                         sdev->scsi_level = 4;
4944                         sdev->no_uld_attach = 1;
4945                 }
4946                 if (ipr_is_vset_device(res)) {
4947                         sdev->scsi_level = SCSI_SPC_3;
4948                         sdev->no_report_opcodes = 1;
4949                         blk_queue_rq_timeout(sdev->request_queue,
4950                                              IPR_VSET_RW_TIMEOUT);
4951                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4952                 }
4953                 if (ipr_is_gata(res) && res->sata_port)
4954                         ap = res->sata_port->ap;
4955                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4956
4957                 if (ap) {
4958                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4959                         ata_sas_slave_configure(sdev, ap);
4960                 }
4961
4962                 if (ioa_cfg->sis64)
4963                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4964                                     ipr_format_res_path(ioa_cfg,
4965                                 res->res_path, buffer, sizeof(buffer)));
4966                 return 0;
4967         }
4968         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4969         return 0;
4970 }
4971
4972 /**
4973  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4974  * @sdev:       scsi device struct
4975  *
4976  * This function initializes an ATA port so that future commands
4977  * sent through queuecommand will work.
4978  *
4979  * Return value:
4980  *      0 on success
4981  **/
4982 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4983 {
4984         struct ipr_sata_port *sata_port = NULL;
4985         int rc = -ENXIO;
4986
4987         ENTER;
4988         if (sdev->sdev_target)
4989                 sata_port = sdev->sdev_target->hostdata;
4990         if (sata_port) {
4991                 rc = ata_sas_port_init(sata_port->ap);
4992                 if (rc == 0)
4993                         rc = ata_sas_sync_probe(sata_port->ap);
4994         }
4995
4996         if (rc)
4997                 ipr_slave_destroy(sdev);
4998
4999         LEAVE;
5000         return rc;
5001 }
5002
5003 /**
5004  * ipr_slave_alloc - Prepare for commands to a device.
5005  * @sdev:       scsi device struct
5006  *
5007  * This function saves a pointer to the resource entry
5008  * in the scsi device struct if the device exists. We
5009  * can then use this pointer in ipr_queuecommand when
5010  * handling new commands.
5011  *
5012  * Return value:
5013  *      0 on success / -ENXIO if device does not exist
5014  **/
5015 static int ipr_slave_alloc(struct scsi_device *sdev)
5016 {
5017         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5018         struct ipr_resource_entry *res;
5019         unsigned long lock_flags;
5020         int rc = -ENXIO;
5021
5022         sdev->hostdata = NULL;
5023
5024         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5025
5026         res = ipr_find_sdev(sdev);
5027         if (res) {
5028                 res->sdev = sdev;
5029                 res->add_to_ml = 0;
5030                 res->in_erp = 0;
5031                 sdev->hostdata = res;
5032                 if (!ipr_is_naca_model(res))
5033                         res->needs_sync_complete = 1;
5034                 rc = 0;
5035                 if (ipr_is_gata(res)) {
5036                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5037                         return ipr_ata_slave_alloc(sdev);
5038                 }
5039         }
5040
5041         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5042
5043         return rc;
5044 }
5045
5046 /**
5047  * ipr_match_lun - Match function for specified LUN
5048  * @ipr_cmd:    ipr command struct
5049  * @device:             device to match (sdev)
5050  *
5051  * Returns:
5052  *      1 if command matches sdev / 0 if command does not match sdev
5053  **/
5054 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5055 {
5056         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5057                 return 1;
5058         return 0;
5059 }
5060
5061 /**
5062  * ipr_cmnd_is_free - Check if a command is free or not
5063  * @ipr_cmd     ipr command struct
5064  *
5065  * Returns:
5066  *      true / false
5067  **/
5068 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5069 {
5070         struct ipr_cmnd *loop_cmd;
5071
5072         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5073                 if (loop_cmd == ipr_cmd)
5074                         return true;
5075         }
5076
5077         return false;
5078 }
5079
5080 /**
5081  * ipr_match_res - Match function for specified resource entry
5082  * @ipr_cmd:    ipr command struct
5083  * @resource:   resource entry to match
5084  *
5085  * Returns:
5086  *      1 if command matches sdev / 0 if command does not match sdev
5087  **/
5088 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5089 {
5090         struct ipr_resource_entry *res = resource;
5091
5092         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5093                 return 1;
5094         return 0;
5095 }
5096
5097 /**
5098  * ipr_wait_for_ops - Wait for matching commands to complete
5099  * @ipr_cmd:    ipr command struct
5100  * @device:             device to match (sdev)
5101  * @match:              match function to use
5102  *
5103  * Returns:
5104  *      SUCCESS / FAILED
5105  **/
5106 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5107                             int (*match)(struct ipr_cmnd *, void *))
5108 {
5109         struct ipr_cmnd *ipr_cmd;
5110         int wait, i;
5111         unsigned long flags;
5112         struct ipr_hrr_queue *hrrq;
5113         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5114         DECLARE_COMPLETION_ONSTACK(comp);
5115
5116         ENTER;
5117         do {
5118                 wait = 0;
5119
5120                 for_each_hrrq(hrrq, ioa_cfg) {
5121                         spin_lock_irqsave(hrrq->lock, flags);
5122                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5123                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5124                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5125                                         if (match(ipr_cmd, device)) {
5126                                                 ipr_cmd->eh_comp = &comp;
5127                                                 wait++;
5128                                         }
5129                                 }
5130                         }
5131                         spin_unlock_irqrestore(hrrq->lock, flags);
5132                 }
5133
5134                 if (wait) {
5135                         timeout = wait_for_completion_timeout(&comp, timeout);
5136
5137                         if (!timeout) {
5138                                 wait = 0;
5139
5140                                 for_each_hrrq(hrrq, ioa_cfg) {
5141                                         spin_lock_irqsave(hrrq->lock, flags);
5142                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5143                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5144                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5145                                                         if (match(ipr_cmd, device)) {
5146                                                                 ipr_cmd->eh_comp = NULL;
5147                                                                 wait++;
5148                                                         }
5149                                                 }
5150                                         }
5151                                         spin_unlock_irqrestore(hrrq->lock, flags);
5152                                 }
5153
5154                                 if (wait)
5155                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5156                                 LEAVE;
5157                                 return wait ? FAILED : SUCCESS;
5158                         }
5159                 }
5160         } while (wait);
5161
5162         LEAVE;
5163         return SUCCESS;
5164 }
5165
5166 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5167 {
5168         struct ipr_ioa_cfg *ioa_cfg;
5169         unsigned long lock_flags = 0;
5170         int rc = SUCCESS;
5171
5172         ENTER;
5173         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5174         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5175
5176         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5177                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5178                 dev_err(&ioa_cfg->pdev->dev,
5179                         "Adapter being reset as a result of error recovery.\n");
5180
5181                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5182                         ioa_cfg->sdt_state = GET_DUMP;
5183         }
5184
5185         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5186         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5187         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5188
5189         /* If we got hit with a host reset while we were already resetting
5190          the adapter for some reason, and the reset failed. */
5191         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5192                 ipr_trace;
5193                 rc = FAILED;
5194         }
5195
5196         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5197         LEAVE;
5198         return rc;
5199 }
5200
5201 /**
5202  * ipr_device_reset - Reset the device
5203  * @ioa_cfg:    ioa config struct
5204  * @res:                resource entry struct
5205  *
5206  * This function issues a device reset to the affected device.
5207  * If the device is a SCSI device, a LUN reset will be sent
5208  * to the device first. If that does not work, a target reset
5209  * will be sent. If the device is a SATA device, a PHY reset will
5210  * be sent.
5211  *
5212  * Return value:
5213  *      0 on success / non-zero on failure
5214  **/
5215 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5216                             struct ipr_resource_entry *res)
5217 {
5218         struct ipr_cmnd *ipr_cmd;
5219         struct ipr_ioarcb *ioarcb;
5220         struct ipr_cmd_pkt *cmd_pkt;
5221         struct ipr_ioarcb_ata_regs *regs;
5222         u32 ioasc;
5223
5224         ENTER;
5225         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5226         ioarcb = &ipr_cmd->ioarcb;
5227         cmd_pkt = &ioarcb->cmd_pkt;
5228
5229         if (ipr_cmd->ioa_cfg->sis64) {
5230                 regs = &ipr_cmd->i.ata_ioadl.regs;
5231                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5232         } else
5233                 regs = &ioarcb->u.add_data.u.regs;
5234
5235         ioarcb->res_handle = res->res_handle;
5236         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5237         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5238         if (ipr_is_gata(res)) {
5239                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5240                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5241                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5242         }
5243
5244         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5245         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5246         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5247         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5248                 if (ipr_cmd->ioa_cfg->sis64)
5249                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5250                                sizeof(struct ipr_ioasa_gata));
5251                 else
5252                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5253                                sizeof(struct ipr_ioasa_gata));
5254         }
5255
5256         LEAVE;
5257         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5258 }
5259
5260 /**
5261  * ipr_sata_reset - Reset the SATA port
5262  * @link:       SATA link to reset
5263  * @classes:    class of the attached device
5264  *
5265  * This function issues a SATA phy reset to the affected ATA link.
5266  *
5267  * Return value:
5268  *      0 on success / non-zero on failure
5269  **/
5270 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5271                                 unsigned long deadline)
5272 {
5273         struct ipr_sata_port *sata_port = link->ap->private_data;
5274         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5275         struct ipr_resource_entry *res;
5276         unsigned long lock_flags = 0;
5277         int rc = -ENXIO, ret;
5278
5279         ENTER;
5280         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5281         while (ioa_cfg->in_reset_reload) {
5282                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5283                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5284                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5285         }
5286
5287         res = sata_port->res;
5288         if (res) {
5289                 rc = ipr_device_reset(ioa_cfg, res);
5290                 *classes = res->ata_class;
5291                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5292
5293                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5294                 if (ret != SUCCESS) {
5295                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5296                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5297                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5298
5299                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5300                 }
5301         } else
5302                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5303
5304         LEAVE;
5305         return rc;
5306 }
5307
5308 /**
5309  * ipr_eh_dev_reset - Reset the device
5310  * @scsi_cmd:   scsi command struct
5311  *
5312  * This function issues a device reset to the affected device.
5313  * A LUN reset will be sent to the device first. If that does
5314  * not work, a target reset will be sent.
5315  *
5316  * Return value:
5317  *      SUCCESS / FAILED
5318  **/
5319 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5320 {
5321         struct ipr_cmnd *ipr_cmd;
5322         struct ipr_ioa_cfg *ioa_cfg;
5323         struct ipr_resource_entry *res;
5324         struct ata_port *ap;
5325         int rc = 0, i;
5326         struct ipr_hrr_queue *hrrq;
5327
5328         ENTER;
5329         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5330         res = scsi_cmd->device->hostdata;
5331
5332         /*
5333          * If we are currently going through reset/reload, return failed. This will force the
5334          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5335          * reset to complete
5336          */
5337         if (ioa_cfg->in_reset_reload)
5338                 return FAILED;
5339         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5340                 return FAILED;
5341
5342         for_each_hrrq(hrrq, ioa_cfg) {
5343                 spin_lock(&hrrq->_lock);
5344                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5345                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5346
5347                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5348                                 if (!ipr_cmd->qc)
5349                                         continue;
5350                                 if (ipr_cmnd_is_free(ipr_cmd))
5351                                         continue;
5352
5353                                 ipr_cmd->done = ipr_sata_eh_done;
5354                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5355                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5356                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5357                                 }
5358                         }
5359                 }
5360                 spin_unlock(&hrrq->_lock);
5361         }
5362         res->resetting_device = 1;
5363         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5364
5365         if (ipr_is_gata(res) && res->sata_port) {
5366                 ap = res->sata_port->ap;
5367                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5368                 ata_std_error_handler(ap);
5369                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5370         } else
5371                 rc = ipr_device_reset(ioa_cfg, res);
5372         res->resetting_device = 0;
5373         res->reset_occurred = 1;
5374
5375         LEAVE;
5376         return rc ? FAILED : SUCCESS;
5377 }
5378
5379 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5380 {
5381         int rc;
5382         struct ipr_ioa_cfg *ioa_cfg;
5383         struct ipr_resource_entry *res;
5384
5385         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5386         res = cmd->device->hostdata;
5387
5388         if (!res)
5389                 return FAILED;
5390
5391         spin_lock_irq(cmd->device->host->host_lock);
5392         rc = __ipr_eh_dev_reset(cmd);
5393         spin_unlock_irq(cmd->device->host->host_lock);
5394
5395         if (rc == SUCCESS) {
5396                 if (ipr_is_gata(res) && res->sata_port)
5397                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5398                 else
5399                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5400         }
5401
5402         return rc;
5403 }
5404
5405 /**
5406  * ipr_bus_reset_done - Op done function for bus reset.
5407  * @ipr_cmd:    ipr command struct
5408  *
5409  * This function is the op done function for a bus reset
5410  *
5411  * Return value:
5412  *      none
5413  **/
5414 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5415 {
5416         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5417         struct ipr_resource_entry *res;
5418
5419         ENTER;
5420         if (!ioa_cfg->sis64)
5421                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5422                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5423                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5424                                 break;
5425                         }
5426                 }
5427
5428         /*
5429          * If abort has not completed, indicate the reset has, else call the
5430          * abort's done function to wake the sleeping eh thread
5431          */
5432         if (ipr_cmd->sibling->sibling)
5433                 ipr_cmd->sibling->sibling = NULL;
5434         else
5435                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5436
5437         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5438         LEAVE;
5439 }
5440
5441 /**
5442  * ipr_abort_timeout - An abort task has timed out
5443  * @ipr_cmd:    ipr command struct
5444  *
5445  * This function handles when an abort task times out. If this
5446  * happens we issue a bus reset since we have resources tied
5447  * up that must be freed before returning to the midlayer.
5448  *
5449  * Return value:
5450  *      none
5451  **/
5452 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5453 {
5454         struct ipr_cmnd *reset_cmd;
5455         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5456         struct ipr_cmd_pkt *cmd_pkt;
5457         unsigned long lock_flags = 0;
5458
5459         ENTER;
5460         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5461         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5462                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5463                 return;
5464         }
5465
5466         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5467         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5468         ipr_cmd->sibling = reset_cmd;
5469         reset_cmd->sibling = ipr_cmd;
5470         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5471         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5472         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5473         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5474         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5475
5476         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5477         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5478         LEAVE;
5479 }
5480
5481 /**
5482  * ipr_cancel_op - Cancel specified op
5483  * @scsi_cmd:   scsi command struct
5484  *
5485  * This function cancels specified op.
5486  *
5487  * Return value:
5488  *      SUCCESS / FAILED
5489  **/
5490 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5491 {
5492         struct ipr_cmnd *ipr_cmd;
5493         struct ipr_ioa_cfg *ioa_cfg;
5494         struct ipr_resource_entry *res;
5495         struct ipr_cmd_pkt *cmd_pkt;
5496         u32 ioasc, int_reg;
5497         int i, op_found = 0;
5498         struct ipr_hrr_queue *hrrq;
5499
5500         ENTER;
5501         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5502         res = scsi_cmd->device->hostdata;
5503
5504         /* If we are currently going through reset/reload, return failed.
5505          * This will force the mid-layer to call ipr_eh_host_reset,
5506          * which will then go to sleep and wait for the reset to complete
5507          */
5508         if (ioa_cfg->in_reset_reload ||
5509             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5510                 return FAILED;
5511         if (!res)
5512                 return FAILED;
5513
5514         /*
5515          * If we are aborting a timed out op, chances are that the timeout was caused
5516          * by a still not detected EEH error. In such cases, reading a register will
5517          * trigger the EEH recovery infrastructure.
5518          */
5519         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5520
5521         if (!ipr_is_gscsi(res))
5522                 return FAILED;
5523
5524         for_each_hrrq(hrrq, ioa_cfg) {
5525                 spin_lock(&hrrq->_lock);
5526                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5527                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5528                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5529                                         op_found = 1;
5530                                         break;
5531                                 }
5532                         }
5533                 }
5534                 spin_unlock(&hrrq->_lock);
5535         }
5536
5537         if (!op_found)
5538                 return SUCCESS;
5539
5540         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5541         ipr_cmd->ioarcb.res_handle = res->res_handle;
5542         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5543         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5544         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5545         ipr_cmd->u.sdev = scsi_cmd->device;
5546
5547         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5548                     scsi_cmd->cmnd[0]);
5549         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5550         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5551
5552         /*
5553          * If the abort task timed out and we sent a bus reset, we will get
5554          * one the following responses to the abort
5555          */
5556         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5557                 ioasc = 0;
5558                 ipr_trace;
5559         }
5560
5561         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5562         if (!ipr_is_naca_model(res))
5563                 res->needs_sync_complete = 1;
5564
5565         LEAVE;
5566         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5567 }
5568
5569 /**
5570  * ipr_eh_abort - Abort a single op
5571  * @scsi_cmd:   scsi command struct
5572  *
5573  * Return value:
5574  *      0 if scan in progress / 1 if scan is complete
5575  **/
5576 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5577 {
5578         unsigned long lock_flags;
5579         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5580         int rc = 0;
5581
5582         spin_lock_irqsave(shost->host_lock, lock_flags);
5583         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5584                 rc = 1;
5585         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5586                 rc = 1;
5587         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5588         return rc;
5589 }
5590
5591 /**
5592  * ipr_eh_host_reset - Reset the host adapter
5593  * @scsi_cmd:   scsi command struct
5594  *
5595  * Return value:
5596  *      SUCCESS / FAILED
5597  **/
5598 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5599 {
5600         unsigned long flags;
5601         int rc;
5602         struct ipr_ioa_cfg *ioa_cfg;
5603
5604         ENTER;
5605
5606         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5607
5608         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5609         rc = ipr_cancel_op(scsi_cmd);
5610         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5611
5612         if (rc == SUCCESS)
5613                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5614         LEAVE;
5615         return rc;
5616 }
5617
5618 /**
5619  * ipr_handle_other_interrupt - Handle "other" interrupts
5620  * @ioa_cfg:    ioa config struct
5621  * @int_reg:    interrupt register
5622  *
5623  * Return value:
5624  *      IRQ_NONE / IRQ_HANDLED
5625  **/
5626 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5627                                               u32 int_reg)
5628 {
5629         irqreturn_t rc = IRQ_HANDLED;
5630         u32 int_mask_reg;
5631
5632         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5633         int_reg &= ~int_mask_reg;
5634
5635         /* If an interrupt on the adapter did not occur, ignore it.
5636          * Or in the case of SIS 64, check for a stage change interrupt.
5637          */
5638         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5639                 if (ioa_cfg->sis64) {
5640                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5641                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5642                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5643
5644                                 /* clear stage change */
5645                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5646                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5647                                 list_del(&ioa_cfg->reset_cmd->queue);
5648                                 del_timer(&ioa_cfg->reset_cmd->timer);
5649                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5650                                 return IRQ_HANDLED;
5651                         }
5652                 }
5653
5654                 return IRQ_NONE;
5655         }
5656
5657         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5658                 /* Mask the interrupt */
5659                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5660                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5661
5662                 list_del(&ioa_cfg->reset_cmd->queue);
5663                 del_timer(&ioa_cfg->reset_cmd->timer);
5664                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5665         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5666                 if (ioa_cfg->clear_isr) {
5667                         if (ipr_debug && printk_ratelimit())
5668                                 dev_err(&ioa_cfg->pdev->dev,
5669                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5670                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5671                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5672                         return IRQ_NONE;
5673                 }
5674         } else {
5675                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5676                         ioa_cfg->ioa_unit_checked = 1;
5677                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5678                         dev_err(&ioa_cfg->pdev->dev,
5679                                 "No Host RRQ. 0x%08X\n", int_reg);
5680                 else
5681                         dev_err(&ioa_cfg->pdev->dev,
5682                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5683
5684                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5685                         ioa_cfg->sdt_state = GET_DUMP;
5686
5687                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5688                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5689         }
5690
5691         return rc;
5692 }
5693
5694 /**
5695  * ipr_isr_eh - Interrupt service routine error handler
5696  * @ioa_cfg:    ioa config struct
5697  * @msg:        message to log
5698  *
5699  * Return value:
5700  *      none
5701  **/
5702 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5703 {
5704         ioa_cfg->errors_logged++;
5705         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5706
5707         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5708                 ioa_cfg->sdt_state = GET_DUMP;
5709
5710         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5711 }
5712
5713 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5714                                                 struct list_head *doneq)
5715 {
5716         u32 ioasc;
5717         u16 cmd_index;
5718         struct ipr_cmnd *ipr_cmd;
5719         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5720         int num_hrrq = 0;
5721
5722         /* If interrupts are disabled, ignore the interrupt */
5723         if (!hrr_queue->allow_interrupts)
5724                 return 0;
5725
5726         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5727                hrr_queue->toggle_bit) {
5728
5729                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5730                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5731                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5732
5733                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5734                              cmd_index < hrr_queue->min_cmd_id)) {
5735                         ipr_isr_eh(ioa_cfg,
5736                                 "Invalid response handle from IOA: ",
5737                                 cmd_index);
5738                         break;
5739                 }
5740
5741                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5742                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5743
5744                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5745
5746                 list_move_tail(&ipr_cmd->queue, doneq);
5747
5748                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5749                         hrr_queue->hrrq_curr++;
5750                 } else {
5751                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5752                         hrr_queue->toggle_bit ^= 1u;
5753                 }
5754                 num_hrrq++;
5755                 if (budget > 0 && num_hrrq >= budget)
5756                         break;
5757         }
5758
5759         return num_hrrq;
5760 }
5761
5762 static int ipr_iopoll(struct irq_poll *iop, int budget)
5763 {
5764         struct ipr_ioa_cfg *ioa_cfg;
5765         struct ipr_hrr_queue *hrrq;
5766         struct ipr_cmnd *ipr_cmd, *temp;
5767         unsigned long hrrq_flags;
5768         int completed_ops;
5769         LIST_HEAD(doneq);
5770
5771         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5772         ioa_cfg = hrrq->ioa_cfg;
5773
5774         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5775         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5776
5777         if (completed_ops < budget)
5778                 irq_poll_complete(iop);
5779         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5780
5781         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5782                 list_del(&ipr_cmd->queue);
5783                 del_timer(&ipr_cmd->timer);
5784                 ipr_cmd->fast_done(ipr_cmd);
5785         }
5786
5787         return completed_ops;
5788 }
5789
5790 /**
5791  * ipr_isr - Interrupt service routine
5792  * @irq:        irq number
5793  * @devp:       pointer to ioa config struct
5794  *
5795  * Return value:
5796  *      IRQ_NONE / IRQ_HANDLED
5797  **/
5798 static irqreturn_t ipr_isr(int irq, void *devp)
5799 {
5800         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5801         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5802         unsigned long hrrq_flags = 0;
5803         u32 int_reg = 0;
5804         int num_hrrq = 0;
5805         int irq_none = 0;
5806         struct ipr_cmnd *ipr_cmd, *temp;
5807         irqreturn_t rc = IRQ_NONE;
5808         LIST_HEAD(doneq);
5809
5810         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5811         /* If interrupts are disabled, ignore the interrupt */
5812         if (!hrrq->allow_interrupts) {
5813                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5814                 return IRQ_NONE;
5815         }
5816
5817         while (1) {
5818                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5819                         rc =  IRQ_HANDLED;
5820
5821                         if (!ioa_cfg->clear_isr)
5822                                 break;
5823
5824                         /* Clear the PCI interrupt */
5825                         num_hrrq = 0;
5826                         do {
5827                                 writel(IPR_PCII_HRRQ_UPDATED,
5828                                      ioa_cfg->regs.clr_interrupt_reg32);
5829                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5830                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5831                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5832
5833                 } else if (rc == IRQ_NONE && irq_none == 0) {
5834                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5835                         irq_none++;
5836                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5837                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5838                         ipr_isr_eh(ioa_cfg,
5839                                 "Error clearing HRRQ: ", num_hrrq);
5840                         rc = IRQ_HANDLED;
5841                         break;
5842                 } else
5843                         break;
5844         }
5845
5846         if (unlikely(rc == IRQ_NONE))
5847                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5848
5849         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5850         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5851                 list_del(&ipr_cmd->queue);
5852                 del_timer(&ipr_cmd->timer);
5853                 ipr_cmd->fast_done(ipr_cmd);
5854         }
5855         return rc;
5856 }
5857
5858 /**
5859  * ipr_isr_mhrrq - Interrupt service routine
5860  * @irq:        irq number
5861  * @devp:       pointer to ioa config struct
5862  *
5863  * Return value:
5864  *      IRQ_NONE / IRQ_HANDLED
5865  **/
5866 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5867 {
5868         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5869         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5870         unsigned long hrrq_flags = 0;
5871         struct ipr_cmnd *ipr_cmd, *temp;
5872         irqreturn_t rc = IRQ_NONE;
5873         LIST_HEAD(doneq);
5874
5875         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5876
5877         /* If interrupts are disabled, ignore the interrupt */
5878         if (!hrrq->allow_interrupts) {
5879                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5880                 return IRQ_NONE;
5881         }
5882
5883         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5884                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5885                        hrrq->toggle_bit) {
5886                         irq_poll_sched(&hrrq->iopoll);
5887                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5888                         return IRQ_HANDLED;
5889                 }
5890         } else {
5891                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5892                         hrrq->toggle_bit)
5893
5894                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5895                                 rc =  IRQ_HANDLED;
5896         }
5897
5898         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5899
5900         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5901                 list_del(&ipr_cmd->queue);
5902                 del_timer(&ipr_cmd->timer);
5903                 ipr_cmd->fast_done(ipr_cmd);
5904         }
5905         return rc;
5906 }
5907
5908 /**
5909  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5910  * @ioa_cfg:    ioa config struct
5911  * @ipr_cmd:    ipr command struct
5912  *
5913  * Return value:
5914  *      0 on success / -1 on failure
5915  **/
5916 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5917                              struct ipr_cmnd *ipr_cmd)
5918 {
5919         int i, nseg;
5920         struct scatterlist *sg;
5921         u32 length;
5922         u32 ioadl_flags = 0;
5923         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5924         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5925         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5926
5927         length = scsi_bufflen(scsi_cmd);
5928         if (!length)
5929                 return 0;
5930
5931         nseg = scsi_dma_map(scsi_cmd);
5932         if (nseg < 0) {
5933                 if (printk_ratelimit())
5934                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5935                 return -1;
5936         }
5937
5938         ipr_cmd->dma_use_sg = nseg;
5939
5940         ioarcb->data_transfer_length = cpu_to_be32(length);
5941         ioarcb->ioadl_len =
5942                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5943
5944         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5945                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5946                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5947         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5948                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5949
5950         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5951                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5952                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5953                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5954         }
5955
5956         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5957         return 0;
5958 }
5959
5960 /**
5961  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5962  * @ioa_cfg:    ioa config struct
5963  * @ipr_cmd:    ipr command struct
5964  *
5965  * Return value:
5966  *      0 on success / -1 on failure
5967  **/
5968 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5969                            struct ipr_cmnd *ipr_cmd)
5970 {
5971         int i, nseg;
5972         struct scatterlist *sg;
5973         u32 length;
5974         u32 ioadl_flags = 0;
5975         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5976         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5977         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5978
5979         length = scsi_bufflen(scsi_cmd);
5980         if (!length)
5981                 return 0;
5982
5983         nseg = scsi_dma_map(scsi_cmd);
5984         if (nseg < 0) {
5985                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5986                 return -1;
5987         }
5988
5989         ipr_cmd->dma_use_sg = nseg;
5990
5991         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5992                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5993                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5994                 ioarcb->data_transfer_length = cpu_to_be32(length);
5995                 ioarcb->ioadl_len =
5996                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5997         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5998                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5999                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6000                 ioarcb->read_ioadl_len =
6001                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6002         }
6003
6004         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6005                 ioadl = ioarcb->u.add_data.u.ioadl;
6006                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6007                                     offsetof(struct ipr_ioarcb, u.add_data));
6008                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6009         }
6010
6011         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6012                 ioadl[i].flags_and_data_len =
6013                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6014                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6015         }
6016
6017         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6018         return 0;
6019 }
6020
6021 /**
6022  * __ipr_erp_done - Process completion of ERP for a device
6023  * @ipr_cmd:            ipr command struct
6024  *
6025  * This function copies the sense buffer into the scsi_cmd
6026  * struct and pushes the scsi_done function.
6027  *
6028  * Return value:
6029  *      nothing
6030  **/
6031 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6032 {
6033         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6034         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6035         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6036
6037         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6038                 scsi_cmd->result |= (DID_ERROR << 16);
6039                 scmd_printk(KERN_ERR, scsi_cmd,
6040                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6041         } else {
6042                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6043                        SCSI_SENSE_BUFFERSIZE);
6044         }
6045
6046         if (res) {
6047                 if (!ipr_is_naca_model(res))
6048                         res->needs_sync_complete = 1;
6049                 res->in_erp = 0;
6050         }
6051         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6052         scsi_cmd->scsi_done(scsi_cmd);
6053         if (ipr_cmd->eh_comp)
6054                 complete(ipr_cmd->eh_comp);
6055         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6056 }
6057
6058 /**
6059  * ipr_erp_done - Process completion of ERP for a device
6060  * @ipr_cmd:            ipr command struct
6061  *
6062  * This function copies the sense buffer into the scsi_cmd
6063  * struct and pushes the scsi_done function.
6064  *
6065  * Return value:
6066  *      nothing
6067  **/
6068 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6069 {
6070         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6071         unsigned long hrrq_flags;
6072
6073         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6074         __ipr_erp_done(ipr_cmd);
6075         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6076 }
6077
6078 /**
6079  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6080  * @ipr_cmd:    ipr command struct
6081  *
6082  * Return value:
6083  *      none
6084  **/
6085 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6086 {
6087         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6088         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6089         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6090
6091         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6092         ioarcb->data_transfer_length = 0;
6093         ioarcb->read_data_transfer_length = 0;
6094         ioarcb->ioadl_len = 0;
6095         ioarcb->read_ioadl_len = 0;
6096         ioasa->hdr.ioasc = 0;
6097         ioasa->hdr.residual_data_len = 0;
6098
6099         if (ipr_cmd->ioa_cfg->sis64)
6100                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6101                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6102         else {
6103                 ioarcb->write_ioadl_addr =
6104                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6105                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6106         }
6107 }
6108
6109 /**
6110  * __ipr_erp_request_sense - Send request sense to a device
6111  * @ipr_cmd:    ipr command struct
6112  *
6113  * This function sends a request sense to a device as a result
6114  * of a check condition.
6115  *
6116  * Return value:
6117  *      nothing
6118  **/
6119 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6120 {
6121         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6122         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6123
6124         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6125                 __ipr_erp_done(ipr_cmd);
6126                 return;
6127         }
6128
6129         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6130
6131         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6132         cmd_pkt->cdb[0] = REQUEST_SENSE;
6133         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6134         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6135         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6136         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6137
6138         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6139                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6140
6141         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6142                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6143 }
6144
6145 /**
6146  * ipr_erp_request_sense - Send request sense to a device
6147  * @ipr_cmd:    ipr command struct
6148  *
6149  * This function sends a request sense to a device as a result
6150  * of a check condition.
6151  *
6152  * Return value:
6153  *      nothing
6154  **/
6155 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6156 {
6157         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6158         unsigned long hrrq_flags;
6159
6160         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6161         __ipr_erp_request_sense(ipr_cmd);
6162         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6163 }
6164
6165 /**
6166  * ipr_erp_cancel_all - Send cancel all to a device
6167  * @ipr_cmd:    ipr command struct
6168  *
6169  * This function sends a cancel all to a device to clear the
6170  * queue. If we are running TCQ on the device, QERR is set to 1,
6171  * which means all outstanding ops have been dropped on the floor.
6172  * Cancel all will return them to us.
6173  *
6174  * Return value:
6175  *      nothing
6176  **/
6177 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6178 {
6179         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6180         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6181         struct ipr_cmd_pkt *cmd_pkt;
6182
6183         res->in_erp = 1;
6184
6185         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6186
6187         if (!scsi_cmd->device->simple_tags) {
6188                 __ipr_erp_request_sense(ipr_cmd);
6189                 return;
6190         }
6191
6192         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6193         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6194         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6195
6196         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6197                    IPR_CANCEL_ALL_TIMEOUT);
6198 }
6199
6200 /**
6201  * ipr_dump_ioasa - Dump contents of IOASA
6202  * @ioa_cfg:    ioa config struct
6203  * @ipr_cmd:    ipr command struct
6204  * @res:                resource entry struct
6205  *
6206  * This function is invoked by the interrupt handler when ops
6207  * fail. It will log the IOASA if appropriate. Only called
6208  * for GPDD ops.
6209  *
6210  * Return value:
6211  *      none
6212  **/
6213 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6214                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6215 {
6216         int i;
6217         u16 data_len;
6218         u32 ioasc, fd_ioasc;
6219         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6220         __be32 *ioasa_data = (__be32 *)ioasa;
6221         int error_index;
6222
6223         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6224         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6225
6226         if (0 == ioasc)
6227                 return;
6228
6229         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6230                 return;
6231
6232         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6233                 error_index = ipr_get_error(fd_ioasc);
6234         else
6235                 error_index = ipr_get_error(ioasc);
6236
6237         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6238                 /* Don't log an error if the IOA already logged one */
6239                 if (ioasa->hdr.ilid != 0)
6240                         return;
6241
6242                 if (!ipr_is_gscsi(res))
6243                         return;
6244
6245                 if (ipr_error_table[error_index].log_ioasa == 0)
6246                         return;
6247         }
6248
6249         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6250
6251         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6252         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6253                 data_len = sizeof(struct ipr_ioasa64);
6254         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6255                 data_len = sizeof(struct ipr_ioasa);
6256
6257         ipr_err("IOASA Dump:\n");
6258
6259         for (i = 0; i < data_len / 4; i += 4) {
6260                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6261                         be32_to_cpu(ioasa_data[i]),
6262                         be32_to_cpu(ioasa_data[i+1]),
6263                         be32_to_cpu(ioasa_data[i+2]),
6264                         be32_to_cpu(ioasa_data[i+3]));
6265         }
6266 }
6267
6268 /**
6269  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6270  * @ioasa:              IOASA
6271  * @sense_buf:  sense data buffer
6272  *
6273  * Return value:
6274  *      none
6275  **/
6276 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6277 {
6278         u32 failing_lba;
6279         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6280         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6281         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6282         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6283
6284         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6285
6286         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6287                 return;
6288
6289         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6290
6291         if (ipr_is_vset_device(res) &&
6292             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6293             ioasa->u.vset.failing_lba_hi != 0) {
6294                 sense_buf[0] = 0x72;
6295                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6296                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6297                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6298
6299                 sense_buf[7] = 12;
6300                 sense_buf[8] = 0;
6301                 sense_buf[9] = 0x0A;
6302                 sense_buf[10] = 0x80;
6303
6304                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6305
6306                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6307                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6308                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6309                 sense_buf[15] = failing_lba & 0x000000ff;
6310
6311                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6312
6313                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6314                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6315                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6316                 sense_buf[19] = failing_lba & 0x000000ff;
6317         } else {
6318                 sense_buf[0] = 0x70;
6319                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6320                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6321                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6322
6323                 /* Illegal request */
6324                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6325                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6326                         sense_buf[7] = 10;      /* additional length */
6327
6328                         /* IOARCB was in error */
6329                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6330                                 sense_buf[15] = 0xC0;
6331                         else    /* Parameter data was invalid */
6332                                 sense_buf[15] = 0x80;
6333
6334                         sense_buf[16] =
6335                             ((IPR_FIELD_POINTER_MASK &
6336                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6337                         sense_buf[17] =
6338                             (IPR_FIELD_POINTER_MASK &
6339                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6340                 } else {
6341                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6342                                 if (ipr_is_vset_device(res))
6343                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6344                                 else
6345                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6346
6347                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6348                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6349                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6350                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6351                                 sense_buf[6] = failing_lba & 0x000000ff;
6352                         }
6353
6354                         sense_buf[7] = 6;       /* additional length */
6355                 }
6356         }
6357 }
6358
6359 /**
6360  * ipr_get_autosense - Copy autosense data to sense buffer
6361  * @ipr_cmd:    ipr command struct
6362  *
6363  * This function copies the autosense buffer to the buffer
6364  * in the scsi_cmd, if there is autosense available.
6365  *
6366  * Return value:
6367  *      1 if autosense was available / 0 if not
6368  **/
6369 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6370 {
6371         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6372         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6373
6374         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6375                 return 0;
6376
6377         if (ipr_cmd->ioa_cfg->sis64)
6378                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6379                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6380                            SCSI_SENSE_BUFFERSIZE));
6381         else
6382                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6383                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6384                            SCSI_SENSE_BUFFERSIZE));
6385         return 1;
6386 }
6387
6388 /**
6389  * ipr_erp_start - Process an error response for a SCSI op
6390  * @ioa_cfg:    ioa config struct
6391  * @ipr_cmd:    ipr command struct
6392  *
6393  * This function determines whether or not to initiate ERP
6394  * on the affected device.
6395  *
6396  * Return value:
6397  *      nothing
6398  **/
6399 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6400                               struct ipr_cmnd *ipr_cmd)
6401 {
6402         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6403         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6404         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6405         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6406
6407         if (!res) {
6408                 __ipr_scsi_eh_done(ipr_cmd);
6409                 return;
6410         }
6411
6412         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6413                 ipr_gen_sense(ipr_cmd);
6414
6415         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6416
6417         switch (masked_ioasc) {
6418         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6419                 if (ipr_is_naca_model(res))
6420                         scsi_cmd->result |= (DID_ABORT << 16);
6421                 else
6422                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6423                 break;
6424         case IPR_IOASC_IR_RESOURCE_HANDLE:
6425         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6426                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6427                 break;
6428         case IPR_IOASC_HW_SEL_TIMEOUT:
6429                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6430                 if (!ipr_is_naca_model(res))
6431                         res->needs_sync_complete = 1;
6432                 break;
6433         case IPR_IOASC_SYNC_REQUIRED:
6434                 if (!res->in_erp)
6435                         res->needs_sync_complete = 1;
6436                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6437                 break;
6438         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6439         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6440                 /*
6441                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6442                  * so SCSI mid-layer and upper layers handle it accordingly.
6443                  */
6444                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6445                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6446                 break;
6447         case IPR_IOASC_BUS_WAS_RESET:
6448         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6449                 /*
6450                  * Report the bus reset and ask for a retry. The device
6451                  * will give CC/UA the next command.
6452                  */
6453                 if (!res->resetting_device)
6454                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6455                 scsi_cmd->result |= (DID_ERROR << 16);
6456                 if (!ipr_is_naca_model(res))
6457                         res->needs_sync_complete = 1;
6458                 break;
6459         case IPR_IOASC_HW_DEV_BUS_STATUS:
6460                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6461                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6462                         if (!ipr_get_autosense(ipr_cmd)) {
6463                                 if (!ipr_is_naca_model(res)) {
6464                                         ipr_erp_cancel_all(ipr_cmd);
6465                                         return;
6466                                 }
6467                         }
6468                 }
6469                 if (!ipr_is_naca_model(res))
6470                         res->needs_sync_complete = 1;
6471                 break;
6472         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6473                 break;
6474         case IPR_IOASC_IR_NON_OPTIMIZED:
6475                 if (res->raw_mode) {
6476                         res->raw_mode = 0;
6477                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6478                 } else
6479                         scsi_cmd->result |= (DID_ERROR << 16);
6480                 break;
6481         default:
6482                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6483                         scsi_cmd->result |= (DID_ERROR << 16);
6484                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6485                         res->needs_sync_complete = 1;
6486                 break;
6487         }
6488
6489         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6490         scsi_cmd->scsi_done(scsi_cmd);
6491         if (ipr_cmd->eh_comp)
6492                 complete(ipr_cmd->eh_comp);
6493         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6494 }
6495
6496 /**
6497  * ipr_scsi_done - mid-layer done function
6498  * @ipr_cmd:    ipr command struct
6499  *
6500  * This function is invoked by the interrupt handler for
6501  * ops generated by the SCSI mid-layer
6502  *
6503  * Return value:
6504  *      none
6505  **/
6506 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6507 {
6508         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6509         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6510         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6511         unsigned long lock_flags;
6512
6513         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6514
6515         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6516                 scsi_dma_unmap(scsi_cmd);
6517
6518                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6519                 scsi_cmd->scsi_done(scsi_cmd);
6520                 if (ipr_cmd->eh_comp)
6521                         complete(ipr_cmd->eh_comp);
6522                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6523                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6524         } else {
6525                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6526                 spin_lock(&ipr_cmd->hrrq->_lock);
6527                 ipr_erp_start(ioa_cfg, ipr_cmd);
6528                 spin_unlock(&ipr_cmd->hrrq->_lock);
6529                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6530         }
6531 }
6532
6533 /**
6534  * ipr_queuecommand - Queue a mid-layer request
6535  * @shost:              scsi host struct
6536  * @scsi_cmd:   scsi command struct
6537  *
6538  * This function queues a request generated by the mid-layer.
6539  *
6540  * Return value:
6541  *      0 on success
6542  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6543  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6544  **/
6545 static int ipr_queuecommand(struct Scsi_Host *shost,
6546                             struct scsi_cmnd *scsi_cmd)
6547 {
6548         struct ipr_ioa_cfg *ioa_cfg;
6549         struct ipr_resource_entry *res;
6550         struct ipr_ioarcb *ioarcb;
6551         struct ipr_cmnd *ipr_cmd;
6552         unsigned long hrrq_flags, lock_flags;
6553         int rc;
6554         struct ipr_hrr_queue *hrrq;
6555         int hrrq_id;
6556
6557         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6558
6559         scsi_cmd->result = (DID_OK << 16);
6560         res = scsi_cmd->device->hostdata;
6561
6562         if (ipr_is_gata(res) && res->sata_port) {
6563                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6564                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6565                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6566                 return rc;
6567         }
6568
6569         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6570         hrrq = &ioa_cfg->hrrq[hrrq_id];
6571
6572         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6573         /*
6574          * We are currently blocking all devices due to a host reset
6575          * We have told the host to stop giving us new requests, but
6576          * ERP ops don't count. FIXME
6577          */
6578         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6579                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6580                 return SCSI_MLQUEUE_HOST_BUSY;
6581         }
6582
6583         /*
6584          * FIXME - Create scsi_set_host_offline interface
6585          *  and the ioa_is_dead check can be removed
6586          */
6587         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6588                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6589                 goto err_nodev;
6590         }
6591
6592         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6593         if (ipr_cmd == NULL) {
6594                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6595                 return SCSI_MLQUEUE_HOST_BUSY;
6596         }
6597         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6598
6599         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6600         ioarcb = &ipr_cmd->ioarcb;
6601
6602         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6603         ipr_cmd->scsi_cmd = scsi_cmd;
6604         ipr_cmd->done = ipr_scsi_eh_done;
6605
6606         if (ipr_is_gscsi(res)) {
6607                 if (scsi_cmd->underflow == 0)
6608                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6609
6610                 if (res->reset_occurred) {
6611                         res->reset_occurred = 0;
6612                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6613                 }
6614         }
6615
6616         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6617                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6618
6619                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6620                 if (scsi_cmd->flags & SCMD_TAGGED)
6621                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6622                 else
6623                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6624         }
6625
6626         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6627             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6628                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6629         }
6630         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6631                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6632
6633                 if (scsi_cmd->underflow == 0)
6634                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6635         }
6636
6637         if (ioa_cfg->sis64)
6638                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6639         else
6640                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6641
6642         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6643         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6644                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6645                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6646                 if (!rc)
6647                         scsi_dma_unmap(scsi_cmd);
6648                 return SCSI_MLQUEUE_HOST_BUSY;
6649         }
6650
6651         if (unlikely(hrrq->ioa_is_dead)) {
6652                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6653                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6654                 scsi_dma_unmap(scsi_cmd);
6655                 goto err_nodev;
6656         }
6657
6658         ioarcb->res_handle = res->res_handle;
6659         if (res->needs_sync_complete) {
6660                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6661                 res->needs_sync_complete = 0;
6662         }
6663         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6664         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6665         ipr_send_command(ipr_cmd);
6666         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6667         return 0;
6668
6669 err_nodev:
6670         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6671         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6672         scsi_cmd->result = (DID_NO_CONNECT << 16);
6673         scsi_cmd->scsi_done(scsi_cmd);
6674         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6675         return 0;
6676 }
6677
6678 /**
6679  * ipr_ioctl - IOCTL handler
6680  * @sdev:       scsi device struct
6681  * @cmd:        IOCTL cmd
6682  * @arg:        IOCTL arg
6683  *
6684  * Return value:
6685  *      0 on success / other on failure
6686  **/
6687 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6688 {
6689         struct ipr_resource_entry *res;
6690
6691         res = (struct ipr_resource_entry *)sdev->hostdata;
6692         if (res && ipr_is_gata(res)) {
6693                 if (cmd == HDIO_GET_IDENTITY)
6694                         return -ENOTTY;
6695                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6696         }
6697
6698         return -EINVAL;
6699 }
6700
6701 /**
6702  * ipr_info - Get information about the card/driver
6703  * @scsi_host:  scsi host struct
6704  *
6705  * Return value:
6706  *      pointer to buffer with description string
6707  **/
6708 static const char *ipr_ioa_info(struct Scsi_Host *host)
6709 {
6710         static char buffer[512];
6711         struct ipr_ioa_cfg *ioa_cfg;
6712         unsigned long lock_flags = 0;
6713
6714         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6715
6716         spin_lock_irqsave(host->host_lock, lock_flags);
6717         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6718         spin_unlock_irqrestore(host->host_lock, lock_flags);
6719
6720         return buffer;
6721 }
6722
6723 static struct scsi_host_template driver_template = {
6724         .module = THIS_MODULE,
6725         .name = "IPR",
6726         .info = ipr_ioa_info,
6727         .ioctl = ipr_ioctl,
6728         .queuecommand = ipr_queuecommand,
6729         .eh_abort_handler = ipr_eh_abort,
6730         .eh_device_reset_handler = ipr_eh_dev_reset,
6731         .eh_host_reset_handler = ipr_eh_host_reset,
6732         .slave_alloc = ipr_slave_alloc,
6733         .slave_configure = ipr_slave_configure,
6734         .slave_destroy = ipr_slave_destroy,
6735         .scan_finished = ipr_scan_finished,
6736         .target_alloc = ipr_target_alloc,
6737         .target_destroy = ipr_target_destroy,
6738         .change_queue_depth = ipr_change_queue_depth,
6739         .bios_param = ipr_biosparam,
6740         .can_queue = IPR_MAX_COMMANDS,
6741         .this_id = -1,
6742         .sg_tablesize = IPR_MAX_SGLIST,
6743         .max_sectors = IPR_IOA_MAX_SECTORS,
6744         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6745         .use_clustering = ENABLE_CLUSTERING,
6746         .shost_attrs = ipr_ioa_attrs,
6747         .sdev_attrs = ipr_dev_attrs,
6748         .proc_name = IPR_NAME,
6749 };
6750
6751 /**
6752  * ipr_ata_phy_reset - libata phy_reset handler
6753  * @ap:         ata port to reset
6754  *
6755  **/
6756 static void ipr_ata_phy_reset(struct ata_port *ap)
6757 {
6758         unsigned long flags;
6759         struct ipr_sata_port *sata_port = ap->private_data;
6760         struct ipr_resource_entry *res = sata_port->res;
6761         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6762         int rc;
6763
6764         ENTER;
6765         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6766         while (ioa_cfg->in_reset_reload) {
6767                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6768                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6769                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6770         }
6771
6772         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6773                 goto out_unlock;
6774
6775         rc = ipr_device_reset(ioa_cfg, res);
6776
6777         if (rc) {
6778                 ap->link.device[0].class = ATA_DEV_NONE;
6779                 goto out_unlock;
6780         }
6781
6782         ap->link.device[0].class = res->ata_class;
6783         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6784                 ap->link.device[0].class = ATA_DEV_NONE;
6785
6786 out_unlock:
6787         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6788         LEAVE;
6789 }
6790
6791 /**
6792  * ipr_ata_post_internal - Cleanup after an internal command
6793  * @qc: ATA queued command
6794  *
6795  * Return value:
6796  *      none
6797  **/
6798 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6799 {
6800         struct ipr_sata_port *sata_port = qc->ap->private_data;
6801         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6802         struct ipr_cmnd *ipr_cmd;
6803         struct ipr_hrr_queue *hrrq;
6804         unsigned long flags;
6805
6806         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6807         while (ioa_cfg->in_reset_reload) {
6808                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6809                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6810                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6811         }
6812
6813         for_each_hrrq(hrrq, ioa_cfg) {
6814                 spin_lock(&hrrq->_lock);
6815                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6816                         if (ipr_cmd->qc == qc) {
6817                                 ipr_device_reset(ioa_cfg, sata_port->res);
6818                                 break;
6819                         }
6820                 }
6821                 spin_unlock(&hrrq->_lock);
6822         }
6823         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6824 }
6825
6826 /**
6827  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6828  * @regs:       destination
6829  * @tf: source ATA taskfile
6830  *
6831  * Return value:
6832  *      none
6833  **/
6834 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6835                              struct ata_taskfile *tf)
6836 {
6837         regs->feature = tf->feature;
6838         regs->nsect = tf->nsect;
6839         regs->lbal = tf->lbal;
6840         regs->lbam = tf->lbam;
6841         regs->lbah = tf->lbah;
6842         regs->device = tf->device;
6843         regs->command = tf->command;
6844         regs->hob_feature = tf->hob_feature;
6845         regs->hob_nsect = tf->hob_nsect;
6846         regs->hob_lbal = tf->hob_lbal;
6847         regs->hob_lbam = tf->hob_lbam;
6848         regs->hob_lbah = tf->hob_lbah;
6849         regs->ctl = tf->ctl;
6850 }
6851
6852 /**
6853  * ipr_sata_done - done function for SATA commands
6854  * @ipr_cmd:    ipr command struct
6855  *
6856  * This function is invoked by the interrupt handler for
6857  * ops generated by the SCSI mid-layer to SATA devices
6858  *
6859  * Return value:
6860  *      none
6861  **/
6862 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6863 {
6864         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6865         struct ata_queued_cmd *qc = ipr_cmd->qc;
6866         struct ipr_sata_port *sata_port = qc->ap->private_data;
6867         struct ipr_resource_entry *res = sata_port->res;
6868         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6869
6870         spin_lock(&ipr_cmd->hrrq->_lock);
6871         if (ipr_cmd->ioa_cfg->sis64)
6872                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6873                        sizeof(struct ipr_ioasa_gata));
6874         else
6875                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6876                        sizeof(struct ipr_ioasa_gata));
6877         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6878
6879         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6880                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6881
6882         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6883                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6884         else
6885                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6886         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6887         spin_unlock(&ipr_cmd->hrrq->_lock);
6888         ata_qc_complete(qc);
6889 }
6890
6891 /**
6892  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6893  * @ipr_cmd:    ipr command struct
6894  * @qc:         ATA queued command
6895  *
6896  **/
6897 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6898                                   struct ata_queued_cmd *qc)
6899 {
6900         u32 ioadl_flags = 0;
6901         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6902         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6903         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6904         int len = qc->nbytes;
6905         struct scatterlist *sg;
6906         unsigned int si;
6907         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6908
6909         if (len == 0)
6910                 return;
6911
6912         if (qc->dma_dir == DMA_TO_DEVICE) {
6913                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6914                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6915         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6916                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6917
6918         ioarcb->data_transfer_length = cpu_to_be32(len);
6919         ioarcb->ioadl_len =
6920                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6921         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6922                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6923
6924         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6925                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6926                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6927                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6928
6929                 last_ioadl64 = ioadl64;
6930                 ioadl64++;
6931         }
6932
6933         if (likely(last_ioadl64))
6934                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6935 }
6936
6937 /**
6938  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6939  * @ipr_cmd:    ipr command struct
6940  * @qc:         ATA queued command
6941  *
6942  **/
6943 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6944                                 struct ata_queued_cmd *qc)
6945 {
6946         u32 ioadl_flags = 0;
6947         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6948         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6949         struct ipr_ioadl_desc *last_ioadl = NULL;
6950         int len = qc->nbytes;
6951         struct scatterlist *sg;
6952         unsigned int si;
6953
6954         if (len == 0)
6955                 return;
6956
6957         if (qc->dma_dir == DMA_TO_DEVICE) {
6958                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6959                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6960                 ioarcb->data_transfer_length = cpu_to_be32(len);
6961                 ioarcb->ioadl_len =
6962                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6963         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6964                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6965                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6966                 ioarcb->read_ioadl_len =
6967                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6968         }
6969
6970         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6971                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6972                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6973
6974                 last_ioadl = ioadl;
6975                 ioadl++;
6976         }
6977
6978         if (likely(last_ioadl))
6979                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6980 }
6981
6982 /**
6983  * ipr_qc_defer - Get a free ipr_cmd
6984  * @qc: queued command
6985  *
6986  * Return value:
6987  *      0 if success
6988  **/
6989 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6990 {
6991         struct ata_port *ap = qc->ap;
6992         struct ipr_sata_port *sata_port = ap->private_data;
6993         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6994         struct ipr_cmnd *ipr_cmd;
6995         struct ipr_hrr_queue *hrrq;
6996         int hrrq_id;
6997
6998         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6999         hrrq = &ioa_cfg->hrrq[hrrq_id];
7000
7001         qc->lldd_task = NULL;
7002         spin_lock(&hrrq->_lock);
7003         if (unlikely(hrrq->ioa_is_dead)) {
7004                 spin_unlock(&hrrq->_lock);
7005                 return 0;
7006         }
7007
7008         if (unlikely(!hrrq->allow_cmds)) {
7009                 spin_unlock(&hrrq->_lock);
7010                 return ATA_DEFER_LINK;
7011         }
7012
7013         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7014         if (ipr_cmd == NULL) {
7015                 spin_unlock(&hrrq->_lock);
7016                 return ATA_DEFER_LINK;
7017         }
7018
7019         qc->lldd_task = ipr_cmd;
7020         spin_unlock(&hrrq->_lock);
7021         return 0;
7022 }
7023
7024 /**
7025  * ipr_qc_issue - Issue a SATA qc to a device
7026  * @qc: queued command
7027  *
7028  * Return value:
7029  *      0 if success
7030  **/
7031 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7032 {
7033         struct ata_port *ap = qc->ap;
7034         struct ipr_sata_port *sata_port = ap->private_data;
7035         struct ipr_resource_entry *res = sata_port->res;
7036         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7037         struct ipr_cmnd *ipr_cmd;
7038         struct ipr_ioarcb *ioarcb;
7039         struct ipr_ioarcb_ata_regs *regs;
7040
7041         if (qc->lldd_task == NULL)
7042                 ipr_qc_defer(qc);
7043
7044         ipr_cmd = qc->lldd_task;
7045         if (ipr_cmd == NULL)
7046                 return AC_ERR_SYSTEM;
7047
7048         qc->lldd_task = NULL;
7049         spin_lock(&ipr_cmd->hrrq->_lock);
7050         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7051                         ipr_cmd->hrrq->ioa_is_dead)) {
7052                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7053                 spin_unlock(&ipr_cmd->hrrq->_lock);
7054                 return AC_ERR_SYSTEM;
7055         }
7056
7057         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7058         ioarcb = &ipr_cmd->ioarcb;
7059
7060         if (ioa_cfg->sis64) {
7061                 regs = &ipr_cmd->i.ata_ioadl.regs;
7062                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7063         } else
7064                 regs = &ioarcb->u.add_data.u.regs;
7065
7066         memset(regs, 0, sizeof(*regs));
7067         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7068
7069         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7070         ipr_cmd->qc = qc;
7071         ipr_cmd->done = ipr_sata_done;
7072         ipr_cmd->ioarcb.res_handle = res->res_handle;
7073         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7074         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7075         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7076         ipr_cmd->dma_use_sg = qc->n_elem;
7077
7078         if (ioa_cfg->sis64)
7079                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7080         else
7081                 ipr_build_ata_ioadl(ipr_cmd, qc);
7082
7083         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7084         ipr_copy_sata_tf(regs, &qc->tf);
7085         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7086         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7087
7088         switch (qc->tf.protocol) {
7089         case ATA_PROT_NODATA:
7090         case ATA_PROT_PIO:
7091                 break;
7092
7093         case ATA_PROT_DMA:
7094                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7095                 break;
7096
7097         case ATAPI_PROT_PIO:
7098         case ATAPI_PROT_NODATA:
7099                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7100                 break;
7101
7102         case ATAPI_PROT_DMA:
7103                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7104                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7105                 break;
7106
7107         default:
7108                 WARN_ON(1);
7109                 spin_unlock(&ipr_cmd->hrrq->_lock);
7110                 return AC_ERR_INVALID;
7111         }
7112
7113         ipr_send_command(ipr_cmd);
7114         spin_unlock(&ipr_cmd->hrrq->_lock);
7115
7116         return 0;
7117 }
7118
7119 /**
7120  * ipr_qc_fill_rtf - Read result TF
7121  * @qc: ATA queued command
7122  *
7123  * Return value:
7124  *      true
7125  **/
7126 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7127 {
7128         struct ipr_sata_port *sata_port = qc->ap->private_data;
7129         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7130         struct ata_taskfile *tf = &qc->result_tf;
7131
7132         tf->feature = g->error;
7133         tf->nsect = g->nsect;
7134         tf->lbal = g->lbal;
7135         tf->lbam = g->lbam;
7136         tf->lbah = g->lbah;
7137         tf->device = g->device;
7138         tf->command = g->status;
7139         tf->hob_nsect = g->hob_nsect;
7140         tf->hob_lbal = g->hob_lbal;
7141         tf->hob_lbam = g->hob_lbam;
7142         tf->hob_lbah = g->hob_lbah;
7143
7144         return true;
7145 }
7146
7147 static struct ata_port_operations ipr_sata_ops = {
7148         .phy_reset = ipr_ata_phy_reset,
7149         .hardreset = ipr_sata_reset,
7150         .post_internal_cmd = ipr_ata_post_internal,
7151         .qc_prep = ata_noop_qc_prep,
7152         .qc_defer = ipr_qc_defer,
7153         .qc_issue = ipr_qc_issue,
7154         .qc_fill_rtf = ipr_qc_fill_rtf,
7155         .port_start = ata_sas_port_start,
7156         .port_stop = ata_sas_port_stop
7157 };
7158
7159 static struct ata_port_info sata_port_info = {
7160         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7161                           ATA_FLAG_SAS_HOST,
7162         .pio_mask       = ATA_PIO4_ONLY,
7163         .mwdma_mask     = ATA_MWDMA2,
7164         .udma_mask      = ATA_UDMA6,
7165         .port_ops       = &ipr_sata_ops
7166 };
7167
7168 #ifdef CONFIG_PPC_PSERIES
7169 static const u16 ipr_blocked_processors[] = {
7170         PVR_NORTHSTAR,
7171         PVR_PULSAR,
7172         PVR_POWER4,
7173         PVR_ICESTAR,
7174         PVR_SSTAR,
7175         PVR_POWER4p,
7176         PVR_630,
7177         PVR_630p
7178 };
7179
7180 /**
7181  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7182  * @ioa_cfg:    ioa cfg struct
7183  *
7184  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7185  * certain pSeries hardware. This function determines if the given
7186  * adapter is in one of these confgurations or not.
7187  *
7188  * Return value:
7189  *      1 if adapter is not supported / 0 if adapter is supported
7190  **/
7191 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7192 {
7193         int i;
7194
7195         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7196                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7197                         if (pvr_version_is(ipr_blocked_processors[i]))
7198                                 return 1;
7199                 }
7200         }
7201         return 0;
7202 }
7203 #else
7204 #define ipr_invalid_adapter(ioa_cfg) 0
7205 #endif
7206
7207 /**
7208  * ipr_ioa_bringdown_done - IOA bring down completion.
7209  * @ipr_cmd:    ipr command struct
7210  *
7211  * This function processes the completion of an adapter bring down.
7212  * It wakes any reset sleepers.
7213  *
7214  * Return value:
7215  *      IPR_RC_JOB_RETURN
7216  **/
7217 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7218 {
7219         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7220         int i;
7221
7222         ENTER;
7223         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7224                 ipr_trace;
7225                 ioa_cfg->scsi_unblock = 1;
7226                 schedule_work(&ioa_cfg->work_q);
7227         }
7228
7229         ioa_cfg->in_reset_reload = 0;
7230         ioa_cfg->reset_retries = 0;
7231         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7232                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7233                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7234                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7235         }
7236         wmb();
7237
7238         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7239         wake_up_all(&ioa_cfg->reset_wait_q);
7240         LEAVE;
7241
7242         return IPR_RC_JOB_RETURN;
7243 }
7244
7245 /**
7246  * ipr_ioa_reset_done - IOA reset completion.
7247  * @ipr_cmd:    ipr command struct
7248  *
7249  * This function processes the completion of an adapter reset.
7250  * It schedules any necessary mid-layer add/removes and
7251  * wakes any reset sleepers.
7252  *
7253  * Return value:
7254  *      IPR_RC_JOB_RETURN
7255  **/
7256 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7257 {
7258         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7259         struct ipr_resource_entry *res;
7260         int j;
7261
7262         ENTER;
7263         ioa_cfg->in_reset_reload = 0;
7264         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7265                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7266                 ioa_cfg->hrrq[j].allow_cmds = 1;
7267                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7268         }
7269         wmb();
7270         ioa_cfg->reset_cmd = NULL;
7271         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7272
7273         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7274                 if (res->add_to_ml || res->del_from_ml) {
7275                         ipr_trace;
7276                         break;
7277                 }
7278         }
7279         schedule_work(&ioa_cfg->work_q);
7280
7281         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7282                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7283                 if (j < IPR_NUM_LOG_HCAMS)
7284                         ipr_send_hcam(ioa_cfg,
7285                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7286                                 ioa_cfg->hostrcb[j]);
7287                 else
7288                         ipr_send_hcam(ioa_cfg,
7289                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7290                                 ioa_cfg->hostrcb[j]);
7291         }
7292
7293         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7294         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7295
7296         ioa_cfg->reset_retries = 0;
7297         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7298         wake_up_all(&ioa_cfg->reset_wait_q);
7299
7300         ioa_cfg->scsi_unblock = 1;
7301         schedule_work(&ioa_cfg->work_q);
7302         LEAVE;
7303         return IPR_RC_JOB_RETURN;
7304 }
7305
7306 /**
7307  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7308  * @supported_dev:      supported device struct
7309  * @vpids:                      vendor product id struct
7310  *
7311  * Return value:
7312  *      none
7313  **/
7314 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7315                                  struct ipr_std_inq_vpids *vpids)
7316 {
7317         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7318         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7319         supported_dev->num_records = 1;
7320         supported_dev->data_length =
7321                 cpu_to_be16(sizeof(struct ipr_supported_device));
7322         supported_dev->reserved = 0;
7323 }
7324
7325 /**
7326  * ipr_set_supported_devs - Send Set Supported Devices for a device
7327  * @ipr_cmd:    ipr command struct
7328  *
7329  * This function sends a Set Supported Devices to the adapter
7330  *
7331  * Return value:
7332  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7333  **/
7334 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7335 {
7336         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7337         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7338         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7339         struct ipr_resource_entry *res = ipr_cmd->u.res;
7340
7341         ipr_cmd->job_step = ipr_ioa_reset_done;
7342
7343         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7344                 if (!ipr_is_scsi_disk(res))
7345                         continue;
7346
7347                 ipr_cmd->u.res = res;
7348                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7349
7350                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7351                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7352                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7353
7354                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7355                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7356                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7357                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7358
7359                 ipr_init_ioadl(ipr_cmd,
7360                                ioa_cfg->vpd_cbs_dma +
7361                                  offsetof(struct ipr_misc_cbs, supp_dev),
7362                                sizeof(struct ipr_supported_device),
7363                                IPR_IOADL_FLAGS_WRITE_LAST);
7364
7365                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7366                            IPR_SET_SUP_DEVICE_TIMEOUT);
7367
7368                 if (!ioa_cfg->sis64)
7369                         ipr_cmd->job_step = ipr_set_supported_devs;
7370                 LEAVE;
7371                 return IPR_RC_JOB_RETURN;
7372         }
7373
7374         LEAVE;
7375         return IPR_RC_JOB_CONTINUE;
7376 }
7377
7378 /**
7379  * ipr_get_mode_page - Locate specified mode page
7380  * @mode_pages: mode page buffer
7381  * @page_code:  page code to find
7382  * @len:                minimum required length for mode page
7383  *
7384  * Return value:
7385  *      pointer to mode page / NULL on failure
7386  **/
7387 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7388                                u32 page_code, u32 len)
7389 {
7390         struct ipr_mode_page_hdr *mode_hdr;
7391         u32 page_length;
7392         u32 length;
7393
7394         if (!mode_pages || (mode_pages->hdr.length == 0))
7395                 return NULL;
7396
7397         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7398         mode_hdr = (struct ipr_mode_page_hdr *)
7399                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7400
7401         while (length) {
7402                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7403                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7404                                 return mode_hdr;
7405                         break;
7406                 } else {
7407                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7408                                        mode_hdr->page_length);
7409                         length -= page_length;
7410                         mode_hdr = (struct ipr_mode_page_hdr *)
7411                                 ((unsigned long)mode_hdr + page_length);
7412                 }
7413         }
7414         return NULL;
7415 }
7416
7417 /**
7418  * ipr_check_term_power - Check for term power errors
7419  * @ioa_cfg:    ioa config struct
7420  * @mode_pages: IOAFP mode pages buffer
7421  *
7422  * Check the IOAFP's mode page 28 for term power errors
7423  *
7424  * Return value:
7425  *      nothing
7426  **/
7427 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7428                                  struct ipr_mode_pages *mode_pages)
7429 {
7430         int i;
7431         int entry_length;
7432         struct ipr_dev_bus_entry *bus;
7433         struct ipr_mode_page28 *mode_page;
7434
7435         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7436                                       sizeof(struct ipr_mode_page28));
7437
7438         entry_length = mode_page->entry_length;
7439
7440         bus = mode_page->bus;
7441
7442         for (i = 0; i < mode_page->num_entries; i++) {
7443                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7444                         dev_err(&ioa_cfg->pdev->dev,
7445                                 "Term power is absent on scsi bus %d\n",
7446                                 bus->res_addr.bus);
7447                 }
7448
7449                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7450         }
7451 }
7452
7453 /**
7454  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7455  * @ioa_cfg:    ioa config struct
7456  *
7457  * Looks through the config table checking for SES devices. If
7458  * the SES device is in the SES table indicating a maximum SCSI
7459  * bus speed, the speed is limited for the bus.
7460  *
7461  * Return value:
7462  *      none
7463  **/
7464 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7465 {
7466         u32 max_xfer_rate;
7467         int i;
7468
7469         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7470                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7471                                                        ioa_cfg->bus_attr[i].bus_width);
7472
7473                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7474                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7475         }
7476 }
7477
7478 /**
7479  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7480  * @ioa_cfg:    ioa config struct
7481  * @mode_pages: mode page 28 buffer
7482  *
7483  * Updates mode page 28 based on driver configuration
7484  *
7485  * Return value:
7486  *      none
7487  **/
7488 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7489                                           struct ipr_mode_pages *mode_pages)
7490 {
7491         int i, entry_length;
7492         struct ipr_dev_bus_entry *bus;
7493         struct ipr_bus_attributes *bus_attr;
7494         struct ipr_mode_page28 *mode_page;
7495
7496         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7497                                       sizeof(struct ipr_mode_page28));
7498
7499         entry_length = mode_page->entry_length;
7500
7501         /* Loop for each device bus entry */
7502         for (i = 0, bus = mode_page->bus;
7503              i < mode_page->num_entries;
7504              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7505                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7506                         dev_err(&ioa_cfg->pdev->dev,
7507                                 "Invalid resource address reported: 0x%08X\n",
7508                                 IPR_GET_PHYS_LOC(bus->res_addr));
7509                         continue;
7510                 }
7511
7512                 bus_attr = &ioa_cfg->bus_attr[i];
7513                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7514                 bus->bus_width = bus_attr->bus_width;
7515                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7516                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7517                 if (bus_attr->qas_enabled)
7518                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7519                 else
7520                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7521         }
7522 }
7523
7524 /**
7525  * ipr_build_mode_select - Build a mode select command
7526  * @ipr_cmd:    ipr command struct
7527  * @res_handle: resource handle to send command to
7528  * @parm:               Byte 2 of Mode Sense command
7529  * @dma_addr:   DMA buffer address
7530  * @xfer_len:   data transfer length
7531  *
7532  * Return value:
7533  *      none
7534  **/
7535 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7536                                   __be32 res_handle, u8 parm,
7537                                   dma_addr_t dma_addr, u8 xfer_len)
7538 {
7539         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7540
7541         ioarcb->res_handle = res_handle;
7542         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7543         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7544         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7545         ioarcb->cmd_pkt.cdb[1] = parm;
7546         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7547
7548         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7549 }
7550
7551 /**
7552  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7553  * @ipr_cmd:    ipr command struct
7554  *
7555  * This function sets up the SCSI bus attributes and sends
7556  * a Mode Select for Page 28 to activate them.
7557  *
7558  * Return value:
7559  *      IPR_RC_JOB_RETURN
7560  **/
7561 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7562 {
7563         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7564         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7565         int length;
7566
7567         ENTER;
7568         ipr_scsi_bus_speed_limit(ioa_cfg);
7569         ipr_check_term_power(ioa_cfg, mode_pages);
7570         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7571         length = mode_pages->hdr.length + 1;
7572         mode_pages->hdr.length = 0;
7573
7574         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7575                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7576                               length);
7577
7578         ipr_cmd->job_step = ipr_set_supported_devs;
7579         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7580                                     struct ipr_resource_entry, queue);
7581         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7582
7583         LEAVE;
7584         return IPR_RC_JOB_RETURN;
7585 }
7586
7587 /**
7588  * ipr_build_mode_sense - Builds a mode sense command
7589  * @ipr_cmd:    ipr command struct
7590  * @res:                resource entry struct
7591  * @parm:               Byte 2 of mode sense command
7592  * @dma_addr:   DMA address of mode sense buffer
7593  * @xfer_len:   Size of DMA buffer
7594  *
7595  * Return value:
7596  *      none
7597  **/
7598 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7599                                  __be32 res_handle,
7600                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7601 {
7602         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7603
7604         ioarcb->res_handle = res_handle;
7605         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7606         ioarcb->cmd_pkt.cdb[2] = parm;
7607         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7608         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7609
7610         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7611 }
7612
7613 /**
7614  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7615  * @ipr_cmd:    ipr command struct
7616  *
7617  * This function handles the failure of an IOA bringup command.
7618  *
7619  * Return value:
7620  *      IPR_RC_JOB_RETURN
7621  **/
7622 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7623 {
7624         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7625         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7626
7627         dev_err(&ioa_cfg->pdev->dev,
7628                 "0x%02X failed with IOASC: 0x%08X\n",
7629                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7630
7631         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7632         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7633         return IPR_RC_JOB_RETURN;
7634 }
7635
7636 /**
7637  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7638  * @ipr_cmd:    ipr command struct
7639  *
7640  * This function handles the failure of a Mode Sense to the IOAFP.
7641  * Some adapters do not handle all mode pages.
7642  *
7643  * Return value:
7644  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7645  **/
7646 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7647 {
7648         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7649         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7650
7651         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7652                 ipr_cmd->job_step = ipr_set_supported_devs;
7653                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7654                                             struct ipr_resource_entry, queue);
7655                 return IPR_RC_JOB_CONTINUE;
7656         }
7657
7658         return ipr_reset_cmd_failed(ipr_cmd);
7659 }
7660
7661 /**
7662  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7663  * @ipr_cmd:    ipr command struct
7664  *
7665  * This function send a Page 28 mode sense to the IOA to
7666  * retrieve SCSI bus attributes.
7667  *
7668  * Return value:
7669  *      IPR_RC_JOB_RETURN
7670  **/
7671 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7672 {
7673         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7674
7675         ENTER;
7676         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7677                              0x28, ioa_cfg->vpd_cbs_dma +
7678                              offsetof(struct ipr_misc_cbs, mode_pages),
7679                              sizeof(struct ipr_mode_pages));
7680
7681         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7682         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7683
7684         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7685
7686         LEAVE;
7687         return IPR_RC_JOB_RETURN;
7688 }
7689
7690 /**
7691  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7692  * @ipr_cmd:    ipr command struct
7693  *
7694  * This function enables dual IOA RAID support if possible.
7695  *
7696  * Return value:
7697  *      IPR_RC_JOB_RETURN
7698  **/
7699 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7700 {
7701         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7702         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7703         struct ipr_mode_page24 *mode_page;
7704         int length;
7705
7706         ENTER;
7707         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7708                                       sizeof(struct ipr_mode_page24));
7709
7710         if (mode_page)
7711                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7712
7713         length = mode_pages->hdr.length + 1;
7714         mode_pages->hdr.length = 0;
7715
7716         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7717                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7718                               length);
7719
7720         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7721         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7722
7723         LEAVE;
7724         return IPR_RC_JOB_RETURN;
7725 }
7726
7727 /**
7728  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7729  * @ipr_cmd:    ipr command struct
7730  *
7731  * This function handles the failure of a Mode Sense to the IOAFP.
7732  * Some adapters do not handle all mode pages.
7733  *
7734  * Return value:
7735  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7736  **/
7737 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7738 {
7739         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7740
7741         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7742                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7743                 return IPR_RC_JOB_CONTINUE;
7744         }
7745
7746         return ipr_reset_cmd_failed(ipr_cmd);
7747 }
7748
7749 /**
7750  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7751  * @ipr_cmd:    ipr command struct
7752  *
7753  * This function send a mode sense to the IOA to retrieve
7754  * the IOA Advanced Function Control mode page.
7755  *
7756  * Return value:
7757  *      IPR_RC_JOB_RETURN
7758  **/
7759 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7760 {
7761         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7762
7763         ENTER;
7764         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7765                              0x24, ioa_cfg->vpd_cbs_dma +
7766                              offsetof(struct ipr_misc_cbs, mode_pages),
7767                              sizeof(struct ipr_mode_pages));
7768
7769         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7770         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7771
7772         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7773
7774         LEAVE;
7775         return IPR_RC_JOB_RETURN;
7776 }
7777
7778 /**
7779  * ipr_init_res_table - Initialize the resource table
7780  * @ipr_cmd:    ipr command struct
7781  *
7782  * This function looks through the existing resource table, comparing
7783  * it with the config table. This function will take care of old/new
7784  * devices and schedule adding/removing them from the mid-layer
7785  * as appropriate.
7786  *
7787  * Return value:
7788  *      IPR_RC_JOB_CONTINUE
7789  **/
7790 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7791 {
7792         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7793         struct ipr_resource_entry *res, *temp;
7794         struct ipr_config_table_entry_wrapper cfgtew;
7795         int entries, found, flag, i;
7796         LIST_HEAD(old_res);
7797
7798         ENTER;
7799         if (ioa_cfg->sis64)
7800                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7801         else
7802                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7803
7804         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7805                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7806
7807         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7808                 list_move_tail(&res->queue, &old_res);
7809
7810         if (ioa_cfg->sis64)
7811                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7812         else
7813                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7814
7815         for (i = 0; i < entries; i++) {
7816                 if (ioa_cfg->sis64)
7817                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7818                 else
7819                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7820                 found = 0;
7821
7822                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7823                         if (ipr_is_same_device(res, &cfgtew)) {
7824                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7825                                 found = 1;
7826                                 break;
7827                         }
7828                 }
7829
7830                 if (!found) {
7831                         if (list_empty(&ioa_cfg->free_res_q)) {
7832                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7833                                 break;
7834                         }
7835
7836                         found = 1;
7837                         res = list_entry(ioa_cfg->free_res_q.next,
7838                                          struct ipr_resource_entry, queue);
7839                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7840                         ipr_init_res_entry(res, &cfgtew);
7841                         res->add_to_ml = 1;
7842                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7843                         res->sdev->allow_restart = 1;
7844
7845                 if (found)
7846                         ipr_update_res_entry(res, &cfgtew);
7847         }
7848
7849         list_for_each_entry_safe(res, temp, &old_res, queue) {
7850                 if (res->sdev) {
7851                         res->del_from_ml = 1;
7852                         res->res_handle = IPR_INVALID_RES_HANDLE;
7853                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7854                 }
7855         }
7856
7857         list_for_each_entry_safe(res, temp, &old_res, queue) {
7858                 ipr_clear_res_target(res);
7859                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7860         }
7861
7862         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7863                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7864         else
7865                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7866
7867         LEAVE;
7868         return IPR_RC_JOB_CONTINUE;
7869 }
7870
7871 /**
7872  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7873  * @ipr_cmd:    ipr command struct
7874  *
7875  * This function sends a Query IOA Configuration command
7876  * to the adapter to retrieve the IOA configuration table.
7877  *
7878  * Return value:
7879  *      IPR_RC_JOB_RETURN
7880  **/
7881 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7882 {
7883         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7884         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7885         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7886         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7887
7888         ENTER;
7889         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7890                 ioa_cfg->dual_raid = 1;
7891         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7892                  ucode_vpd->major_release, ucode_vpd->card_type,
7893                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7894         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7895         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7896
7897         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7898         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7899         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7900         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7901
7902         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7903                        IPR_IOADL_FLAGS_READ_LAST);
7904
7905         ipr_cmd->job_step = ipr_init_res_table;
7906
7907         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7908
7909         LEAVE;
7910         return IPR_RC_JOB_RETURN;
7911 }
7912
7913 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7914 {
7915         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7916
7917         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7918                 return IPR_RC_JOB_CONTINUE;
7919
7920         return ipr_reset_cmd_failed(ipr_cmd);
7921 }
7922
7923 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7924                                          __be32 res_handle, u8 sa_code)
7925 {
7926         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7927
7928         ioarcb->res_handle = res_handle;
7929         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7930         ioarcb->cmd_pkt.cdb[1] = sa_code;
7931         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7932 }
7933
7934 /**
7935  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7936  * action
7937  *
7938  * Return value:
7939  *      none
7940  **/
7941 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7942 {
7943         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7944         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7945         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7946
7947         ENTER;
7948
7949         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7950
7951         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7952                 ipr_build_ioa_service_action(ipr_cmd,
7953                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7954                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7955
7956                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7957
7958                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7959                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7960                            IPR_SET_SUP_DEVICE_TIMEOUT);
7961
7962                 LEAVE;
7963                 return IPR_RC_JOB_RETURN;
7964         }
7965
7966         LEAVE;
7967         return IPR_RC_JOB_CONTINUE;
7968 }
7969
7970 /**
7971  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7972  * @ipr_cmd:    ipr command struct
7973  *
7974  * This utility function sends an inquiry to the adapter.
7975  *
7976  * Return value:
7977  *      none
7978  **/
7979 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7980                               dma_addr_t dma_addr, u8 xfer_len)
7981 {
7982         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7983
7984         ENTER;
7985         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7986         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7987
7988         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7989         ioarcb->cmd_pkt.cdb[1] = flags;
7990         ioarcb->cmd_pkt.cdb[2] = page;
7991         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7992
7993         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7994
7995         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7996         LEAVE;
7997 }
7998
7999 /**
8000  * ipr_inquiry_page_supported - Is the given inquiry page supported
8001  * @page0:              inquiry page 0 buffer
8002  * @page:               page code.
8003  *
8004  * This function determines if the specified inquiry page is supported.
8005  *
8006  * Return value:
8007  *      1 if page is supported / 0 if not
8008  **/
8009 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8010 {
8011         int i;
8012
8013         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8014                 if (page0->page[i] == page)
8015                         return 1;
8016
8017         return 0;
8018 }
8019
8020 /**
8021  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8022  * @ipr_cmd:    ipr command struct
8023  *
8024  * This function sends a Page 0xC4 inquiry to the adapter
8025  * to retrieve software VPD information.
8026  *
8027  * Return value:
8028  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8029  **/
8030 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8031 {
8032         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8033         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8034         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8035
8036         ENTER;
8037         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8038         memset(pageC4, 0, sizeof(*pageC4));
8039
8040         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8041                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8042                                   (ioa_cfg->vpd_cbs_dma
8043                                    + offsetof(struct ipr_misc_cbs,
8044                                               pageC4_data)),
8045                                   sizeof(struct ipr_inquiry_pageC4));
8046                 return IPR_RC_JOB_RETURN;
8047         }
8048
8049         LEAVE;
8050         return IPR_RC_JOB_CONTINUE;
8051 }
8052
8053 /**
8054  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8055  * @ipr_cmd:    ipr command struct
8056  *
8057  * This function sends a Page 0xD0 inquiry to the adapter
8058  * to retrieve adapter capabilities.
8059  *
8060  * Return value:
8061  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8062  **/
8063 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8064 {
8065         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8066         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8067         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8068
8069         ENTER;
8070         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8071         memset(cap, 0, sizeof(*cap));
8072
8073         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8074                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8075                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8076                                   sizeof(struct ipr_inquiry_cap));
8077                 return IPR_RC_JOB_RETURN;
8078         }
8079
8080         LEAVE;
8081         return IPR_RC_JOB_CONTINUE;
8082 }
8083
8084 /**
8085  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8086  * @ipr_cmd:    ipr command struct
8087  *
8088  * This function sends a Page 3 inquiry to the adapter
8089  * to retrieve software VPD information.
8090  *
8091  * Return value:
8092  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8093  **/
8094 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8095 {
8096         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8097
8098         ENTER;
8099
8100         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8101
8102         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8103                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8104                           sizeof(struct ipr_inquiry_page3));
8105
8106         LEAVE;
8107         return IPR_RC_JOB_RETURN;
8108 }
8109
8110 /**
8111  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8112  * @ipr_cmd:    ipr command struct
8113  *
8114  * This function sends a Page 0 inquiry to the adapter
8115  * to retrieve supported inquiry pages.
8116  *
8117  * Return value:
8118  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8119  **/
8120 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8121 {
8122         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8123         char type[5];
8124
8125         ENTER;
8126
8127         /* Grab the type out of the VPD and store it away */
8128         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8129         type[4] = '\0';
8130         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8131
8132         if (ipr_invalid_adapter(ioa_cfg)) {
8133                 dev_err(&ioa_cfg->pdev->dev,
8134                         "Adapter not supported in this hardware configuration.\n");
8135
8136                 if (!ipr_testmode) {
8137                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8138                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8139                         list_add_tail(&ipr_cmd->queue,
8140                                         &ioa_cfg->hrrq->hrrq_free_q);
8141                         return IPR_RC_JOB_RETURN;
8142                 }
8143         }
8144
8145         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8146
8147         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8148                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8149                           sizeof(struct ipr_inquiry_page0));
8150
8151         LEAVE;
8152         return IPR_RC_JOB_RETURN;
8153 }
8154
8155 /**
8156  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8157  * @ipr_cmd:    ipr command struct
8158  *
8159  * This function sends a standard inquiry to the adapter.
8160  *
8161  * Return value:
8162  *      IPR_RC_JOB_RETURN
8163  **/
8164 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8165 {
8166         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8167
8168         ENTER;
8169         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8170
8171         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8172                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8173                           sizeof(struct ipr_ioa_vpd));
8174
8175         LEAVE;
8176         return IPR_RC_JOB_RETURN;
8177 }
8178
8179 /**
8180  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8181  * @ipr_cmd:    ipr command struct
8182  *
8183  * This function send an Identify Host Request Response Queue
8184  * command to establish the HRRQ with the adapter.
8185  *
8186  * Return value:
8187  *      IPR_RC_JOB_RETURN
8188  **/
8189 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8190 {
8191         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8192         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8193         struct ipr_hrr_queue *hrrq;
8194
8195         ENTER;
8196         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8197         if (ioa_cfg->identify_hrrq_index == 0)
8198                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8199
8200         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8201                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8202
8203                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8204                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8205
8206                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8207                 if (ioa_cfg->sis64)
8208                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8209
8210                 if (ioa_cfg->nvectors == 1)
8211                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8212                 else
8213                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8214
8215                 ioarcb->cmd_pkt.cdb[2] =
8216                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8217                 ioarcb->cmd_pkt.cdb[3] =
8218                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8219                 ioarcb->cmd_pkt.cdb[4] =
8220                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8221                 ioarcb->cmd_pkt.cdb[5] =
8222                         ((u64) hrrq->host_rrq_dma) & 0xff;
8223                 ioarcb->cmd_pkt.cdb[7] =
8224                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8225                 ioarcb->cmd_pkt.cdb[8] =
8226                         (sizeof(u32) * hrrq->size) & 0xff;
8227
8228                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8229                         ioarcb->cmd_pkt.cdb[9] =
8230                                         ioa_cfg->identify_hrrq_index;
8231
8232                 if (ioa_cfg->sis64) {
8233                         ioarcb->cmd_pkt.cdb[10] =
8234                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8235                         ioarcb->cmd_pkt.cdb[11] =
8236                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8237                         ioarcb->cmd_pkt.cdb[12] =
8238                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8239                         ioarcb->cmd_pkt.cdb[13] =
8240                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8241                 }
8242
8243                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8244                         ioarcb->cmd_pkt.cdb[14] =
8245                                         ioa_cfg->identify_hrrq_index;
8246
8247                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8248                            IPR_INTERNAL_TIMEOUT);
8249
8250                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8251                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8252
8253                 LEAVE;
8254                 return IPR_RC_JOB_RETURN;
8255         }
8256
8257         LEAVE;
8258         return IPR_RC_JOB_CONTINUE;
8259 }
8260
8261 /**
8262  * ipr_reset_timer_done - Adapter reset timer function
8263  * @ipr_cmd:    ipr command struct
8264  *
8265  * Description: This function is used in adapter reset processing
8266  * for timing events. If the reset_cmd pointer in the IOA
8267  * config struct is not this adapter's we are doing nested
8268  * resets and fail_all_ops will take care of freeing the
8269  * command block.
8270  *
8271  * Return value:
8272  *      none
8273  **/
8274 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8275 {
8276         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8277         unsigned long lock_flags = 0;
8278
8279         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8280
8281         if (ioa_cfg->reset_cmd == ipr_cmd) {
8282                 list_del(&ipr_cmd->queue);
8283                 ipr_cmd->done(ipr_cmd);
8284         }
8285
8286         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8287 }
8288
8289 /**
8290  * ipr_reset_start_timer - Start a timer for adapter reset job
8291  * @ipr_cmd:    ipr command struct
8292  * @timeout:    timeout value
8293  *
8294  * Description: This function is used in adapter reset processing
8295  * for timing events. If the reset_cmd pointer in the IOA
8296  * config struct is not this adapter's we are doing nested
8297  * resets and fail_all_ops will take care of freeing the
8298  * command block.
8299  *
8300  * Return value:
8301  *      none
8302  **/
8303 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8304                                   unsigned long timeout)
8305 {
8306
8307         ENTER;
8308         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8309         ipr_cmd->done = ipr_reset_ioa_job;
8310
8311         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8312         ipr_cmd->timer.expires = jiffies + timeout;
8313         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8314         add_timer(&ipr_cmd->timer);
8315 }
8316
8317 /**
8318  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8319  * @ioa_cfg:    ioa cfg struct
8320  *
8321  * Return value:
8322  *      nothing
8323  **/
8324 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8325 {
8326         struct ipr_hrr_queue *hrrq;
8327
8328         for_each_hrrq(hrrq, ioa_cfg) {
8329                 spin_lock(&hrrq->_lock);
8330                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8331
8332                 /* Initialize Host RRQ pointers */
8333                 hrrq->hrrq_start = hrrq->host_rrq;
8334                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8335                 hrrq->hrrq_curr = hrrq->hrrq_start;
8336                 hrrq->toggle_bit = 1;
8337                 spin_unlock(&hrrq->_lock);
8338         }
8339         wmb();
8340
8341         ioa_cfg->identify_hrrq_index = 0;
8342         if (ioa_cfg->hrrq_num == 1)
8343                 atomic_set(&ioa_cfg->hrrq_index, 0);
8344         else
8345                 atomic_set(&ioa_cfg->hrrq_index, 1);
8346
8347         /* Zero out config table */
8348         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8349 }
8350
8351 /**
8352  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8353  * @ipr_cmd:    ipr command struct
8354  *
8355  * Return value:
8356  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8357  **/
8358 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8359 {
8360         unsigned long stage, stage_time;
8361         u32 feedback;
8362         volatile u32 int_reg;
8363         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8364         u64 maskval = 0;
8365
8366         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8367         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8368         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8369
8370         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8371
8372         /* sanity check the stage_time value */
8373         if (stage_time == 0)
8374                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8375         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8376                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8377         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8378                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8379
8380         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8381                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8382                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8383                 stage_time = ioa_cfg->transop_timeout;
8384                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8385         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8386                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8387                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8388                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8389                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8390                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8391                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8392                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8393                         return IPR_RC_JOB_CONTINUE;
8394                 }
8395         }
8396
8397         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8398         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8399         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8400         ipr_cmd->done = ipr_reset_ioa_job;
8401         add_timer(&ipr_cmd->timer);
8402
8403         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8404
8405         return IPR_RC_JOB_RETURN;
8406 }
8407
8408 /**
8409  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8410  * @ipr_cmd:    ipr command struct
8411  *
8412  * This function reinitializes some control blocks and
8413  * enables destructive diagnostics on the adapter.
8414  *
8415  * Return value:
8416  *      IPR_RC_JOB_RETURN
8417  **/
8418 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8419 {
8420         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8421         volatile u32 int_reg;
8422         volatile u64 maskval;
8423         int i;
8424
8425         ENTER;
8426         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8427         ipr_init_ioa_mem(ioa_cfg);
8428
8429         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8430                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8431                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8432                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8433         }
8434         wmb();
8435         if (ioa_cfg->sis64) {
8436                 /* Set the adapter to the correct endian mode. */
8437                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8438                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8439         }
8440
8441         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8442
8443         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8444                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8445                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8446                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8447                 return IPR_RC_JOB_CONTINUE;
8448         }
8449
8450         /* Enable destructive diagnostics on IOA */
8451         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8452
8453         if (ioa_cfg->sis64) {
8454                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8455                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8456                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8457         } else
8458                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8459
8460         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8461
8462         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8463
8464         if (ioa_cfg->sis64) {
8465                 ipr_cmd->job_step = ipr_reset_next_stage;
8466                 return IPR_RC_JOB_CONTINUE;
8467         }
8468
8469         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8470         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8471         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8472         ipr_cmd->done = ipr_reset_ioa_job;
8473         add_timer(&ipr_cmd->timer);
8474         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8475
8476         LEAVE;
8477         return IPR_RC_JOB_RETURN;
8478 }
8479
8480 /**
8481  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8482  * @ipr_cmd:    ipr command struct
8483  *
8484  * This function is invoked when an adapter dump has run out
8485  * of processing time.
8486  *
8487  * Return value:
8488  *      IPR_RC_JOB_CONTINUE
8489  **/
8490 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8491 {
8492         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8493
8494         if (ioa_cfg->sdt_state == GET_DUMP)
8495                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8496         else if (ioa_cfg->sdt_state == READ_DUMP)
8497                 ioa_cfg->sdt_state = ABORT_DUMP;
8498
8499         ioa_cfg->dump_timeout = 1;
8500         ipr_cmd->job_step = ipr_reset_alert;
8501
8502         return IPR_RC_JOB_CONTINUE;
8503 }
8504
8505 /**
8506  * ipr_unit_check_no_data - Log a unit check/no data error log
8507  * @ioa_cfg:            ioa config struct
8508  *
8509  * Logs an error indicating the adapter unit checked, but for some
8510  * reason, we were unable to fetch the unit check buffer.
8511  *
8512  * Return value:
8513  *      nothing
8514  **/
8515 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8516 {
8517         ioa_cfg->errors_logged++;
8518         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8519 }
8520
8521 /**
8522  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8523  * @ioa_cfg:            ioa config struct
8524  *
8525  * Fetches the unit check buffer from the adapter by clocking the data
8526  * through the mailbox register.
8527  *
8528  * Return value:
8529  *      nothing
8530  **/
8531 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8532 {
8533         unsigned long mailbox;
8534         struct ipr_hostrcb *hostrcb;
8535         struct ipr_uc_sdt sdt;
8536         int rc, length;
8537         u32 ioasc;
8538
8539         mailbox = readl(ioa_cfg->ioa_mailbox);
8540
8541         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8542                 ipr_unit_check_no_data(ioa_cfg);
8543                 return;
8544         }
8545
8546         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8547         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8548                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8549
8550         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8551             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8552             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8553                 ipr_unit_check_no_data(ioa_cfg);
8554                 return;
8555         }
8556
8557         /* Find length of the first sdt entry (UC buffer) */
8558         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8559                 length = be32_to_cpu(sdt.entry[0].end_token);
8560         else
8561                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8562                           be32_to_cpu(sdt.entry[0].start_token)) &
8563                           IPR_FMT2_MBX_ADDR_MASK;
8564
8565         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8566                              struct ipr_hostrcb, queue);
8567         list_del_init(&hostrcb->queue);
8568         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8569
8570         rc = ipr_get_ldump_data_section(ioa_cfg,
8571                                         be32_to_cpu(sdt.entry[0].start_token),
8572                                         (__be32 *)&hostrcb->hcam,
8573                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8574
8575         if (!rc) {
8576                 ipr_handle_log_data(ioa_cfg, hostrcb);
8577                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8578                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8579                     ioa_cfg->sdt_state == GET_DUMP)
8580                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8581         } else
8582                 ipr_unit_check_no_data(ioa_cfg);
8583
8584         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8585 }
8586
8587 /**
8588  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8589  * @ipr_cmd:    ipr command struct
8590  *
8591  * Description: This function will call to get the unit check buffer.
8592  *
8593  * Return value:
8594  *      IPR_RC_JOB_RETURN
8595  **/
8596 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8597 {
8598         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8599
8600         ENTER;
8601         ioa_cfg->ioa_unit_checked = 0;
8602         ipr_get_unit_check_buffer(ioa_cfg);
8603         ipr_cmd->job_step = ipr_reset_alert;
8604         ipr_reset_start_timer(ipr_cmd, 0);
8605
8606         LEAVE;
8607         return IPR_RC_JOB_RETURN;
8608 }
8609
8610 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8611 {
8612         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8613
8614         ENTER;
8615
8616         if (ioa_cfg->sdt_state != GET_DUMP)
8617                 return IPR_RC_JOB_RETURN;
8618
8619         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8620             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8621              IPR_PCII_MAILBOX_STABLE)) {
8622
8623                 if (!ipr_cmd->u.time_left)
8624                         dev_err(&ioa_cfg->pdev->dev,
8625                                 "Timed out waiting for Mailbox register.\n");
8626
8627                 ioa_cfg->sdt_state = READ_DUMP;
8628                 ioa_cfg->dump_timeout = 0;
8629                 if (ioa_cfg->sis64)
8630                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8631                 else
8632                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8633                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8634                 schedule_work(&ioa_cfg->work_q);
8635
8636         } else {
8637                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8638                 ipr_reset_start_timer(ipr_cmd,
8639                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8640         }
8641
8642         LEAVE;
8643         return IPR_RC_JOB_RETURN;
8644 }
8645
8646 /**
8647  * ipr_reset_restore_cfg_space - Restore PCI config space.
8648  * @ipr_cmd:    ipr command struct
8649  *
8650  * Description: This function restores the saved PCI config space of
8651  * the adapter, fails all outstanding ops back to the callers, and
8652  * fetches the dump/unit check if applicable to this reset.
8653  *
8654  * Return value:
8655  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8656  **/
8657 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8658 {
8659         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8660         u32 int_reg;
8661
8662         ENTER;
8663         ioa_cfg->pdev->state_saved = true;
8664         pci_restore_state(ioa_cfg->pdev);
8665
8666         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8667                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8668                 return IPR_RC_JOB_CONTINUE;
8669         }
8670
8671         ipr_fail_all_ops(ioa_cfg);
8672
8673         if (ioa_cfg->sis64) {
8674                 /* Set the adapter to the correct endian mode. */
8675                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8676                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8677         }
8678
8679         if (ioa_cfg->ioa_unit_checked) {
8680                 if (ioa_cfg->sis64) {
8681                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8682                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8683                         return IPR_RC_JOB_RETURN;
8684                 } else {
8685                         ioa_cfg->ioa_unit_checked = 0;
8686                         ipr_get_unit_check_buffer(ioa_cfg);
8687                         ipr_cmd->job_step = ipr_reset_alert;
8688                         ipr_reset_start_timer(ipr_cmd, 0);
8689                         return IPR_RC_JOB_RETURN;
8690                 }
8691         }
8692
8693         if (ioa_cfg->in_ioa_bringdown) {
8694                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8695         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8696                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8697                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8698         } else {
8699                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8700         }
8701
8702         LEAVE;
8703         return IPR_RC_JOB_CONTINUE;
8704 }
8705
8706 /**
8707  * ipr_reset_bist_done - BIST has completed on the adapter.
8708  * @ipr_cmd:    ipr command struct
8709  *
8710  * Description: Unblock config space and resume the reset process.
8711  *
8712  * Return value:
8713  *      IPR_RC_JOB_CONTINUE
8714  **/
8715 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8716 {
8717         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8718
8719         ENTER;
8720         if (ioa_cfg->cfg_locked)
8721                 pci_cfg_access_unlock(ioa_cfg->pdev);
8722         ioa_cfg->cfg_locked = 0;
8723         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8724         LEAVE;
8725         return IPR_RC_JOB_CONTINUE;
8726 }
8727
8728 /**
8729  * ipr_reset_start_bist - Run BIST on the adapter.
8730  * @ipr_cmd:    ipr command struct
8731  *
8732  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8733  *
8734  * Return value:
8735  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8736  **/
8737 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8738 {
8739         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8740         int rc = PCIBIOS_SUCCESSFUL;
8741
8742         ENTER;
8743         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8744                 writel(IPR_UPROCI_SIS64_START_BIST,
8745                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8746         else
8747                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8748
8749         if (rc == PCIBIOS_SUCCESSFUL) {
8750                 ipr_cmd->job_step = ipr_reset_bist_done;
8751                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8752                 rc = IPR_RC_JOB_RETURN;
8753         } else {
8754                 if (ioa_cfg->cfg_locked)
8755                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8756                 ioa_cfg->cfg_locked = 0;
8757                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8758                 rc = IPR_RC_JOB_CONTINUE;
8759         }
8760
8761         LEAVE;
8762         return rc;
8763 }
8764
8765 /**
8766  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8767  * @ipr_cmd:    ipr command struct
8768  *
8769  * Description: This clears PCI reset to the adapter and delays two seconds.
8770  *
8771  * Return value:
8772  *      IPR_RC_JOB_RETURN
8773  **/
8774 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8775 {
8776         ENTER;
8777         ipr_cmd->job_step = ipr_reset_bist_done;
8778         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8779         LEAVE;
8780         return IPR_RC_JOB_RETURN;
8781 }
8782
8783 /**
8784  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8785  * @work:       work struct
8786  *
8787  * Description: This pulses warm reset to a slot.
8788  *
8789  **/
8790 static void ipr_reset_reset_work(struct work_struct *work)
8791 {
8792         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8793         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8794         struct pci_dev *pdev = ioa_cfg->pdev;
8795         unsigned long lock_flags = 0;
8796
8797         ENTER;
8798         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8799         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8800         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8801
8802         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8803         if (ioa_cfg->reset_cmd == ipr_cmd)
8804                 ipr_reset_ioa_job(ipr_cmd);
8805         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8806         LEAVE;
8807 }
8808
8809 /**
8810  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8811  * @ipr_cmd:    ipr command struct
8812  *
8813  * Description: This asserts PCI reset to the adapter.
8814  *
8815  * Return value:
8816  *      IPR_RC_JOB_RETURN
8817  **/
8818 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8819 {
8820         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8821
8822         ENTER;
8823         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8824         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8825         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8826         LEAVE;
8827         return IPR_RC_JOB_RETURN;
8828 }
8829
8830 /**
8831  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8832  * @ipr_cmd:    ipr command struct
8833  *
8834  * Description: This attempts to block config access to the IOA.
8835  *
8836  * Return value:
8837  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8838  **/
8839 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8840 {
8841         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8842         int rc = IPR_RC_JOB_CONTINUE;
8843
8844         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8845                 ioa_cfg->cfg_locked = 1;
8846                 ipr_cmd->job_step = ioa_cfg->reset;
8847         } else {
8848                 if (ipr_cmd->u.time_left) {
8849                         rc = IPR_RC_JOB_RETURN;
8850                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8851                         ipr_reset_start_timer(ipr_cmd,
8852                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8853                 } else {
8854                         ipr_cmd->job_step = ioa_cfg->reset;
8855                         dev_err(&ioa_cfg->pdev->dev,
8856                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8857                 }
8858         }
8859
8860         return rc;
8861 }
8862
8863 /**
8864  * ipr_reset_block_config_access - Block config access to the IOA
8865  * @ipr_cmd:    ipr command struct
8866  *
8867  * Description: This attempts to block config access to the IOA
8868  *
8869  * Return value:
8870  *      IPR_RC_JOB_CONTINUE
8871  **/
8872 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8873 {
8874         ipr_cmd->ioa_cfg->cfg_locked = 0;
8875         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8876         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8877         return IPR_RC_JOB_CONTINUE;
8878 }
8879
8880 /**
8881  * ipr_reset_allowed - Query whether or not IOA can be reset
8882  * @ioa_cfg:    ioa config struct
8883  *
8884  * Return value:
8885  *      0 if reset not allowed / non-zero if reset is allowed
8886  **/
8887 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8888 {
8889         volatile u32 temp_reg;
8890
8891         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8892         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8893 }
8894
8895 /**
8896  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8897  * @ipr_cmd:    ipr command struct
8898  *
8899  * Description: This function waits for adapter permission to run BIST,
8900  * then runs BIST. If the adapter does not give permission after a
8901  * reasonable time, we will reset the adapter anyway. The impact of
8902  * resetting the adapter without warning the adapter is the risk of
8903  * losing the persistent error log on the adapter. If the adapter is
8904  * reset while it is writing to the flash on the adapter, the flash
8905  * segment will have bad ECC and be zeroed.
8906  *
8907  * Return value:
8908  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8909  **/
8910 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8911 {
8912         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8913         int rc = IPR_RC_JOB_RETURN;
8914
8915         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8916                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8917                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8918         } else {
8919                 ipr_cmd->job_step = ipr_reset_block_config_access;
8920                 rc = IPR_RC_JOB_CONTINUE;
8921         }
8922
8923         return rc;
8924 }
8925
8926 /**
8927  * ipr_reset_alert - Alert the adapter of a pending reset
8928  * @ipr_cmd:    ipr command struct
8929  *
8930  * Description: This function alerts the adapter that it will be reset.
8931  * If memory space is not currently enabled, proceed directly
8932  * to running BIST on the adapter. The timer must always be started
8933  * so we guarantee we do not run BIST from ipr_isr.
8934  *
8935  * Return value:
8936  *      IPR_RC_JOB_RETURN
8937  **/
8938 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8939 {
8940         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8941         u16 cmd_reg;
8942         int rc;
8943
8944         ENTER;
8945         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8946
8947         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8948                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8949                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8950                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8951         } else {
8952                 ipr_cmd->job_step = ipr_reset_block_config_access;
8953         }
8954
8955         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8956         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8957
8958         LEAVE;
8959         return IPR_RC_JOB_RETURN;
8960 }
8961
8962 /**
8963  * ipr_reset_quiesce_done - Complete IOA disconnect
8964  * @ipr_cmd:    ipr command struct
8965  *
8966  * Description: Freeze the adapter to complete quiesce processing
8967  *
8968  * Return value:
8969  *      IPR_RC_JOB_CONTINUE
8970  **/
8971 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8972 {
8973         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8974
8975         ENTER;
8976         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8977         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8978         LEAVE;
8979         return IPR_RC_JOB_CONTINUE;
8980 }
8981
8982 /**
8983  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8984  * @ipr_cmd:    ipr command struct
8985  *
8986  * Description: Ensure nothing is outstanding to the IOA and
8987  *                      proceed with IOA disconnect. Otherwise reset the IOA.
8988  *
8989  * Return value:
8990  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8991  **/
8992 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8993 {
8994         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8995         struct ipr_cmnd *loop_cmd;
8996         struct ipr_hrr_queue *hrrq;
8997         int rc = IPR_RC_JOB_CONTINUE;
8998         int count = 0;
8999
9000         ENTER;
9001         ipr_cmd->job_step = ipr_reset_quiesce_done;
9002
9003         for_each_hrrq(hrrq, ioa_cfg) {
9004                 spin_lock(&hrrq->_lock);
9005                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9006                         count++;
9007                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9008                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9009                         rc = IPR_RC_JOB_RETURN;
9010                         break;
9011                 }
9012                 spin_unlock(&hrrq->_lock);
9013
9014                 if (count)
9015                         break;
9016         }
9017
9018         LEAVE;
9019         return rc;
9020 }
9021
9022 /**
9023  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9024  * @ipr_cmd:    ipr command struct
9025  *
9026  * Description: Cancel any oustanding HCAMs to the IOA.
9027  *
9028  * Return value:
9029  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9030  **/
9031 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9032 {
9033         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9034         int rc = IPR_RC_JOB_CONTINUE;
9035         struct ipr_cmd_pkt *cmd_pkt;
9036         struct ipr_cmnd *hcam_cmd;
9037         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9038
9039         ENTER;
9040         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9041
9042         if (!hrrq->ioa_is_dead) {
9043                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9044                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9045                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9046                                         continue;
9047
9048                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9049                                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9050                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9051                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9052                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9053                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9054                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9055                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9056                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9057                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9058                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9059                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9060                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9061                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9062
9063                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9064                                            IPR_CANCEL_TIMEOUT);
9065
9066                                 rc = IPR_RC_JOB_RETURN;
9067                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9068                                 break;
9069                         }
9070                 }
9071         } else
9072                 ipr_cmd->job_step = ipr_reset_alert;
9073
9074         LEAVE;
9075         return rc;
9076 }
9077
9078 /**
9079  * ipr_reset_ucode_download_done - Microcode download completion
9080  * @ipr_cmd:    ipr command struct
9081  *
9082  * Description: This function unmaps the microcode download buffer.
9083  *
9084  * Return value:
9085  *      IPR_RC_JOB_CONTINUE
9086  **/
9087 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9088 {
9089         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9090         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9091
9092         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9093                      sglist->num_sg, DMA_TO_DEVICE);
9094
9095         ipr_cmd->job_step = ipr_reset_alert;
9096         return IPR_RC_JOB_CONTINUE;
9097 }
9098
9099 /**
9100  * ipr_reset_ucode_download - Download microcode to the adapter
9101  * @ipr_cmd:    ipr command struct
9102  *
9103  * Description: This function checks to see if it there is microcode
9104  * to download to the adapter. If there is, a download is performed.
9105  *
9106  * Return value:
9107  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9108  **/
9109 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9110 {
9111         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9112         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9113
9114         ENTER;
9115         ipr_cmd->job_step = ipr_reset_alert;
9116
9117         if (!sglist)
9118                 return IPR_RC_JOB_CONTINUE;
9119
9120         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9121         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9122         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9123         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9124         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9125         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9126         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9127
9128         if (ioa_cfg->sis64)
9129                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9130         else
9131                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9132         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9133
9134         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9135                    IPR_WRITE_BUFFER_TIMEOUT);
9136
9137         LEAVE;
9138         return IPR_RC_JOB_RETURN;
9139 }
9140
9141 /**
9142  * ipr_reset_shutdown_ioa - Shutdown the adapter
9143  * @ipr_cmd:    ipr command struct
9144  *
9145  * Description: This function issues an adapter shutdown of the
9146  * specified type to the specified adapter as part of the
9147  * adapter reset job.
9148  *
9149  * Return value:
9150  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9151  **/
9152 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9153 {
9154         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9155         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9156         unsigned long timeout;
9157         int rc = IPR_RC_JOB_CONTINUE;
9158
9159         ENTER;
9160         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9161                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9162         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9163                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9164                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9165                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9166                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9167                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9168
9169                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9170                         timeout = IPR_SHUTDOWN_TIMEOUT;
9171                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9172                         timeout = IPR_INTERNAL_TIMEOUT;
9173                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9174                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9175                 else
9176                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9177
9178                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9179
9180                 rc = IPR_RC_JOB_RETURN;
9181                 ipr_cmd->job_step = ipr_reset_ucode_download;
9182         } else
9183                 ipr_cmd->job_step = ipr_reset_alert;
9184
9185         LEAVE;
9186         return rc;
9187 }
9188
9189 /**
9190  * ipr_reset_ioa_job - Adapter reset job
9191  * @ipr_cmd:    ipr command struct
9192  *
9193  * Description: This function is the job router for the adapter reset job.
9194  *
9195  * Return value:
9196  *      none
9197  **/
9198 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9199 {
9200         u32 rc, ioasc;
9201         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9202
9203         do {
9204                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9205
9206                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9207                         /*
9208                          * We are doing nested adapter resets and this is
9209                          * not the current reset job.
9210                          */
9211                         list_add_tail(&ipr_cmd->queue,
9212                                         &ipr_cmd->hrrq->hrrq_free_q);
9213                         return;
9214                 }
9215
9216                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9217                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9218                         if (rc == IPR_RC_JOB_RETURN)
9219                                 return;
9220                 }
9221
9222                 ipr_reinit_ipr_cmnd(ipr_cmd);
9223                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9224                 rc = ipr_cmd->job_step(ipr_cmd);
9225         } while (rc == IPR_RC_JOB_CONTINUE);
9226 }
9227
9228 /**
9229  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9230  * @ioa_cfg:            ioa config struct
9231  * @job_step:           first job step of reset job
9232  * @shutdown_type:      shutdown type
9233  *
9234  * Description: This function will initiate the reset of the given adapter
9235  * starting at the selected job step.
9236  * If the caller needs to wait on the completion of the reset,
9237  * the caller must sleep on the reset_wait_q.
9238  *
9239  * Return value:
9240  *      none
9241  **/
9242 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9243                                     int (*job_step) (struct ipr_cmnd *),
9244                                     enum ipr_shutdown_type shutdown_type)
9245 {
9246         struct ipr_cmnd *ipr_cmd;
9247         int i;
9248
9249         ioa_cfg->in_reset_reload = 1;
9250         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9251                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9252                 ioa_cfg->hrrq[i].allow_cmds = 0;
9253                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9254         }
9255         wmb();
9256         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9257                 ioa_cfg->scsi_unblock = 0;
9258                 ioa_cfg->scsi_blocked = 1;
9259                 scsi_block_requests(ioa_cfg->host);
9260         }
9261
9262         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9263         ioa_cfg->reset_cmd = ipr_cmd;
9264         ipr_cmd->job_step = job_step;
9265         ipr_cmd->u.shutdown_type = shutdown_type;
9266
9267         ipr_reset_ioa_job(ipr_cmd);
9268 }
9269
9270 /**
9271  * ipr_initiate_ioa_reset - Initiate an adapter reset
9272  * @ioa_cfg:            ioa config struct
9273  * @shutdown_type:      shutdown type
9274  *
9275  * Description: This function will initiate the reset of the given adapter.
9276  * If the caller needs to wait on the completion of the reset,
9277  * the caller must sleep on the reset_wait_q.
9278  *
9279  * Return value:
9280  *      none
9281  **/
9282 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9283                                    enum ipr_shutdown_type shutdown_type)
9284 {
9285         int i;
9286
9287         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9288                 return;
9289
9290         if (ioa_cfg->in_reset_reload) {
9291                 if (ioa_cfg->sdt_state == GET_DUMP)
9292                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9293                 else if (ioa_cfg->sdt_state == READ_DUMP)
9294                         ioa_cfg->sdt_state = ABORT_DUMP;
9295         }
9296
9297         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9298                 dev_err(&ioa_cfg->pdev->dev,
9299                         "IOA taken offline - error recovery failed\n");
9300
9301                 ioa_cfg->reset_retries = 0;
9302                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9303                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9304                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9305                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9306                 }
9307                 wmb();
9308
9309                 if (ioa_cfg->in_ioa_bringdown) {
9310                         ioa_cfg->reset_cmd = NULL;
9311                         ioa_cfg->in_reset_reload = 0;
9312                         ipr_fail_all_ops(ioa_cfg);
9313                         wake_up_all(&ioa_cfg->reset_wait_q);
9314
9315                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9316                                 ioa_cfg->scsi_unblock = 1;
9317                                 schedule_work(&ioa_cfg->work_q);
9318                         }
9319                         return;
9320                 } else {
9321                         ioa_cfg->in_ioa_bringdown = 1;
9322                         shutdown_type = IPR_SHUTDOWN_NONE;
9323                 }
9324         }
9325
9326         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9327                                 shutdown_type);
9328 }
9329
9330 /**
9331  * ipr_reset_freeze - Hold off all I/O activity
9332  * @ipr_cmd:    ipr command struct
9333  *
9334  * Description: If the PCI slot is frozen, hold off all I/O
9335  * activity; then, as soon as the slot is available again,
9336  * initiate an adapter reset.
9337  */
9338 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9339 {
9340         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9341         int i;
9342
9343         /* Disallow new interrupts, avoid loop */
9344         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9345                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9346                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9347                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9348         }
9349         wmb();
9350         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9351         ipr_cmd->done = ipr_reset_ioa_job;
9352         return IPR_RC_JOB_RETURN;
9353 }
9354
9355 /**
9356  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9357  * @pdev:       PCI device struct
9358  *
9359  * Description: This routine is called to tell us that the MMIO
9360  * access to the IOA has been restored
9361  */
9362 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9363 {
9364         unsigned long flags = 0;
9365         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9366
9367         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9368         if (!ioa_cfg->probe_done)
9369                 pci_save_state(pdev);
9370         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9371         return PCI_ERS_RESULT_NEED_RESET;
9372 }
9373
9374 /**
9375  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9376  * @pdev:       PCI device struct
9377  *
9378  * Description: This routine is called to tell us that the PCI bus
9379  * is down. Can't do anything here, except put the device driver
9380  * into a holding pattern, waiting for the PCI bus to come back.
9381  */
9382 static void ipr_pci_frozen(struct pci_dev *pdev)
9383 {
9384         unsigned long flags = 0;
9385         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9386
9387         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9388         if (ioa_cfg->probe_done)
9389                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9390         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9391 }
9392
9393 /**
9394  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9395  * @pdev:       PCI device struct
9396  *
9397  * Description: This routine is called by the pci error recovery
9398  * code after the PCI slot has been reset, just before we
9399  * should resume normal operations.
9400  */
9401 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9402 {
9403         unsigned long flags = 0;
9404         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9405
9406         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9407         if (ioa_cfg->probe_done) {
9408                 if (ioa_cfg->needs_warm_reset)
9409                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9410                 else
9411                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9412                                                 IPR_SHUTDOWN_NONE);
9413         } else
9414                 wake_up_all(&ioa_cfg->eeh_wait_q);
9415         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9416         return PCI_ERS_RESULT_RECOVERED;
9417 }
9418
9419 /**
9420  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9421  * @pdev:       PCI device struct
9422  *
9423  * Description: This routine is called when the PCI bus has
9424  * permanently failed.
9425  */
9426 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9427 {
9428         unsigned long flags = 0;
9429         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9430         int i;
9431
9432         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9433         if (ioa_cfg->probe_done) {
9434                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9435                         ioa_cfg->sdt_state = ABORT_DUMP;
9436                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9437                 ioa_cfg->in_ioa_bringdown = 1;
9438                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9439                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9440                         ioa_cfg->hrrq[i].allow_cmds = 0;
9441                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9442                 }
9443                 wmb();
9444                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9445         } else
9446                 wake_up_all(&ioa_cfg->eeh_wait_q);
9447         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9448 }
9449
9450 /**
9451  * ipr_pci_error_detected - Called when a PCI error is detected.
9452  * @pdev:       PCI device struct
9453  * @state:      PCI channel state
9454  *
9455  * Description: Called when a PCI error is detected.
9456  *
9457  * Return value:
9458  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9459  */
9460 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9461                                                pci_channel_state_t state)
9462 {
9463         switch (state) {
9464         case pci_channel_io_frozen:
9465                 ipr_pci_frozen(pdev);
9466                 return PCI_ERS_RESULT_CAN_RECOVER;
9467         case pci_channel_io_perm_failure:
9468                 ipr_pci_perm_failure(pdev);
9469                 return PCI_ERS_RESULT_DISCONNECT;
9470                 break;
9471         default:
9472                 break;
9473         }
9474         return PCI_ERS_RESULT_NEED_RESET;
9475 }
9476
9477 /**
9478  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9479  * @ioa_cfg:    ioa cfg struct
9480  *
9481  * Description: This is the second phase of adapter initialization
9482  * This function takes care of initilizing the adapter to the point
9483  * where it can accept new commands.
9484
9485  * Return value:
9486  *      0 on success / -EIO on failure
9487  **/
9488 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9489 {
9490         int rc = 0;
9491         unsigned long host_lock_flags = 0;
9492
9493         ENTER;
9494         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9495         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9496         ioa_cfg->probe_done = 1;
9497         if (ioa_cfg->needs_hard_reset) {
9498                 ioa_cfg->needs_hard_reset = 0;
9499                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9500         } else
9501                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9502                                         IPR_SHUTDOWN_NONE);
9503         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9504
9505         LEAVE;
9506         return rc;
9507 }
9508
9509 /**
9510  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9511  * @ioa_cfg:    ioa config struct
9512  *
9513  * Return value:
9514  *      none
9515  **/
9516 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9517 {
9518         int i;
9519
9520         if (ioa_cfg->ipr_cmnd_list) {
9521                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9522                         if (ioa_cfg->ipr_cmnd_list[i])
9523                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9524                                               ioa_cfg->ipr_cmnd_list[i],
9525                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9526
9527                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9528                 }
9529         }
9530
9531         if (ioa_cfg->ipr_cmd_pool)
9532                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9533
9534         kfree(ioa_cfg->ipr_cmnd_list);
9535         kfree(ioa_cfg->ipr_cmnd_list_dma);
9536         ioa_cfg->ipr_cmnd_list = NULL;
9537         ioa_cfg->ipr_cmnd_list_dma = NULL;
9538         ioa_cfg->ipr_cmd_pool = NULL;
9539 }
9540
9541 /**
9542  * ipr_free_mem - Frees memory allocated for an adapter
9543  * @ioa_cfg:    ioa cfg struct
9544  *
9545  * Return value:
9546  *      nothing
9547  **/
9548 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9549 {
9550         int i;
9551
9552         kfree(ioa_cfg->res_entries);
9553         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9554                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9555         ipr_free_cmd_blks(ioa_cfg);
9556
9557         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9558                 dma_free_coherent(&ioa_cfg->pdev->dev,
9559                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9560                                   ioa_cfg->hrrq[i].host_rrq,
9561                                   ioa_cfg->hrrq[i].host_rrq_dma);
9562
9563         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9564                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9565
9566         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9567                 dma_free_coherent(&ioa_cfg->pdev->dev,
9568                                   sizeof(struct ipr_hostrcb),
9569                                   ioa_cfg->hostrcb[i],
9570                                   ioa_cfg->hostrcb_dma[i]);
9571         }
9572
9573         ipr_free_dump(ioa_cfg);
9574         kfree(ioa_cfg->trace);
9575 }
9576
9577 /**
9578  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9579  * @ioa_cfg:    ipr cfg struct
9580  *
9581  * This function frees all allocated IRQs for the
9582  * specified adapter.
9583  *
9584  * Return value:
9585  *      none
9586  **/
9587 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9588 {
9589         struct pci_dev *pdev = ioa_cfg->pdev;
9590         int i;
9591
9592         for (i = 0; i < ioa_cfg->nvectors; i++)
9593                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9594         pci_free_irq_vectors(pdev);
9595 }
9596
9597 /**
9598  * ipr_free_all_resources - Free all allocated resources for an adapter.
9599  * @ipr_cmd:    ipr command struct
9600  *
9601  * This function frees all allocated resources for the
9602  * specified adapter.
9603  *
9604  * Return value:
9605  *      none
9606  **/
9607 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9608 {
9609         struct pci_dev *pdev = ioa_cfg->pdev;
9610
9611         ENTER;
9612         ipr_free_irqs(ioa_cfg);
9613         if (ioa_cfg->reset_work_q)
9614                 destroy_workqueue(ioa_cfg->reset_work_q);
9615         iounmap(ioa_cfg->hdw_dma_regs);
9616         pci_release_regions(pdev);
9617         ipr_free_mem(ioa_cfg);
9618         scsi_host_put(ioa_cfg->host);
9619         pci_disable_device(pdev);
9620         LEAVE;
9621 }
9622
9623 /**
9624  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9625  * @ioa_cfg:    ioa config struct
9626  *
9627  * Return value:
9628  *      0 on success / -ENOMEM on allocation failure
9629  **/
9630 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9631 {
9632         struct ipr_cmnd *ipr_cmd;
9633         struct ipr_ioarcb *ioarcb;
9634         dma_addr_t dma_addr;
9635         int i, entries_each_hrrq, hrrq_id = 0;
9636
9637         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9638                                                 sizeof(struct ipr_cmnd), 512, 0);
9639
9640         if (!ioa_cfg->ipr_cmd_pool)
9641                 return -ENOMEM;
9642
9643         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9644         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9645
9646         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9647                 ipr_free_cmd_blks(ioa_cfg);
9648                 return -ENOMEM;
9649         }
9650
9651         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9652                 if (ioa_cfg->hrrq_num > 1) {
9653                         if (i == 0) {
9654                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9655                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9656                                         ioa_cfg->hrrq[i].max_cmd_id =
9657                                                 (entries_each_hrrq - 1);
9658                         } else {
9659                                 entries_each_hrrq =
9660                                         IPR_NUM_BASE_CMD_BLKS/
9661                                         (ioa_cfg->hrrq_num - 1);
9662                                 ioa_cfg->hrrq[i].min_cmd_id =
9663                                         IPR_NUM_INTERNAL_CMD_BLKS +
9664                                         (i - 1) * entries_each_hrrq;
9665                                 ioa_cfg->hrrq[i].max_cmd_id =
9666                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9667                                         i * entries_each_hrrq - 1);
9668                         }
9669                 } else {
9670                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9671                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9672                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9673                 }
9674                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9675         }
9676
9677         BUG_ON(ioa_cfg->hrrq_num == 0);
9678
9679         i = IPR_NUM_CMD_BLKS -
9680                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9681         if (i > 0) {
9682                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9683                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9684         }
9685
9686         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9687                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9688
9689                 if (!ipr_cmd) {
9690                         ipr_free_cmd_blks(ioa_cfg);
9691                         return -ENOMEM;
9692                 }
9693
9694                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9695                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9696                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9697
9698                 ioarcb = &ipr_cmd->ioarcb;
9699                 ipr_cmd->dma_addr = dma_addr;
9700                 if (ioa_cfg->sis64)
9701                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9702                 else
9703                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9704
9705                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9706                 if (ioa_cfg->sis64) {
9707                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9708                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9709                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9710                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9711                 } else {
9712                         ioarcb->write_ioadl_addr =
9713                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9714                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9715                         ioarcb->ioasa_host_pci_addr =
9716                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9717                 }
9718                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9719                 ipr_cmd->cmd_index = i;
9720                 ipr_cmd->ioa_cfg = ioa_cfg;
9721                 ipr_cmd->sense_buffer_dma = dma_addr +
9722                         offsetof(struct ipr_cmnd, sense_buffer);
9723
9724                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9725                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9726                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9727                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9728                         hrrq_id++;
9729         }
9730
9731         return 0;
9732 }
9733
9734 /**
9735  * ipr_alloc_mem - Allocate memory for an adapter
9736  * @ioa_cfg:    ioa config struct
9737  *
9738  * Return value:
9739  *      0 on success / non-zero for error
9740  **/
9741 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9742 {
9743         struct pci_dev *pdev = ioa_cfg->pdev;
9744         int i, rc = -ENOMEM;
9745
9746         ENTER;
9747         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9748                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9749
9750         if (!ioa_cfg->res_entries)
9751                 goto out;
9752
9753         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9754                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9755                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9756         }
9757
9758         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9759                                               sizeof(struct ipr_misc_cbs),
9760                                               &ioa_cfg->vpd_cbs_dma,
9761                                               GFP_KERNEL);
9762
9763         if (!ioa_cfg->vpd_cbs)
9764                 goto out_free_res_entries;
9765
9766         if (ipr_alloc_cmd_blks(ioa_cfg))
9767                 goto out_free_vpd_cbs;
9768
9769         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9770                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9771                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9772                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9773                                         GFP_KERNEL);
9774
9775                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9776                         while (--i > 0)
9777                                 dma_free_coherent(&pdev->dev,
9778                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9779                                         ioa_cfg->hrrq[i].host_rrq,
9780                                         ioa_cfg->hrrq[i].host_rrq_dma);
9781                         goto out_ipr_free_cmd_blocks;
9782                 }
9783                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9784         }
9785
9786         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9787                                                   ioa_cfg->cfg_table_size,
9788                                                   &ioa_cfg->cfg_table_dma,
9789                                                   GFP_KERNEL);
9790
9791         if (!ioa_cfg->u.cfg_table)
9792                 goto out_free_host_rrq;
9793
9794         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9795                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9796                                                          sizeof(struct ipr_hostrcb),
9797                                                          &ioa_cfg->hostrcb_dma[i],
9798                                                          GFP_KERNEL);
9799
9800                 if (!ioa_cfg->hostrcb[i])
9801                         goto out_free_hostrcb_dma;
9802
9803                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9804                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9805                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9806                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9807         }
9808
9809         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9810                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9811
9812         if (!ioa_cfg->trace)
9813                 goto out_free_hostrcb_dma;
9814
9815         rc = 0;
9816 out:
9817         LEAVE;
9818         return rc;
9819
9820 out_free_hostrcb_dma:
9821         while (i-- > 0) {
9822                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9823                                   ioa_cfg->hostrcb[i],
9824                                   ioa_cfg->hostrcb_dma[i]);
9825         }
9826         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9827                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9828 out_free_host_rrq:
9829         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9830                 dma_free_coherent(&pdev->dev,
9831                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9832                                   ioa_cfg->hrrq[i].host_rrq,
9833                                   ioa_cfg->hrrq[i].host_rrq_dma);
9834         }
9835 out_ipr_free_cmd_blocks:
9836         ipr_free_cmd_blks(ioa_cfg);
9837 out_free_vpd_cbs:
9838         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9839                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9840 out_free_res_entries:
9841         kfree(ioa_cfg->res_entries);
9842         goto out;
9843 }
9844
9845 /**
9846  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9847  * @ioa_cfg:    ioa config struct
9848  *
9849  * Return value:
9850  *      none
9851  **/
9852 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9853 {
9854         int i;
9855
9856         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9857                 ioa_cfg->bus_attr[i].bus = i;
9858                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9859                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9860                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9861                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9862                 else
9863                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9864         }
9865 }
9866
9867 /**
9868  * ipr_init_regs - Initialize IOA registers
9869  * @ioa_cfg:    ioa config struct
9870  *
9871  * Return value:
9872  *      none
9873  **/
9874 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9875 {
9876         const struct ipr_interrupt_offsets *p;
9877         struct ipr_interrupts *t;
9878         void __iomem *base;
9879
9880         p = &ioa_cfg->chip_cfg->regs;
9881         t = &ioa_cfg->regs;
9882         base = ioa_cfg->hdw_dma_regs;
9883
9884         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9885         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9886         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9887         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9888         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9889         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9890         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9891         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9892         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9893         t->ioarrin_reg = base + p->ioarrin_reg;
9894         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9895         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9896         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9897         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9898         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9899         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9900
9901         if (ioa_cfg->sis64) {
9902                 t->init_feedback_reg = base + p->init_feedback_reg;
9903                 t->dump_addr_reg = base + p->dump_addr_reg;
9904                 t->dump_data_reg = base + p->dump_data_reg;
9905                 t->endian_swap_reg = base + p->endian_swap_reg;
9906         }
9907 }
9908
9909 /**
9910  * ipr_init_ioa_cfg - Initialize IOA config struct
9911  * @ioa_cfg:    ioa config struct
9912  * @host:               scsi host struct
9913  * @pdev:               PCI dev struct
9914  *
9915  * Return value:
9916  *      none
9917  **/
9918 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9919                              struct Scsi_Host *host, struct pci_dev *pdev)
9920 {
9921         int i;
9922
9923         ioa_cfg->host = host;
9924         ioa_cfg->pdev = pdev;
9925         ioa_cfg->log_level = ipr_log_level;
9926         ioa_cfg->doorbell = IPR_DOORBELL;
9927         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9928         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9929         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9930         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9931         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9932         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9933
9934         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9935         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9936         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9937         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9938         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9939         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9940         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9941         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9942         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9943         ioa_cfg->sdt_state = INACTIVE;
9944
9945         ipr_initialize_bus_attr(ioa_cfg);
9946         ioa_cfg->max_devs_supported = ipr_max_devs;
9947
9948         if (ioa_cfg->sis64) {
9949                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9950                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9951                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9952                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9953                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9954                                            + ((sizeof(struct ipr_config_table_entry64)
9955                                                * ioa_cfg->max_devs_supported)));
9956         } else {
9957                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9958                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9959                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9960                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9961                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9962                                            + ((sizeof(struct ipr_config_table_entry)
9963                                                * ioa_cfg->max_devs_supported)));
9964         }
9965
9966         host->max_channel = IPR_VSET_BUS;
9967         host->unique_id = host->host_no;
9968         host->max_cmd_len = IPR_MAX_CDB_LEN;
9969         host->can_queue = ioa_cfg->max_cmds;
9970         pci_set_drvdata(pdev, ioa_cfg);
9971
9972         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9973                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9974                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9975                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9976                 if (i == 0)
9977                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9978                 else
9979                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9980         }
9981 }
9982
9983 /**
9984  * ipr_get_chip_info - Find adapter chip information
9985  * @dev_id:             PCI device id struct
9986  *
9987  * Return value:
9988  *      ptr to chip information on success / NULL on failure
9989  **/
9990 static const struct ipr_chip_t *
9991 ipr_get_chip_info(const struct pci_device_id *dev_id)
9992 {
9993         int i;
9994
9995         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9996                 if (ipr_chip[i].vendor == dev_id->vendor &&
9997                     ipr_chip[i].device == dev_id->device)
9998                         return &ipr_chip[i];
9999         return NULL;
10000 }
10001
10002 /**
10003  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10004  *                                              during probe time
10005  * @ioa_cfg:    ioa config struct
10006  *
10007  * Return value:
10008  *      None
10009  **/
10010 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10011 {
10012         struct pci_dev *pdev = ioa_cfg->pdev;
10013
10014         if (pci_channel_offline(pdev)) {
10015                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10016                                    !pci_channel_offline(pdev),
10017                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10018                 pci_restore_state(pdev);
10019         }
10020 }
10021
10022 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10023 {
10024         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10025
10026         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10027                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10028                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10029                 ioa_cfg->vectors_info[vec_idx].
10030                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10031         }
10032 }
10033
10034 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10035                 struct pci_dev *pdev)
10036 {
10037         int i, rc;
10038
10039         for (i = 1; i < ioa_cfg->nvectors; i++) {
10040                 rc = request_irq(pci_irq_vector(pdev, i),
10041                         ipr_isr_mhrrq,
10042                         0,
10043                         ioa_cfg->vectors_info[i].desc,
10044                         &ioa_cfg->hrrq[i]);
10045                 if (rc) {
10046                         while (--i >= 0)
10047                                 free_irq(pci_irq_vector(pdev, i),
10048                                         &ioa_cfg->hrrq[i]);
10049                         return rc;
10050                 }
10051         }
10052         return 0;
10053 }
10054
10055 /**
10056  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10057  * @pdev:               PCI device struct
10058  *
10059  * Description: Simply set the msi_received flag to 1 indicating that
10060  * Message Signaled Interrupts are supported.
10061  *
10062  * Return value:
10063  *      0 on success / non-zero on failure
10064  **/
10065 static irqreturn_t ipr_test_intr(int irq, void *devp)
10066 {
10067         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10068         unsigned long lock_flags = 0;
10069         irqreturn_t rc = IRQ_HANDLED;
10070
10071         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10072         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10073
10074         ioa_cfg->msi_received = 1;
10075         wake_up(&ioa_cfg->msi_wait_q);
10076
10077         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10078         return rc;
10079 }
10080
10081 /**
10082  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10083  * @pdev:               PCI device struct
10084  *
10085  * Description: This routine sets up and initiates a test interrupt to determine
10086  * if the interrupt is received via the ipr_test_intr() service routine.
10087  * If the tests fails, the driver will fall back to LSI.
10088  *
10089  * Return value:
10090  *      0 on success / non-zero on failure
10091  **/
10092 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10093 {
10094         int rc;
10095         volatile u32 int_reg;
10096         unsigned long lock_flags = 0;
10097         int irq = pci_irq_vector(pdev, 0);
10098
10099         ENTER;
10100
10101         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10102         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10103         ioa_cfg->msi_received = 0;
10104         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10105         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10106         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10107         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10108
10109         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10110         if (rc) {
10111                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10112                 return rc;
10113         } else if (ipr_debug)
10114                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10115
10116         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10117         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10118         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10119         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10120         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10121
10122         if (!ioa_cfg->msi_received) {
10123                 /* MSI test failed */
10124                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10125                 rc = -EOPNOTSUPP;
10126         } else if (ipr_debug)
10127                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10128
10129         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10130
10131         free_irq(irq, ioa_cfg);
10132
10133         LEAVE;
10134
10135         return rc;
10136 }
10137
10138  /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10139  * @pdev:               PCI device struct
10140  * @dev_id:             PCI device id struct
10141  *
10142  * Return value:
10143  *      0 on success / non-zero on failure
10144  **/
10145 static int ipr_probe_ioa(struct pci_dev *pdev,
10146                          const struct pci_device_id *dev_id)
10147 {
10148         struct ipr_ioa_cfg *ioa_cfg;
10149         struct Scsi_Host *host;
10150         unsigned long ipr_regs_pci;
10151         void __iomem *ipr_regs;
10152         int rc = PCIBIOS_SUCCESSFUL;
10153         volatile u32 mask, uproc, interrupts;
10154         unsigned long lock_flags, driver_lock_flags;
10155         unsigned int irq_flag;
10156
10157         ENTER;
10158
10159         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10160         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10161
10162         if (!host) {
10163                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10164                 rc = -ENOMEM;
10165                 goto out;
10166         }
10167
10168         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10169         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10170         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10171
10172         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10173
10174         if (!ioa_cfg->ipr_chip) {
10175                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10176                         dev_id->vendor, dev_id->device);
10177                 goto out_scsi_host_put;
10178         }
10179
10180         /* set SIS 32 or SIS 64 */
10181         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10182         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10183         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10184         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10185
10186         if (ipr_transop_timeout)
10187                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10188         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10189                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10190         else
10191                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10192
10193         ioa_cfg->revid = pdev->revision;
10194
10195         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10196
10197         ipr_regs_pci = pci_resource_start(pdev, 0);
10198
10199         rc = pci_request_regions(pdev, IPR_NAME);
10200         if (rc < 0) {
10201                 dev_err(&pdev->dev,
10202                         "Couldn't register memory range of registers\n");
10203                 goto out_scsi_host_put;
10204         }
10205
10206         rc = pci_enable_device(pdev);
10207
10208         if (rc || pci_channel_offline(pdev)) {
10209                 if (pci_channel_offline(pdev)) {
10210                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10211                         rc = pci_enable_device(pdev);
10212                 }
10213
10214                 if (rc) {
10215                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10216                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10217                         goto out_release_regions;
10218                 }
10219         }
10220
10221         ipr_regs = pci_ioremap_bar(pdev, 0);
10222
10223         if (!ipr_regs) {
10224                 dev_err(&pdev->dev,
10225                         "Couldn't map memory range of registers\n");
10226                 rc = -ENOMEM;
10227                 goto out_disable;
10228         }
10229
10230         ioa_cfg->hdw_dma_regs = ipr_regs;
10231         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10232         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10233
10234         ipr_init_regs(ioa_cfg);
10235
10236         if (ioa_cfg->sis64) {
10237                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10238                 if (rc < 0) {
10239                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10240                         rc = dma_set_mask_and_coherent(&pdev->dev,
10241                                                        DMA_BIT_MASK(32));
10242                 }
10243         } else
10244                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10245
10246         if (rc < 0) {
10247                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10248                 goto cleanup_nomem;
10249         }
10250
10251         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10252                                    ioa_cfg->chip_cfg->cache_line_size);
10253
10254         if (rc != PCIBIOS_SUCCESSFUL) {
10255                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10256                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10257                 rc = -EIO;
10258                 goto cleanup_nomem;
10259         }
10260
10261         /* Issue MMIO read to ensure card is not in EEH */
10262         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10263         ipr_wait_for_pci_err_recovery(ioa_cfg);
10264
10265         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10266                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10267                         IPR_MAX_MSIX_VECTORS);
10268                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10269         }
10270
10271         irq_flag = PCI_IRQ_LEGACY;
10272         if (ioa_cfg->ipr_chip->has_msi)
10273                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10274         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10275         if (rc < 0) {
10276                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10277                 goto cleanup_nomem;
10278         }
10279         ioa_cfg->nvectors = rc;
10280
10281         if (!pdev->msi_enabled && !pdev->msix_enabled)
10282                 ioa_cfg->clear_isr = 1;
10283
10284         pci_set_master(pdev);
10285
10286         if (pci_channel_offline(pdev)) {
10287                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10288                 pci_set_master(pdev);
10289                 if (pci_channel_offline(pdev)) {
10290                         rc = -EIO;
10291                         goto out_msi_disable;
10292                 }
10293         }
10294
10295         if (pdev->msi_enabled || pdev->msix_enabled) {
10296                 rc = ipr_test_msi(ioa_cfg, pdev);
10297                 switch (rc) {
10298                 case 0:
10299                         dev_info(&pdev->dev,
10300                                 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10301                                 pdev->msix_enabled ? "-X" : "");
10302                         break;
10303                 case -EOPNOTSUPP:
10304                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10305                         pci_free_irq_vectors(pdev);
10306
10307                         ioa_cfg->nvectors = 1;
10308                         ioa_cfg->clear_isr = 1;
10309                         break;
10310                 default:
10311                         goto out_msi_disable;
10312                 }
10313         }
10314
10315         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10316                                 (unsigned int)num_online_cpus(),
10317                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10318
10319         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10320                 goto out_msi_disable;
10321
10322         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10323                 goto out_msi_disable;
10324
10325         rc = ipr_alloc_mem(ioa_cfg);
10326         if (rc < 0) {
10327                 dev_err(&pdev->dev,
10328                         "Couldn't allocate enough memory for device driver!\n");
10329                 goto out_msi_disable;
10330         }
10331
10332         /* Save away PCI config space for use following IOA reset */
10333         rc = pci_save_state(pdev);
10334
10335         if (rc != PCIBIOS_SUCCESSFUL) {
10336                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10337                 rc = -EIO;
10338                 goto cleanup_nolog;
10339         }
10340
10341         /*
10342          * If HRRQ updated interrupt is not masked, or reset alert is set,
10343          * the card is in an unknown state and needs a hard reset
10344          */
10345         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10346         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10347         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10348         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10349                 ioa_cfg->needs_hard_reset = 1;
10350         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10351                 ioa_cfg->needs_hard_reset = 1;
10352         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10353                 ioa_cfg->ioa_unit_checked = 1;
10354
10355         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10356         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10357         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10358
10359         if (pdev->msi_enabled || pdev->msix_enabled) {
10360                 name_msi_vectors(ioa_cfg);
10361                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10362                         ioa_cfg->vectors_info[0].desc,
10363                         &ioa_cfg->hrrq[0]);
10364                 if (!rc)
10365                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10366         } else {
10367                 rc = request_irq(pdev->irq, ipr_isr,
10368                          IRQF_SHARED,
10369                          IPR_NAME, &ioa_cfg->hrrq[0]);
10370         }
10371         if (rc) {
10372                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10373                         pdev->irq, rc);
10374                 goto cleanup_nolog;
10375         }
10376
10377         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10378             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10379                 ioa_cfg->needs_warm_reset = 1;
10380                 ioa_cfg->reset = ipr_reset_slot_reset;
10381
10382                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10383                                                                 WQ_MEM_RECLAIM, host->host_no);
10384
10385                 if (!ioa_cfg->reset_work_q) {
10386                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10387                         rc = -ENOMEM;
10388                         goto out_free_irq;
10389                 }
10390         } else
10391                 ioa_cfg->reset = ipr_reset_start_bist;
10392
10393         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10394         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10395         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10396
10397         LEAVE;
10398 out:
10399         return rc;
10400
10401 out_free_irq:
10402         ipr_free_irqs(ioa_cfg);
10403 cleanup_nolog:
10404         ipr_free_mem(ioa_cfg);
10405 out_msi_disable:
10406         ipr_wait_for_pci_err_recovery(ioa_cfg);
10407         pci_free_irq_vectors(pdev);
10408 cleanup_nomem:
10409         iounmap(ipr_regs);
10410 out_disable:
10411         pci_disable_device(pdev);
10412 out_release_regions:
10413         pci_release_regions(pdev);
10414 out_scsi_host_put:
10415         scsi_host_put(host);
10416         goto out;
10417 }
10418
10419 /**
10420  * ipr_initiate_ioa_bringdown - Bring down an adapter
10421  * @ioa_cfg:            ioa config struct
10422  * @shutdown_type:      shutdown type
10423  *
10424  * Description: This function will initiate bringing down the adapter.
10425  * This consists of issuing an IOA shutdown to the adapter
10426  * to flush the cache, and running BIST.
10427  * If the caller needs to wait on the completion of the reset,
10428  * the caller must sleep on the reset_wait_q.
10429  *
10430  * Return value:
10431  *      none
10432  **/
10433 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10434                                        enum ipr_shutdown_type shutdown_type)
10435 {
10436         ENTER;
10437         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10438                 ioa_cfg->sdt_state = ABORT_DUMP;
10439         ioa_cfg->reset_retries = 0;
10440         ioa_cfg->in_ioa_bringdown = 1;
10441         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10442         LEAVE;
10443 }
10444
10445 /**
10446  * __ipr_remove - Remove a single adapter
10447  * @pdev:       pci device struct
10448  *
10449  * Adapter hot plug remove entry point.
10450  *
10451  * Return value:
10452  *      none
10453  **/
10454 static void __ipr_remove(struct pci_dev *pdev)
10455 {
10456         unsigned long host_lock_flags = 0;
10457         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10458         int i;
10459         unsigned long driver_lock_flags;
10460         ENTER;
10461
10462         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10463         while (ioa_cfg->in_reset_reload) {
10464                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10465                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10466                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10467         }
10468
10469         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10470                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10471                 ioa_cfg->hrrq[i].removing_ioa = 1;
10472                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10473         }
10474         wmb();
10475         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10476
10477         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10478         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10479         flush_work(&ioa_cfg->work_q);
10480         if (ioa_cfg->reset_work_q)
10481                 flush_workqueue(ioa_cfg->reset_work_q);
10482         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10483         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10484
10485         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10486         list_del(&ioa_cfg->queue);
10487         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10488
10489         if (ioa_cfg->sdt_state == ABORT_DUMP)
10490                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10491         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10492
10493         ipr_free_all_resources(ioa_cfg);
10494
10495         LEAVE;
10496 }
10497
10498 /**
10499  * ipr_remove - IOA hot plug remove entry point
10500  * @pdev:       pci device struct
10501  *
10502  * Adapter hot plug remove entry point.
10503  *
10504  * Return value:
10505  *      none
10506  **/
10507 static void ipr_remove(struct pci_dev *pdev)
10508 {
10509         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10510
10511         ENTER;
10512
10513         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10514                               &ipr_trace_attr);
10515         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10516                              &ipr_dump_attr);
10517         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10518                         &ipr_ioa_async_err_log);
10519         scsi_remove_host(ioa_cfg->host);
10520
10521         __ipr_remove(pdev);
10522
10523         LEAVE;
10524 }
10525
10526 /**
10527  * ipr_probe - Adapter hot plug add entry point
10528  *
10529  * Return value:
10530  *      0 on success / non-zero on failure
10531  **/
10532 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10533 {
10534         struct ipr_ioa_cfg *ioa_cfg;
10535         unsigned long flags;
10536         int rc, i;
10537
10538         rc = ipr_probe_ioa(pdev, dev_id);
10539
10540         if (rc)
10541                 return rc;
10542
10543         ioa_cfg = pci_get_drvdata(pdev);
10544         rc = ipr_probe_ioa_part2(ioa_cfg);
10545
10546         if (rc) {
10547                 __ipr_remove(pdev);
10548                 return rc;
10549         }
10550
10551         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10552
10553         if (rc) {
10554                 __ipr_remove(pdev);
10555                 return rc;
10556         }
10557
10558         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10559                                    &ipr_trace_attr);
10560
10561         if (rc) {
10562                 scsi_remove_host(ioa_cfg->host);
10563                 __ipr_remove(pdev);
10564                 return rc;
10565         }
10566
10567         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10568                         &ipr_ioa_async_err_log);
10569
10570         if (rc) {
10571                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10572                                 &ipr_dump_attr);
10573                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10574                                 &ipr_trace_attr);
10575                 scsi_remove_host(ioa_cfg->host);
10576                 __ipr_remove(pdev);
10577                 return rc;
10578         }
10579
10580         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10581                                    &ipr_dump_attr);
10582
10583         if (rc) {
10584                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10585                                       &ipr_ioa_async_err_log);
10586                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10587                                       &ipr_trace_attr);
10588                 scsi_remove_host(ioa_cfg->host);
10589                 __ipr_remove(pdev);
10590                 return rc;
10591         }
10592         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10593         ioa_cfg->scan_enabled = 1;
10594         schedule_work(&ioa_cfg->work_q);
10595         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10596
10597         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10598
10599         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10600                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10601                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10602                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10603                 }
10604         }
10605
10606         scsi_scan_host(ioa_cfg->host);
10607
10608         return 0;
10609 }
10610
10611 /**
10612  * ipr_shutdown - Shutdown handler.
10613  * @pdev:       pci device struct
10614  *
10615  * This function is invoked upon system shutdown/reboot. It will issue
10616  * an adapter shutdown to the adapter to flush the write cache.
10617  *
10618  * Return value:
10619  *      none
10620  **/
10621 static void ipr_shutdown(struct pci_dev *pdev)
10622 {
10623         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10624         unsigned long lock_flags = 0;
10625         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10626         int i;
10627
10628         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10629         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10630                 ioa_cfg->iopoll_weight = 0;
10631                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10632                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10633         }
10634
10635         while (ioa_cfg->in_reset_reload) {
10636                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10637                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10638                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10639         }
10640
10641         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10642                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10643
10644         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10645         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10646         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10647         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10648                 ipr_free_irqs(ioa_cfg);
10649                 pci_disable_device(ioa_cfg->pdev);
10650         }
10651 }
10652
10653 static struct pci_device_id ipr_pci_table[] = {
10654         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10655                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10656         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10657                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10658         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10659                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10660         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10661                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10662         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10663                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10664         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10665                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10666         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10667                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10668         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10670                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10671         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10672               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10673         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10674               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10675               IPR_USE_LONG_TRANSOP_TIMEOUT },
10676         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10677               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10678               IPR_USE_LONG_TRANSOP_TIMEOUT },
10679         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10680               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10681         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10682               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10683               IPR_USE_LONG_TRANSOP_TIMEOUT},
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10685               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10686               IPR_USE_LONG_TRANSOP_TIMEOUT },
10687         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10688               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10689               IPR_USE_LONG_TRANSOP_TIMEOUT },
10690         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10691               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10692         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10693               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10694         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10695               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10696               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10697         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10698                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10699         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10700                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10701         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10702                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10703                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10704         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10705                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10706                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10707         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10708                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10709         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10710                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10711         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10712                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10713         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10714                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10715         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10716                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10717         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10718                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10719         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10721         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10722                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10723         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10724                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10725         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10726                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10727         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10728                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10729         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10730                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10731         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10733         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10734                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10735         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10736                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10737         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10739         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10741         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10743         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10745         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10747         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10749         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10751         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10753         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10755         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10757         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10759         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10760                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10761         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10762                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10763         { }
10764 };
10765 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10766
10767 static const struct pci_error_handlers ipr_err_handler = {
10768         .error_detected = ipr_pci_error_detected,
10769         .mmio_enabled = ipr_pci_mmio_enabled,
10770         .slot_reset = ipr_pci_slot_reset,
10771 };
10772
10773 static struct pci_driver ipr_driver = {
10774         .name = IPR_NAME,
10775         .id_table = ipr_pci_table,
10776         .probe = ipr_probe,
10777         .remove = ipr_remove,
10778         .shutdown = ipr_shutdown,
10779         .err_handler = &ipr_err_handler,
10780 };
10781
10782 /**
10783  * ipr_halt_done - Shutdown prepare completion
10784  *
10785  * Return value:
10786  *      none
10787  **/
10788 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10789 {
10790         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10791 }
10792
10793 /**
10794  * ipr_halt - Issue shutdown prepare to all adapters
10795  *
10796  * Return value:
10797  *      NOTIFY_OK on success / NOTIFY_DONE on failure
10798  **/
10799 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10800 {
10801         struct ipr_cmnd *ipr_cmd;
10802         struct ipr_ioa_cfg *ioa_cfg;
10803         unsigned long flags = 0, driver_lock_flags;
10804
10805         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10806                 return NOTIFY_DONE;
10807
10808         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10809
10810         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10811                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10812                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10813                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10814                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10815                         continue;
10816                 }
10817
10818                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10819                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10820                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10821                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10822                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10823
10824                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10825                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10826         }
10827         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10828
10829         return NOTIFY_OK;
10830 }
10831
10832 static struct notifier_block ipr_notifier = {
10833         ipr_halt, NULL, 0
10834 };
10835
10836 /**
10837  * ipr_init - Module entry point
10838  *
10839  * Return value:
10840  *      0 on success / negative value on failure
10841  **/
10842 static int __init ipr_init(void)
10843 {
10844         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10845                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10846
10847         register_reboot_notifier(&ipr_notifier);
10848         return pci_register_driver(&ipr_driver);
10849 }
10850
10851 /**
10852  * ipr_exit - Module unload
10853  *
10854  * Module unload entry point.
10855  *
10856  * Return value:
10857  *      none
10858  **/
10859 static void __exit ipr_exit(void)
10860 {
10861         unregister_reboot_notifier(&ipr_notifier);
10862         pci_unregister_driver(&ipr_driver);
10863 }
10864
10865 module_init(ipr_init);
10866 module_exit(ipr_exit);