/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

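        /* Atomically bump the wrap-around trace index so concurrent callers claim unique slots */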
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
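        /* Order the trace entry stores before any later stores from this CPU */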
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

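        /* Preserve the HRRQ assignment while the rest of the command packet is zeroed */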
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
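        /* Make the allow_interrupts updates visible before masking hardware interrupts */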
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

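                        /* Route completion through the EH done path for this op type */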
                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 * @done:       done function
 * @timeout_func:       timeout function
 * @timeout:    timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

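                /* For reads on SIS32, the descriptor is reflected in the read-specific IOARCB fields */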
                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

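        /* Drop the host lock so the interrupt handler can complete the command */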
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

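/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins across the HRR queues, reserving queue 0 for
 * driver-internal commands when multiple queues are in use.
 *
 * Return value:
 *      index of the HRR queue to use
 **/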
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

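        /* SIS64 adapters identify resources by resource path and device ID; SIS32 uses bus/target/lun */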
        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

1300 /**
1301  * __ipr_format_res_path - Format the resource path for printing.
1302  * @res_path:   resource path
1303  * @buffer:     buffer
1304  * @len:        length of buffer provided
1305  *
1306  * Return value:
1307  *      pointer to buffer
1308  **/
1309 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1310 {
1311         int i;
1312         char *p = buffer;
1313
1314         *p = '\0';
1315         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1316         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1317                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1318
1319         return buffer;
1320 }
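/*
 * Illustrative example: a res_path of { 0x00, 0x01, 0xff, ... } is
 * rendered as "00-01"; the 0xff byte terminates the path.
 */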
1321
1322 /**
1323  * ipr_format_res_path - Format the resource path for printing.
1324  * @ioa_cfg:    ioa config struct
1325  * @res_path:   resource path
1326  * @buffer:     buffer
1327  * @len:        length of buffer provided
1328  *
1329  * Return value:
1330  *      pointer to buffer
1331  **/
1332 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1333                                  u8 *res_path, char *buffer, int len)
1334 {
1335         char *p = buffer;
1336
1337         *p = '\0';
1338         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1339         __ipr_format_res_path(res_path, p, len - (p - buffer));
1340         return buffer;
1341 }
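/*
 * Illustrative example: on SCSI host 2, the path above is printed with
 * the host number prefixed, e.g. "2/00-01".
 */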
1342
1343 /**
1344  * ipr_update_res_entry - Update the resource entry.
1345  * @res:        resource entry struct
1346  * @cfgtew:     config table entry wrapper struct
1347  *
1348  * Return value:
1349  *      none
1350  **/
1351 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1352                                  struct ipr_config_table_entry_wrapper *cfgtew)
1353 {
1354         char buffer[IPR_MAX_RES_PATH_LENGTH];
1355         unsigned int proto;
1356         int new_path = 0;
1357
1358         if (res->ioa_cfg->sis64) {
1359                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1360                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1361                 res->type = cfgtew->u.cfgte64->res_type;
1362
1363                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1364                         sizeof(struct ipr_std_inq_data));
1365
1366                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1367                 proto = cfgtew->u.cfgte64->proto;
1368                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1369                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1370
1371                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1372                         sizeof(res->dev_lun.scsi_lun));
1373
1374                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1375                                         sizeof(res->res_path))) {
1376                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1377                                 sizeof(res->res_path));
1378                         new_path = 1;
1379                 }
1380
1381                 if (res->sdev && new_path)
1382                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1383                                     ipr_format_res_path(res->ioa_cfg,
1384                                         res->res_path, buffer, sizeof(buffer)));
1385         } else {
1386                 res->flags = cfgtew->u.cfgte->flags;
1387                 if (res->flags & IPR_IS_IOA_RESOURCE)
1388                         res->type = IPR_RES_TYPE_IOAFP;
1389                 else
1390                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1391
1392                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1393                         sizeof(struct ipr_std_inq_data));
1394
1395                 res->qmodel = IPR_QUEUEING_MODEL(res);
1396                 proto = cfgtew->u.cfgte->proto;
1397                 res->res_handle = cfgtew->u.cfgte->res_handle;
1398         }
1399
1400         ipr_update_ata_class(res, proto);
1401 }
1402
1403 /**
1404  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1405  *                        for the resource.
1406  * @res:        resource entry struct
1408  *
1409  * Return value:
1410  *      none
1411  **/
1412 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1413 {
1414         struct ipr_resource_entry *gscsi_res = NULL;
1415         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1416
1417         if (!ioa_cfg->sis64)
1418                 return;
1419
1420         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1421                 clear_bit(res->target, ioa_cfg->array_ids);
1422         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1423                 clear_bit(res->target, ioa_cfg->vset_ids);
1424         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
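                /*
                 * GSCSI resources with the same dev_id share one virtual
                 * target id; only release the bit once no other resource
                 * is still using it.
                 */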
1425                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1426                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1427                                 return;
1428                 clear_bit(res->target, ioa_cfg->target_ids);
1429
1430         } else if (res->bus == 0)
1431                 clear_bit(res->target, ioa_cfg->target_ids);
1432 }
1433
1434 /**
1435  * ipr_handle_config_change - Handle a config change from the adapter
1436  * @ioa_cfg:    ioa config struct
1437  * @hostrcb:    hostrcb
1438  *
1439  * Return value:
1440  *      none
1441  **/
1442 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1443                                      struct ipr_hostrcb *hostrcb)
1444 {
1445         struct ipr_resource_entry *res = NULL;
1446         struct ipr_config_table_entry_wrapper cfgtew;
1447         __be32 cc_res_handle;
1449         u32 is_ndn = 1;
1450
1451         if (ioa_cfg->sis64) {
1452                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1453                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1454         } else {
1455                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1456                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1457         }
1458
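        /*
         * Look the handle up in the used resource queue; if nothing
         * matches, this notification describes a new device (is_ndn).
         */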
1459         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1460                 if (res->res_handle == cc_res_handle) {
1461                         is_ndn = 0;
1462                         break;
1463                 }
1464         }
1465
1466         if (is_ndn) {
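                /*
                 * No free resource entries remain; return the HCAM to the
                 * adapter and drop this change notification.
                 */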
1467                 if (list_empty(&ioa_cfg->free_res_q)) {
1468                         ipr_send_hcam(ioa_cfg,
1469                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1470                                       hostrcb);
1471                         return;
1472                 }
1473
1474                 res = list_entry(ioa_cfg->free_res_q.next,
1475                                  struct ipr_resource_entry, queue);
1476
1477                 list_del(&res->queue);
1478                 ipr_init_res_entry(res, &cfgtew);
1479                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1480         }
1481
1482         ipr_update_res_entry(res, &cfgtew);
1483
1484         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1485                 if (res->sdev) {
1486                         res->del_from_ml = 1;
1487                         res->res_handle = IPR_INVALID_RES_HANDLE;
1488                         schedule_work(&ioa_cfg->work_q);
1489                 } else {
1490                         ipr_clear_res_target(res);
1491                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1492                 }
1493         } else if (!res->sdev || res->del_from_ml) {
1494                 res->add_to_ml = 1;
1495                 schedule_work(&ioa_cfg->work_q);
1496         }
1497
1498         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1499 }
1500
1501 /**
1502  * ipr_process_ccn - Op done function for a CCN.
1503  * @ipr_cmd:    ipr command struct
1504  *
1505  * This function is the op done function for a configuration
1506  * change notification host controlled async from the adapter.
1507  *
1508  * Return value:
1509  *      none
1510  **/
1511 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1512 {
1513         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1514         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1515         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1516
1517         list_del_init(&hostrcb->queue);
1518         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1519
1520         if (ioasc) {
1521                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1522                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1523                         dev_err(&ioa_cfg->pdev->dev,
1524                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1525
1526                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1527         } else {
1528                 ipr_handle_config_change(ioa_cfg, hostrcb);
1529         }
1530 }
1531
1532 /**
1533  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1534  * @i:          index into buffer
1535  * @buf:                string to modify
1536  *
1537  * This function will strip all trailing whitespace, pad the end
1538  * of the string with a single space, and NUL terminate the string.
1539  *
1540  * Return value:
1541  *      new length of string
1542  **/
1543 static int strip_and_pad_whitespace(int i, char *buf)
1544 {
1545         while (i && buf[i] == ' ')
1546                 i--;
1547         buf[i+1] = ' ';
1548         buf[i+2] = '\0';
1549         return i + 2;
1550 }
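/*
 * Worked example: with buf = "IBM     " and i indexing the last byte,
 * the trailing blanks collapse to a single space, leaving "IBM " and a
 * returned length of 4.
 */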
1551
1552 /**
1553  * ipr_log_vpd_compact - Log the passed VPD compactly.
1554  * @prefix:             string to print at start of printk
1555  * @hostrcb:    hostrcb pointer
1556  * @vpd:                vendor/product id/sn struct
1557  *
1558  * Return value:
1559  *      none
1560  **/
1561 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562                                 struct ipr_vpd *vpd)
1563 {
1564         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1565         int i = 0;
1566
1567         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1568         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1569
1570         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1571         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1572
1573         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1574         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1575
1576         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1577 }
1578
1579 /**
1580  * ipr_log_vpd - Log the passed VPD to the error log.
1581  * @vpd:                vendor/product id/sn struct
1582  *
1583  * Return value:
1584  *      none
1585  **/
1586 static void ipr_log_vpd(struct ipr_vpd *vpd)
1587 {
1588         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1589                     + IPR_SERIAL_NUM_LEN];
1590
1591         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1592         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1593                IPR_PROD_ID_LEN);
1594         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1595         ipr_err("Vendor/Product ID: %s\n", buffer);
1596
1597         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1598         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1599         ipr_err("    Serial Number: %s\n", buffer);
1600 }
1601
1602 /**
1603  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1604  * @prefix:             string to print at start of printk
1605  * @hostrcb:    hostrcb pointer
1606  * @vpd:                vendor/product id/sn/wwn struct
1607  *
1608  * Return value:
1609  *      none
1610  **/
1611 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1612                                     struct ipr_ext_vpd *vpd)
1613 {
1614         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1615         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1616                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1617 }
1618
1619 /**
1620  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1621  * @vpd:                vendor/product id/sn/wwn struct
1622  *
1623  * Return value:
1624  *      none
1625  **/
1626 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1627 {
1628         ipr_log_vpd(&vpd->vpd);
1629         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1630                 be32_to_cpu(vpd->wwid[1]));
1631 }
1632
1633 /**
1634  * ipr_log_enhanced_cache_error - Log a cache error.
1635  * @ioa_cfg:    ioa config struct
1636  * @hostrcb:    hostrcb struct
1637  *
1638  * Return value:
1639  *      none
1640  **/
1641 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1642                                          struct ipr_hostrcb *hostrcb)
1643 {
1644         struct ipr_hostrcb_type_12_error *error;
1645
1646         if (ioa_cfg->sis64)
1647                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1648         else
1649                 error = &hostrcb->hcam.u.error.u.type_12_error;
1650
1651         ipr_err("-----Current Configuration-----\n");
1652         ipr_err("Cache Directory Card Information:\n");
1653         ipr_log_ext_vpd(&error->ioa_vpd);
1654         ipr_err("Adapter Card Information:\n");
1655         ipr_log_ext_vpd(&error->cfc_vpd);
1656
1657         ipr_err("-----Expected Configuration-----\n");
1658         ipr_err("Cache Directory Card Information:\n");
1659         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1660         ipr_err("Adapter Card Information:\n");
1661         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1662
1663         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1664                      be32_to_cpu(error->ioa_data[0]),
1665                      be32_to_cpu(error->ioa_data[1]),
1666                      be32_to_cpu(error->ioa_data[2]));
1667 }
1668
1669 /**
1670  * ipr_log_cache_error - Log a cache error.
1671  * @ioa_cfg:    ioa config struct
1672  * @hostrcb:    hostrcb struct
1673  *
1674  * Return value:
1675  *      none
1676  **/
1677 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1678                                 struct ipr_hostrcb *hostrcb)
1679 {
1680         struct ipr_hostrcb_type_02_error *error =
1681                 &hostrcb->hcam.u.error.u.type_02_error;
1682
1683         ipr_err("-----Current Configuration-----\n");
1684         ipr_err("Cache Directory Card Information:\n");
1685         ipr_log_vpd(&error->ioa_vpd);
1686         ipr_err("Adapter Card Information:\n");
1687         ipr_log_vpd(&error->cfc_vpd);
1688
1689         ipr_err("-----Expected Configuration-----\n");
1690         ipr_err("Cache Directory Card Information:\n");
1691         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1692         ipr_err("Adapter Card Information:\n");
1693         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1694
1695         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1696                      be32_to_cpu(error->ioa_data[0]),
1697                      be32_to_cpu(error->ioa_data[1]),
1698                      be32_to_cpu(error->ioa_data[2]));
1699 }
1700
1701 /**
1702  * ipr_log_enhanced_config_error - Log a configuration error.
1703  * @ioa_cfg:    ioa config struct
1704  * @hostrcb:    hostrcb struct
1705  *
1706  * Return value:
1707  *      none
1708  **/
1709 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1710                                           struct ipr_hostrcb *hostrcb)
1711 {
1712         int errors_logged, i;
1713         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1714         struct ipr_hostrcb_type_13_error *error;
1715
1716         error = &hostrcb->hcam.u.error.u.type_13_error;
1717         errors_logged = be32_to_cpu(error->errors_logged);
1718
1719         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1720                 be32_to_cpu(error->errors_detected), errors_logged);
1721
1722         dev_entry = error->dev;
1723
1724         for (i = 0; i < errors_logged; i++, dev_entry++) {
1725                 ipr_err_separator;
1726
1727                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1728                 ipr_log_ext_vpd(&dev_entry->vpd);
1729
1730                 ipr_err("-----New Device Information-----\n");
1731                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1732
1733                 ipr_err("Cache Directory Card Information:\n");
1734                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1735
1736                 ipr_err("Adapter Card Information:\n");
1737                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1738         }
1739 }
1740
1741 /**
1742  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1743  * @ioa_cfg:    ioa config struct
1744  * @hostrcb:    hostrcb struct
1745  *
1746  * Return value:
1747  *      none
1748  **/
1749 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1750                                        struct ipr_hostrcb *hostrcb)
1751 {
1752         int errors_logged, i;
1753         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1754         struct ipr_hostrcb_type_23_error *error;
1755         char buffer[IPR_MAX_RES_PATH_LENGTH];
1756
1757         error = &hostrcb->hcam.u.error64.u.type_23_error;
1758         errors_logged = be32_to_cpu(error->errors_logged);
1759
1760         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1761                 be32_to_cpu(error->errors_detected), errors_logged);
1762
1763         dev_entry = error->dev;
1764
1765         for (i = 0; i < errors_logged; i++, dev_entry++) {
1766                 ipr_err_separator;
1767
1768                 ipr_err("Device %d : %s", i + 1,
1769                         __ipr_format_res_path(dev_entry->res_path,
1770                                               buffer, sizeof(buffer)));
1771                 ipr_log_ext_vpd(&dev_entry->vpd);
1772
1773                 ipr_err("-----New Device Information-----\n");
1774                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1775
1776                 ipr_err("Cache Directory Card Information:\n");
1777                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1778
1779                 ipr_err("Adapter Card Information:\n");
1780                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1781         }
1782 }
1783
1784 /**
1785  * ipr_log_config_error - Log a configuration error.
1786  * @ioa_cfg:    ioa config struct
1787  * @hostrcb:    hostrcb struct
1788  *
1789  * Return value:
1790  *      none
1791  **/
1792 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1793                                  struct ipr_hostrcb *hostrcb)
1794 {
1795         int errors_logged, i;
1796         struct ipr_hostrcb_device_data_entry *dev_entry;
1797         struct ipr_hostrcb_type_03_error *error;
1798
1799         error = &hostrcb->hcam.u.error.u.type_03_error;
1800         errors_logged = be32_to_cpu(error->errors_logged);
1801
1802         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1803                 be32_to_cpu(error->errors_detected), errors_logged);
1804
1805         dev_entry = error->dev;
1806
1807         for (i = 0; i < errors_logged; i++, dev_entry++) {
1808                 ipr_err_separator;
1809
1810                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1811                 ipr_log_vpd(&dev_entry->vpd);
1812
1813                 ipr_err("-----New Device Information-----\n");
1814                 ipr_log_vpd(&dev_entry->new_vpd);
1815
1816                 ipr_err("Cache Directory Card Information:\n");
1817                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1818
1819                 ipr_err("Adapter Card Information:\n");
1820                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1821
1822                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1823                         be32_to_cpu(dev_entry->ioa_data[0]),
1824                         be32_to_cpu(dev_entry->ioa_data[1]),
1825                         be32_to_cpu(dev_entry->ioa_data[2]),
1826                         be32_to_cpu(dev_entry->ioa_data[3]),
1827                         be32_to_cpu(dev_entry->ioa_data[4]));
1828         }
1829 }
1830
1831 /**
1832  * ipr_log_enhanced_array_error - Log an array configuration error.
1833  * @ioa_cfg:    ioa config struct
1834  * @hostrcb:    hostrcb struct
1835  *
1836  * Return value:
1837  *      none
1838  **/
1839 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1840                                          struct ipr_hostrcb *hostrcb)
1841 {
1842         int i, num_entries;
1843         struct ipr_hostrcb_type_14_error *error;
1844         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1845         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1846
1847         error = &hostrcb->hcam.u.error.u.type_14_error;
1848
1849         ipr_err_separator;
1850
1851         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1852                 error->protection_level,
1853                 ioa_cfg->host->host_no,
1854                 error->last_func_vset_res_addr.bus,
1855                 error->last_func_vset_res_addr.target,
1856                 error->last_func_vset_res_addr.lun);
1857
1858         ipr_err_separator;
1859
1860         array_entry = error->array_member;
1861         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1862                             ARRAY_SIZE(error->array_member));
1863
1864         for (i = 0; i < num_entries; i++, array_entry++) {
1865                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1866                         continue;
1867
1868                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1869                         ipr_err("Exposed Array Member %d:\n", i);
1870                 else
1871                         ipr_err("Array Member %d:\n", i);
1872
1873                 ipr_log_ext_vpd(&array_entry->vpd);
1874                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1875                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1876                                  "Expected Location");
1877
1878                 ipr_err_separator;
1879         }
1880 }
1881
1882 /**
1883  * ipr_log_array_error - Log an array configuration error.
1884  * @ioa_cfg:    ioa config struct
1885  * @hostrcb:    hostrcb struct
1886  *
1887  * Return value:
1888  *      none
1889  **/
1890 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1891                                 struct ipr_hostrcb *hostrcb)
1892 {
1893         int i;
1894         struct ipr_hostrcb_type_04_error *error;
1895         struct ipr_hostrcb_array_data_entry *array_entry;
1896         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1897
1898         error = &hostrcb->hcam.u.error.u.type_04_error;
1899
1900         ipr_err_separator;
1901
1902         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1903                 error->protection_level,
1904                 ioa_cfg->host->host_no,
1905                 error->last_func_vset_res_addr.bus,
1906                 error->last_func_vset_res_addr.target,
1907                 error->last_func_vset_res_addr.lun);
1908
1909         ipr_err_separator;
1910
1911         array_entry = error->array_member;
1912
1913         for (i = 0; i < 18; i++) {
1914                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1915                         continue;
1916
1917                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1918                         ipr_err("Exposed Array Member %d:\n", i);
1919                 else
1920                         ipr_err("Array Member %d:\n", i);
1921
1922                 ipr_log_vpd(&array_entry->vpd);
1923
1924                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1925                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1926                                  "Expected Location");
1927
1928                 ipr_err_separator;
1929
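                /*
                 * The legacy hostrcb keeps members 0-9 in array_member and
                 * the remaining members in array_member2.
                 */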
1930                 if (i == 9)
1931                         array_entry = error->array_member2;
1932                 else
1933                         array_entry++;
1934         }
1935 }
1936
1937 /**
1938  * ipr_log_hex_data - Log additional hex IOA error data.
1939  * @ioa_cfg:    ioa config struct
1940  * @data:               IOA error data
1941  * @len:                data length
1942  *
1943  * Return value:
1944  *      none
1945  **/
1946 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1947 {
1948         int i;
1949
1950         if (len == 0)
1951                 return;
1952
1953         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1954                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1955
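        /* Dump four big-endian words per line, prefixed by the byte offset. */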
1956         for (i = 0; i < len / 4; i += 4) {
1957                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1958                         be32_to_cpu(data[i]),
1959                         be32_to_cpu(data[i+1]),
1960                         be32_to_cpu(data[i+2]),
1961                         be32_to_cpu(data[i+3]));
1962         }
1963 }
1964
1965 /**
1966  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1967  * @ioa_cfg:    ioa config struct
1968  * @hostrcb:    hostrcb struct
1969  *
1970  * Return value:
1971  *      none
1972  **/
1973 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1974                                             struct ipr_hostrcb *hostrcb)
1975 {
1976         struct ipr_hostrcb_type_17_error *error;
1977
1978         if (ioa_cfg->sis64)
1979                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1980         else
1981                 error = &hostrcb->hcam.u.error.u.type_17_error;
1982
1983         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1984         strim(error->failure_reason);
1985
1986         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1987                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1988         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1989         ipr_log_hex_data(ioa_cfg, error->data,
1990                          be32_to_cpu(hostrcb->hcam.length) -
1991                          (offsetof(struct ipr_hostrcb_error, u) +
1992                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1993 }
1994
1995 /**
1996  * ipr_log_dual_ioa_error - Log a dual adapter error.
1997  * @ioa_cfg:    ioa config struct
1998  * @hostrcb:    hostrcb struct
1999  *
2000  * Return value:
2001  *      none
2002  **/
2003 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2004                                    struct ipr_hostrcb *hostrcb)
2005 {
2006         struct ipr_hostrcb_type_07_error *error;
2007
2008         error = &hostrcb->hcam.u.error.u.type_07_error;
2009         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2010         strim(error->failure_reason);
2011
2012         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2013                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2014         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2015         ipr_log_hex_data(ioa_cfg, error->data,
2016                          be32_to_cpu(hostrcb->hcam.length) -
2017                          (offsetof(struct ipr_hostrcb_error, u) +
2018                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2019 }
2020
2021 static const struct {
2022         u8 active;
2023         char *desc;
2024 } path_active_desc[] = {
2025         { IPR_PATH_NO_INFO, "Path" },
2026         { IPR_PATH_ACTIVE, "Active path" },
2027         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2028 };
2029
2030 static const struct {
2031         u8 state;
2032         char *desc;
2033 } path_state_desc[] = {
2034         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2035         { IPR_PATH_HEALTHY, "is healthy" },
2036         { IPR_PATH_DEGRADED, "is degraded" },
2037         { IPR_PATH_FAILED, "is failed" }
2038 };
2039
2040 /**
2041  * ipr_log_fabric_path - Log a fabric path error
2042  * @hostrcb:    hostrcb struct
2043  * @fabric:             fabric descriptor
2044  *
2045  * Return value:
2046  *      none
2047  **/
2048 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2049                                 struct ipr_hostrcb_fabric_desc *fabric)
2050 {
2051         int i, j;
2052         u8 path_state = fabric->path_state;
2053         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2054         u8 state = path_state & IPR_PATH_STATE_MASK;
2055
2056         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2057                 if (path_active_desc[i].active != active)
2058                         continue;
2059
2060                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2061                         if (path_state_desc[j].state != state)
2062                                 continue;
2063
2064                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2065                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2066                                              path_active_desc[i].desc, path_state_desc[j].desc,
2067                                              fabric->ioa_port);
2068                         } else if (fabric->cascaded_expander == 0xff) {
2069                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2070                                              path_active_desc[i].desc, path_state_desc[j].desc,
2071                                              fabric->ioa_port, fabric->phy);
2072                         } else if (fabric->phy == 0xff) {
2073                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2074                                              path_active_desc[i].desc, path_state_desc[j].desc,
2075                                              fabric->ioa_port, fabric->cascaded_expander);
2076                         } else {
2077                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2078                                              path_active_desc[i].desc, path_state_desc[j].desc,
2079                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2080                         }
2081                         return;
2082                 }
2083         }
2084
2085         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2086                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2087 }
2088
2089 /**
2090  * ipr_log64_fabric_path - Log a fabric path error
2091  * @hostrcb:    hostrcb struct
2092  * @fabric:             fabric descriptor
2093  *
2094  * Return value:
2095  *      none
2096  **/
2097 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2098                                   struct ipr_hostrcb64_fabric_desc *fabric)
2099 {
2100         int i, j;
2101         u8 path_state = fabric->path_state;
2102         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2103         u8 state = path_state & IPR_PATH_STATE_MASK;
2104         char buffer[IPR_MAX_RES_PATH_LENGTH];
2105
2106         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2107                 if (path_active_desc[i].active != active)
2108                         continue;
2109
2110                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2111                         if (path_state_desc[j].state != state)
2112                                 continue;
2113
2114                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2115                                      path_active_desc[i].desc, path_state_desc[j].desc,
2116                                      ipr_format_res_path(hostrcb->ioa_cfg,
2117                                                 fabric->res_path,
2118                                                 buffer, sizeof(buffer)));
2119                         return;
2120                 }
2121         }
2122
2123         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2124                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2125                                     buffer, sizeof(buffer)));
2126 }
2127
2128 static const struct {
2129         u8 type;
2130         char *desc;
2131 } path_type_desc[] = {
2132         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2133         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2134         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2135         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2136 };
2137
2138 static const struct {
2139         u8 status;
2140         char *desc;
2141 } path_status_desc[] = {
2142         { IPR_PATH_CFG_NO_PROB, "Functional" },
2143         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2144         { IPR_PATH_CFG_FAILED, "Failed" },
2145         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2146         { IPR_PATH_NOT_DETECTED, "Missing" },
2147         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2148 };
2149
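/*
 * Link rate strings, indexed by the masked link rate field of a path
 * element (cfg->link_rate & IPR_PHY_LINK_RATE_MASK); entries 8 and 9
 * correspond to the SAS 1.5Gbps and 3.0Gbps rates.
 */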
2150 static const char *link_rate[] = {
2151         "unknown",
2152         "disabled",
2153         "phy reset problem",
2154         "spinup hold",
2155         "port selector",
2156         "unknown",
2157         "unknown",
2158         "unknown",
2159         "1.5Gbps",
2160         "3.0Gbps",
2161         "unknown",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown"
2167 };
2168
2169 /**
2170  * ipr_log_path_elem - Log a fabric path element.
2171  * @hostrcb:    hostrcb struct
2172  * @cfg:                fabric path element struct
2173  *
2174  * Return value:
2175  *      none
2176  **/
2177 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2178                               struct ipr_hostrcb_config_element *cfg)
2179 {
2180         int i, j;
2181         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2182         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2183
2184         if (type == IPR_PATH_CFG_NOT_EXIST)
2185                 return;
2186
2187         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2188                 if (path_type_desc[i].type != type)
2189                         continue;
2190
2191                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2192                         if (path_status_desc[j].status != status)
2193                                 continue;
2194
2195                         if (type == IPR_PATH_CFG_IOA_PORT) {
2196                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2197                                              path_status_desc[j].desc, path_type_desc[i].desc,
2198                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2199                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2200                         } else {
2201                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2202                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2203                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2204                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2205                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2206                                 } else if (cfg->cascaded_expander == 0xff) {
2207                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2208                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2209                                                      path_type_desc[i].desc, cfg->phy,
2210                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2212                                 } else if (cfg->phy == 0xff) {
2213                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2214                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2215                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2216                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2217                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2218                                 } else {
2219                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2220                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2221                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2222                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2223                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2224                                 }
2225                         }
2226                         return;
2227                 }
2228         }
2229
2230         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2231                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2232                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2233                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2234 }
2235
2236 /**
2237  * ipr_log64_path_elem - Log a fabric path element.
2238  * @hostrcb:    hostrcb struct
2239  * @cfg:                fabric path element struct
2240  *
2241  * Return value:
2242  *      none
2243  **/
2244 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2245                                 struct ipr_hostrcb64_config_element *cfg)
2246 {
2247         int i, j;
2248         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2249         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2250         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2251         char buffer[IPR_MAX_RES_PATH_LENGTH];
2252
2253         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2254                 return;
2255
2256         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2257                 if (path_type_desc[i].type != type)
2258                         continue;
2259
2260                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2261                         if (path_status_desc[j].status != status)
2262                                 continue;
2263
2264                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2265                                      path_status_desc[j].desc, path_type_desc[i].desc,
2266                                      ipr_format_res_path(hostrcb->ioa_cfg,
2267                                         cfg->res_path, buffer, sizeof(buffer)),
2268                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2269                                         be32_to_cpu(cfg->wwid[0]),
2270                                         be32_to_cpu(cfg->wwid[1]));
2271                         return;
2272                 }
2273         }
2274         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2275                      "WWN=%08X%08X\n", cfg->type_status,
2276                      ipr_format_res_path(hostrcb->ioa_cfg,
2277                         cfg->res_path, buffer, sizeof(buffer)),
2278                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2279                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2280 }
2281
2282 /**
2283  * ipr_log_fabric_error - Log a fabric error.
2284  * @ioa_cfg:    ioa config struct
2285  * @hostrcb:    hostrcb struct
2286  *
2287  * Return value:
2288  *      none
2289  **/
2290 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2291                                  struct ipr_hostrcb *hostrcb)
2292 {
2293         struct ipr_hostrcb_type_20_error *error;
2294         struct ipr_hostrcb_fabric_desc *fabric;
2295         struct ipr_hostrcb_config_element *cfg;
2296         int i, add_len;
2297
2298         error = &hostrcb->hcam.u.error.u.type_20_error;
2299         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2300         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2301
2302         add_len = be32_to_cpu(hostrcb->hcam.length) -
2303                 (offsetof(struct ipr_hostrcb_error, u) +
2304                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2305
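        /* Walk the variable-length fabric descriptors, then dump whatever trails them. */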
2306         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2307                 ipr_log_fabric_path(hostrcb, fabric);
2308                 for_each_fabric_cfg(fabric, cfg)
2309                         ipr_log_path_elem(hostrcb, cfg);
2310
2311                 add_len -= be16_to_cpu(fabric->length);
2312                 fabric = (struct ipr_hostrcb_fabric_desc *)
2313                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2314         }
2315
2316         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2317 }
2318
2319 /**
2320  * ipr_log_sis64_array_error - Log a sis64 array error.
2321  * @ioa_cfg:    ioa config struct
2322  * @hostrcb:    hostrcb struct
2323  *
2324  * Return value:
2325  *      none
2326  **/
2327 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2328                                       struct ipr_hostrcb *hostrcb)
2329 {
2330         int i, num_entries;
2331         struct ipr_hostrcb_type_24_error *error;
2332         struct ipr_hostrcb64_array_data_entry *array_entry;
2333         char buffer[IPR_MAX_RES_PATH_LENGTH];
2334         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2335
2336         error = &hostrcb->hcam.u.error64.u.type_24_error;
2337
2338         ipr_err_separator;
2339
2340         ipr_err("RAID %s Array Configuration: %s\n",
2341                 error->protection_level,
2342                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2343                         buffer, sizeof(buffer)));
2344
2345         ipr_err_separator;
2346
2347         array_entry = error->array_member;
2348         num_entries = min_t(u32, error->num_entries,
2349                             ARRAY_SIZE(error->array_member));
2350
2351         for (i = 0; i < num_entries; i++, array_entry++) {
2352
2353                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2354                         continue;
2355
2356                 if (error->exposed_mode_adn == i)
2357                         ipr_err("Exposed Array Member %d:\n", i);
2358                 else
2359                         ipr_err("Array Member %d:\n", i);
2360
2362                 ipr_log_ext_vpd(&array_entry->vpd);
2363                 ipr_err("Current Location: %s\n",
2364                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2365                                 buffer, sizeof(buffer)));
2366                 ipr_err("Expected Location: %s\n",
2367                          ipr_format_res_path(ioa_cfg,
2368                                 array_entry->expected_res_path,
2369                                 buffer, sizeof(buffer)));
2370
2371                 ipr_err_separator;
2372         }
2373 }
2374
2375 /**
2376  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2377  * @ioa_cfg:    ioa config struct
2378  * @hostrcb:    hostrcb struct
2379  *
2380  * Return value:
2381  *      none
2382  **/
2383 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2384                                        struct ipr_hostrcb *hostrcb)
2385 {
2386         struct ipr_hostrcb_type_30_error *error;
2387         struct ipr_hostrcb64_fabric_desc *fabric;
2388         struct ipr_hostrcb64_config_element *cfg;
2389         int i, add_len;
2390
2391         error = &hostrcb->hcam.u.error64.u.type_30_error;
2392
2393         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2394         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2395
2396         add_len = be32_to_cpu(hostrcb->hcam.length) -
2397                 (offsetof(struct ipr_hostrcb64_error, u) +
2398                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2399
2400         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2401                 ipr_log64_fabric_path(hostrcb, fabric);
2402                 for_each_fabric_cfg(fabric, cfg)
2403                         ipr_log64_path_elem(hostrcb, cfg);
2404
2405                 add_len -= be16_to_cpu(fabric->length);
2406                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2407                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2408         }
2409
2410         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2411 }
2412
2413 /**
2414  * ipr_log_generic_error - Log an adapter error.
2415  * @ioa_cfg:    ioa config struct
2416  * @hostrcb:    hostrcb struct
2417  *
2418  * Return value:
2419  *      none
2420  **/
2421 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2422                                   struct ipr_hostrcb *hostrcb)
2423 {
2424         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2425                          be32_to_cpu(hostrcb->hcam.length));
2426 }
2427
2428 /**
2429  * ipr_log_sis64_device_error - Log a sis64 device error.
2430  * @ioa_cfg:    ioa config struct
2431  * @hostrcb:    hostrcb struct
2432  *
2433  * Return value:
2434  *      none
2435  **/
2436 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2437                                          struct ipr_hostrcb *hostrcb)
2438 {
2439         struct ipr_hostrcb_type_21_error *error;
2440         char buffer[IPR_MAX_RES_PATH_LENGTH];
2441
2442         error = &hostrcb->hcam.u.error64.u.type_21_error;
2443
2444         ipr_err("-----Failing Device Information-----\n");
2445         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2446                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2447                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2448         ipr_err("Device Resource Path: %s\n",
2449                 __ipr_format_res_path(error->res_path,
2450                                       buffer, sizeof(buffer)));
2451         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2452         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2453         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2454         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2455         ipr_err("SCSI Sense Data:\n");
2456         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2457         ipr_err("SCSI Command Descriptor Block:\n");
2458         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2459
2460         ipr_err("Additional IOA Data:\n");
2461         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2462 }
2463
2464 /**
2465  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2466  * @ioasc:      IOASC
2467  *
2468  * This function will return the index into the ipr_error_table
2469  * for the specified IOASC. If the IOASC is not in the table,
2470  * 0 will be returned, which points to the entry used for unknown errors.
2471  *
2472  * Return value:
2473  *      index into the ipr_error_table
2474  **/
2475 static u32 ipr_get_error(u32 ioasc)
2476 {
2477         int i;
2478
2479         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2480                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2481                         return i;
2482
2483         return 0;
2484 }
2485
2486 /**
2487  * ipr_handle_log_data - Log an adapter error.
2488  * @ioa_cfg:    ioa config struct
2489  * @hostrcb:    hostrcb struct
2490  *
2491  * This function logs an adapter error to the system.
2492  *
2493  * Return value:
2494  *      none
2495  **/
2496 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2497                                 struct ipr_hostrcb *hostrcb)
2498 {
2499         u32 ioasc;
2500         int error_index;
2501         struct ipr_hostrcb_type_21_error *error;
2502
2503         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2504                 return;
2505
2506         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2507                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2508
2509         if (ioa_cfg->sis64)
2510                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2511         else
2512                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2513
2514         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2515             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2516                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2517                 scsi_report_bus_reset(ioa_cfg->host,
2518                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2519         }
2520
2521         error_index = ipr_get_error(ioasc);
2522
2523         if (!ipr_error_table[error_index].log_hcam)
2524                 return;
2525
2526         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2527             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2528                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2529
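                /*
                 * The sense key lives in byte 2 of the sense data; suppress
                 * expected ILLEGAL REQUEST failures at the default log level.
                 */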
2530                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2531                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2532                                 return;
2533         }
2534
2535         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2536
2537         /* Set indication we have logged an error */
2538         ioa_cfg->errors_logged++;
2539
2540         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2541                 return;
2542         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2543                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2544
2545         switch (hostrcb->hcam.overlay_id) {
2546         case IPR_HOST_RCB_OVERLAY_ID_2:
2547                 ipr_log_cache_error(ioa_cfg, hostrcb);
2548                 break;
2549         case IPR_HOST_RCB_OVERLAY_ID_3:
2550                 ipr_log_config_error(ioa_cfg, hostrcb);
2551                 break;
2552         case IPR_HOST_RCB_OVERLAY_ID_4:
2553         case IPR_HOST_RCB_OVERLAY_ID_6:
2554                 ipr_log_array_error(ioa_cfg, hostrcb);
2555                 break;
2556         case IPR_HOST_RCB_OVERLAY_ID_7:
2557                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2558                 break;
2559         case IPR_HOST_RCB_OVERLAY_ID_12:
2560                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2561                 break;
2562         case IPR_HOST_RCB_OVERLAY_ID_13:
2563                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2564                 break;
2565         case IPR_HOST_RCB_OVERLAY_ID_14:
2566         case IPR_HOST_RCB_OVERLAY_ID_16:
2567                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2568                 break;
2569         case IPR_HOST_RCB_OVERLAY_ID_17:
2570                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2571                 break;
2572         case IPR_HOST_RCB_OVERLAY_ID_20:
2573                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2574                 break;
2575         case IPR_HOST_RCB_OVERLAY_ID_21:
2576                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2577                 break;
2578         case IPR_HOST_RCB_OVERLAY_ID_23:
2579                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2580                 break;
2581         case IPR_HOST_RCB_OVERLAY_ID_24:
2582         case IPR_HOST_RCB_OVERLAY_ID_26:
2583                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2584                 break;
2585         case IPR_HOST_RCB_OVERLAY_ID_30:
2586                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2587                 break;
2588         case IPR_HOST_RCB_OVERLAY_ID_1:
2589         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2590         default:
2591                 ipr_log_generic_error(ioa_cfg, hostrcb);
2592                 break;
2593         }
2594 }
2595
2596 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2597 {
2598         struct ipr_hostrcb *hostrcb;
2599
2600         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2601                                         struct ipr_hostrcb, queue);
2602
2603         if (unlikely(!hostrcb)) {
2604                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2605                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2606                                                 struct ipr_hostrcb, queue);
2607         }
2608
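        /*
         * One of the two queues is assumed to always hold a buffer here;
         * reclaiming from the report queue sacrifices the oldest
         * unreported entry rather than losing the HCAM.
         */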
2609         list_del_init(&hostrcb->queue);
2610         return hostrcb;
2611 }
2612
2613 /**
2614  * ipr_process_error - Op done function for an adapter error log.
2615  * @ipr_cmd:    ipr command struct
2616  *
2617  * This function is the op done function for an error log host
2618  * controlled async from the adapter. It will log the error and
2619  * send the HCAM back to the adapter.
2620  *
2621  * Return value:
2622  *      none
2623  **/
2624 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2625 {
2626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2627         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2628         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2629         u32 fd_ioasc;
2630
2631         if (ioa_cfg->sis64)
2632                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2633         else
2634                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2635
2636         list_del_init(&hostrcb->queue);
2637         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2638
2639         if (!ioasc) {
2640                 ipr_handle_log_data(ioa_cfg, hostrcb);
2641                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2642                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2643         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2644                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2645                 dev_err(&ioa_cfg->pdev->dev,
2646                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2647         }
2648
2649         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2650         schedule_work(&ioa_cfg->work_q);
2651         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2652
2653         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2654 }
2655
2656 /**
2657  * ipr_timeout - An internally generated op has timed out.
2658  * @t:          Timer context used to fetch ipr command struct
2659  *
2660  * This function blocks host requests and initiates an
2661  * adapter reset.
2662  *
2663  * Return value:
2664  *      none
2665  **/
2666 static void ipr_timeout(struct timer_list *t)
2667 {
2668         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2669         unsigned long lock_flags = 0;
2670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672         ENTER;
2673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675         ioa_cfg->errors_logged++;
2676         dev_err(&ioa_cfg->pdev->dev,
2677                 "Adapter being reset due to command timeout.\n");
2678
2679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680                 ioa_cfg->sdt_state = GET_DUMP;
2681
2682         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686         LEAVE;
2687 }
2688
2689 /**
2690  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2691  * @t: Timer context used to fetch ipr command struct
2692  *
2693  * This function blocks host requests and initiates an
2694  * adapter reset.
2695  *
2696  * Return value:
2697  *      none
2698  **/
2699 static void ipr_oper_timeout(struct timer_list *t)
2700 {
2701         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2702         unsigned long lock_flags = 0;
2703         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2704
2705         ENTER;
2706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2707
2708         ioa_cfg->errors_logged++;
2709         dev_err(&ioa_cfg->pdev->dev,
2710                 "Adapter timed out transitioning to operational.\n");
2711
2712         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2713                 ioa_cfg->sdt_state = GET_DUMP;
2714
2715         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2716                 if (ipr_fastfail)
2717                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2718                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2719         }
2720
2721         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2722         LEAVE;
2723 }
2724
2725 /**
2726  * ipr_find_ses_entry - Find matching SES in SES table
2727  * @res:        resource entry struct of SES
2728  *
2729  * Return value:
2730  *      pointer to SES table entry / NULL on failure
2731  **/
2732 static const struct ipr_ses_table_entry *
2733 ipr_find_ses_entry(struct ipr_resource_entry *res)
2734 {
2735         int i, j, matches;
2736         struct ipr_std_inq_vpids *vpids;
2737         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2738
2739         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2740                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2741                         if (ste->compare_product_id_byte[j] == 'X') {
2742                                 vpids = &res->std_inq_data.vpids;
2743                                 if (vpids->product_id[j] == ste->product_id[j])
2744                                         matches++;
2745                                 else
2746                                         break;
2747                         } else
2748                                 matches++;
2749                 }
2750
2751                 if (matches == IPR_PROD_ID_LEN)
2752                         return ste;
2753         }
2754
2755         return NULL;
2756 }
2757
2758 /**
2759  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2760  * @ioa_cfg:    ioa config struct
2761  * @bus:                SCSI bus
2762  * @bus_width:  bus width
2763  *
2764  * Return value:
2765  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2766  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2767  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2768  *      max 160MHz = max 320MB/sec).
2769  **/
2770 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2771 {
2772         struct ipr_resource_entry *res;
2773         const struct ipr_ses_table_entry *ste;
2774         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2775
2776         /* Loop through each config table entry in the config table buffer */
2777         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2778                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2779                         continue;
2780
2781                 if (bus != res->bus)
2782                         continue;
2783
2784                 if (!(ste = ipr_find_ses_entry(res)))
2785                         continue;
2786
2787                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2788         }
2789
2790         return max_xfer_rate;
2791 }
2792
2793 /**
2794  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2795  * @ioa_cfg:            ioa config struct
2796  * @max_delay:          max delay in micro-seconds to wait
2797  *
2798  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2799  *
2800  * Return value:
2801  *      0 on success / other on failure
2802  **/
2803 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2804 {
2805         volatile u32 pcii_reg;
2806         int delay = 1;
2807
2808         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2809         while (delay < max_delay) {
2810                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2811
2812                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2813                         return 0;
2814
2815                 /* udelay cannot be used if delay is more than a few milliseconds */
2816                 if ((delay / 1000) > MAX_UDELAY_MS)
2817                         mdelay(delay / 1000);
2818                 else
2819                         udelay(delay);
2820
2821                 delay += delay;
2822         }
2823         return -EIO;
2824 }
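/*
 * The loop above doubles its delay each pass (1, 2, 4, ... microseconds),
 * so the total busy-wait time is bounded by roughly 2 * max_delay.
 * Worked example: max_delay = 1000 polls after 1 + 2 + ... + 512 us,
 * about 1023 us in total, before giving up with -EIO.
 */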
2825
2826 /**
2827  * ipr_get_sis64_dump_data_section - Dump IOA memory
2828  * @ioa_cfg:                    ioa config struct
2829  * @start_addr:                 adapter address to dump
2830  * @dest:                       destination kernel buffer
2831  * @length_in_words:            length to dump in 4 byte words
2832  *
2833  * Return value:
2834  *      0 on success
2835  **/
2836 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2837                                            u32 start_addr,
2838                                            __be32 *dest, u32 length_in_words)
2839 {
2840         int i;
2841
2842         for (i = 0; i < length_in_words; i++) {
2843                 writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
2844                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2845                 dest++;
2846         }
2847
2848         return 0;
2849 }
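/*
 * Register protocol sketch: each 32-bit word is fetched by writing its
 * adapter address (start_addr, start_addr + 4, ...) to dump_addr_reg and
 * reading the value back from dump_data_reg.
 */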
2850
2851 /**
2852  * ipr_get_ldump_data_section - Dump IOA memory
2853  * @ioa_cfg:                    ioa config struct
2854  * @start_addr:                 adapter address to dump
2855  * @dest:                       destination kernel buffer
2856  * @length_in_words:            length to dump in 4 byte words
2857  *
2858  * Return value:
2859  *      0 on success / -EIO on failure
2860  **/
2861 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2862                                       u32 start_addr,
2863                                       __be32 *dest, u32 length_in_words)
2864 {
2865         volatile u32 temp_pcii_reg;
2866         int i, delay = 0;
2867
2868         if (ioa_cfg->sis64)
2869                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2870                                                        dest, length_in_words);
2871
2872         /* Write IOA interrupt reg starting LDUMP state  */
2873         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2874                ioa_cfg->regs.set_uproc_interrupt_reg32);
2875
2876         /* Wait for IO debug acknowledge */
2877         if (ipr_wait_iodbg_ack(ioa_cfg,
2878                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2879                 dev_err(&ioa_cfg->pdev->dev,
2880                         "IOA dump long data transfer timeout\n");
2881                 return -EIO;
2882         }
2883
2884         /* Signal LDUMP interlocked - clear IO debug ack */
2885         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2886                ioa_cfg->regs.clr_interrupt_reg);
2887
2888         /* Write Mailbox with starting address */
2889         writel(start_addr, ioa_cfg->ioa_mailbox);
2890
2891         /* Signal address valid - clear IOA Reset alert */
2892         writel(IPR_UPROCI_RESET_ALERT,
2893                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2894
2895         for (i = 0; i < length_in_words; i++) {
2896                 /* Wait for IO debug acknowledge */
2897                 if (ipr_wait_iodbg_ack(ioa_cfg,
2898                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2899                         dev_err(&ioa_cfg->pdev->dev,
2900                                 "IOA dump short data transfer timeout\n");
2901                         return -EIO;
2902                 }
2903
2904                 /* Read data from mailbox and increment destination pointer */
2905                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2906                 dest++;
2907
2908                 /* For all but the last word of data, signal data received */
2909                 if (i < (length_in_words - 1)) {
2910                         /* Signal dump data received - Clear IO debug Ack */
2911                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912                                ioa_cfg->regs.clr_interrupt_reg);
2913                 }
2914         }
2915
2916         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2917         writel(IPR_UPROCI_RESET_ALERT,
2918                ioa_cfg->regs.set_uproc_interrupt_reg32);
2919
2920         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2921                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2922
2923         /* Signal dump data received - Clear IO debug Ack */
2924         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2925                ioa_cfg->regs.clr_interrupt_reg);
2926
2927         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2928         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2929                 temp_pcii_reg =
2930                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2931
2932                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2933                         return 0;
2934
2935                 udelay(10);
2936                 delay += 10;
2937         }
2938
2939         return 0;
2940 }
2941
2942 #ifdef CONFIG_SCSI_IPR_DUMP
2943 /**
2944  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2945  * @ioa_cfg:            ioa config struct
2946  * @pci_address:        adapter address
2947  * @length:                     length of data to copy
2948  *
2949  * Copy data from PCI adapter to kernel buffer.
2950  * Note: length MUST be a 4 byte multiple
2951  * Return value:
2952  *      0 on success / other on failure
2953  **/
2954 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2955                         unsigned long pci_address, u32 length)
2956 {
2957         int bytes_copied = 0;
2958         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2959         __be32 *page;
2960         unsigned long lock_flags = 0;
2961         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2962
2963         if (ioa_cfg->sis64)
2964                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2965         else
2966                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2967
2968         while (bytes_copied < length &&
2969                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2970                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2971                     ioa_dump->page_offset == 0) {
2972                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2973
2974                         if (!page) {
2975                                 ipr_trace;
2976                                 return bytes_copied;
2977                         }
2978
2979                         ioa_dump->page_offset = 0;
2980                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2981                         ioa_dump->next_page_index++;
2982                 } else
2983                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2984
2985                 rem_len = length - bytes_copied;
2986                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2987                 cur_len = min(rem_len, rem_page_len);
2988
2989                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2990                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2991                         rc = -EIO;
2992                 } else {
2993                         rc = ipr_get_ldump_data_section(ioa_cfg,
2994                                                         pci_address + bytes_copied,
2995                                                         &page[ioa_dump->page_offset / 4],
2996                                                         (cur_len / sizeof(u32)));
2997                 }
2998                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2999
3000                 if (!rc) {
3001                         ioa_dump->page_offset += cur_len;
3002                         bytes_copied += cur_len;
3003                 } else {
3004                         ipr_trace;
3005                         break;
3006                 }
3007                 schedule();
3008         }
3009
3010         return bytes_copied;
3011 }
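/*
 * Worked example (assuming PAGE_SIZE == 4096): copying length == 10000
 * bytes starting with page_offset == 0 allocates three pages and issues
 * ldump reads of 4096, 4096 and 1808 bytes; page_offset records how much
 * of the current page is filled between calls.
 */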
3012
3013 /**
3014  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3015  * @hdr:        dump entry header struct
3016  *
3017  * Return value:
3018  *      nothing
3019  **/
3020 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3021 {
3022         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3023         hdr->num_elems = 1;
3024         hdr->offset = sizeof(*hdr);
3025         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3026 }
3027
3028 /**
3029  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3030  * @ioa_cfg:    ioa config struct
3031  * @driver_dump:        driver dump struct
3032  *
3033  * Return value:
3034  *      nothing
3035  **/
3036 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3037                                    struct ipr_driver_dump *driver_dump)
3038 {
3039         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3040
3041         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3042         driver_dump->ioa_type_entry.hdr.len =
3043                 sizeof(struct ipr_dump_ioa_type_entry) -
3044                 sizeof(struct ipr_dump_entry_header);
3045         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3046         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3047         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3048         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3049                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3050                 ucode_vpd->minor_release[1];
3051         driver_dump->hdr.num_entries++;
3052 }
3053
3054 /**
3055  * ipr_dump_version_data - Fill in the driver version in the dump.
3056  * @ioa_cfg:    ioa config struct
3057  * @driver_dump:        driver dump struct
3058  *
3059  * Return value:
3060  *      nothing
3061  **/
3062 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3063                                   struct ipr_driver_dump *driver_dump)
3064 {
3065         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3066         driver_dump->version_entry.hdr.len =
3067                 sizeof(struct ipr_dump_version_entry) -
3068                 sizeof(struct ipr_dump_entry_header);
3069         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3070         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3071         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3072         driver_dump->hdr.num_entries++;
3073 }
3074
3075 /**
3076  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3077  * @ioa_cfg:    ioa config struct
3078  * @driver_dump:        driver dump struct
3079  *
3080  * Return value:
3081  *      nothing
3082  **/
3083 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3084                                    struct ipr_driver_dump *driver_dump)
3085 {
3086         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3087         driver_dump->trace_entry.hdr.len =
3088                 sizeof(struct ipr_dump_trace_entry) -
3089                 sizeof(struct ipr_dump_entry_header);
3090         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3091         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3092         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3093         driver_dump->hdr.num_entries++;
3094 }
3095
3096 /**
3097  * ipr_dump_location_data - Fill in the IOA location in the dump.
3098  * @ioa_cfg:    ioa config struct
3099  * @driver_dump:        driver dump struct
3100  *
3101  * Return value:
3102  *      nothing
3103  **/
3104 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3105                                    struct ipr_driver_dump *driver_dump)
3106 {
3107         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3108         driver_dump->location_entry.hdr.len =
3109                 sizeof(struct ipr_dump_location_entry) -
3110                 sizeof(struct ipr_dump_entry_header);
3111         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3112         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3113         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3114         driver_dump->hdr.num_entries++;
3115 }
3116
3117 /**
3118  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3119  * @ioa_cfg:    ioa config struct
3120  * @dump:               dump struct
3121  *
3122  * Return value:
3123  *      nothing
3124  **/
3125 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3126 {
3127         unsigned long start_addr, sdt_word;
3128         unsigned long lock_flags = 0;
3129         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3130         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3131         u32 num_entries, max_num_entries, start_off, end_off;
3132         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3133         struct ipr_sdt *sdt;
3134         int valid = 1;
3135         int i;
3136
3137         ENTER;
3138
3139         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3140
3141         if (ioa_cfg->sdt_state != READ_DUMP) {
3142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143                 return;
3144         }
3145
3146         if (ioa_cfg->sis64) {
3147                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148                 ssleep(IPR_DUMP_DELAY_SECONDS);
3149                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3150         }
3151
3152         start_addr = readl(ioa_cfg->ioa_mailbox);
3153
3154         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3155                 dev_err(&ioa_cfg->pdev->dev,
3156                         "Invalid dump table format: %lx\n", start_addr);
3157                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158                 return;
3159         }
3160
3161         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3162
3163         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3164
3165         /* Initialize the overall dump header */
3166         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3167         driver_dump->hdr.num_entries = 1;
3168         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3169         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3170         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3171         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3172
3173         ipr_dump_version_data(ioa_cfg, driver_dump);
3174         ipr_dump_location_data(ioa_cfg, driver_dump);
3175         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3176         ipr_dump_trace_data(ioa_cfg, driver_dump);
3177
3178         /* Update dump_header */
3179         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3180
3181         /* IOA Dump entry */
3182         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3183         ioa_dump->hdr.len = 0;
3184         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3185         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3186
3187         /* First entries in sdt are actually a list of dump addresses and
3188          * lengths to gather the real dump data.  sdt represents the pointer
3189          * to the ioa generated dump table.  Dump data will be extracted based
3190          * on entries in this table. */
3191         sdt = &ioa_dump->sdt;
3192
3193         if (ioa_cfg->sis64) {
3194                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3195                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3196         } else {
3197                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3198                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3199         }
3200
3201         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3202                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3203         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3204                                         bytes_to_copy / sizeof(__be32));
3205
3206         /* Smart Dump table is ready to use and the first entry is valid */
3207         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3208             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3209                 dev_err(&ioa_cfg->pdev->dev,
3210                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3211                         rc, be32_to_cpu(sdt->hdr.state));
3212                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3213                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3214                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3215                 return;
3216         }
3217
3218         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3219
3220         if (num_entries > max_num_entries)
3221                 num_entries = max_num_entries;
3222
3223         /* Update dump length to the actual data to be copied */
3224         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3225         if (ioa_cfg->sis64)
3226                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3227         else
3228                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3229
3230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231
3232         for (i = 0; i < num_entries; i++) {
3233                 if (ioa_dump->hdr.len > max_dump_size) {
3234                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3235                         break;
3236                 }
3237
3238                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3239                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3240                         if (ioa_cfg->sis64)
3241                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3242                         else {
3243                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3244                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3245
3246                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3247                                         bytes_to_copy = end_off - start_off;
3248                                 else
3249                                         valid = 0;
3250                         }
3251                         if (valid) {
3252                                 if (bytes_to_copy > max_dump_size) {
3253                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3254                                         continue;
3255                                 }
3256
3257                                 /* Copy data from adapter to driver buffers */
3258                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3259                                                             bytes_to_copy);
3260
3261                                 ioa_dump->hdr.len += bytes_copied;
3262
3263                                 if (bytes_copied != bytes_to_copy) {
3264                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3265                                         break;
3266                                 }
3267                         }
3268                 }
3269         }
3270
3271         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3272
3273         /* Update dump_header */
3274         driver_dump->hdr.len += ioa_dump->hdr.len;
3275         wmb();
3276         ioa_cfg->sdt_state = DUMP_OBTAINED;
3277         LEAVE;
3278 }
3279
3280 #else
3281 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3282 #endif
3283
3284 /**
3285  * ipr_release_dump - Free adapter dump memory
3286  * @kref:       kref struct
3287  *
3288  * Return value:
3289  *      nothing
3290  **/
3291 static void ipr_release_dump(struct kref *kref)
3292 {
3293         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3294         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3295         unsigned long lock_flags = 0;
3296         int i;
3297
3298         ENTER;
3299         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300         ioa_cfg->dump = NULL;
3301         ioa_cfg->sdt_state = INACTIVE;
3302         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303
3304         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3305                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3306
3307         vfree(dump->ioa_dump.ioa_data);
3308         kfree(dump);
3309         LEAVE;
3310 }
3311
3312 /**
3313  * ipr_worker_thread - Worker thread
3314  * @work:               work struct embedded in the ioa config struct
3315  *
3316  * Called at task level from a work thread. This function takes care
3317  * of adding and removing device from the mid-layer as configuration
3318  * changes are detected by the adapter.
3319  *
3320  * Return value:
3321  *      nothing
3322  **/
3323 static void ipr_worker_thread(struct work_struct *work)
3324 {
3325         unsigned long lock_flags;
3326         struct ipr_resource_entry *res;
3327         struct scsi_device *sdev;
3328         struct ipr_dump *dump;
3329         struct ipr_ioa_cfg *ioa_cfg =
3330                 container_of(work, struct ipr_ioa_cfg, work_q);
3331         u8 bus, target, lun;
3332         int did_work;
3333
3334         ENTER;
3335         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336
3337         if (ioa_cfg->sdt_state == READ_DUMP) {
3338                 dump = ioa_cfg->dump;
3339                 if (!dump) {
3340                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341                         return;
3342                 }
3343                 kref_get(&dump->kref);
3344                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3345                 ipr_get_ioa_dump(ioa_cfg, dump);
3346                 kref_put(&dump->kref, ipr_release_dump);
3347
3348                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3349                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3350                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3351                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3352                 return;
3353         }
3354
3355         if (ioa_cfg->scsi_unblock) {
3356                 ioa_cfg->scsi_unblock = 0;
3357                 ioa_cfg->scsi_blocked = 0;
3358                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3359                 scsi_unblock_requests(ioa_cfg->host);
3360                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3361                 if (ioa_cfg->scsi_blocked)
3362                         scsi_block_requests(ioa_cfg->host);
3363         }
3364
3365         if (!ioa_cfg->scan_enabled) {
3366                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3367                 return;
3368         }
3369
3370 restart:
3371         do {
3372                 did_work = 0;
3373                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3374                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375                         return;
3376                 }
3377
3378                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379                         if (res->del_from_ml && res->sdev) {
3380                                 did_work = 1;
3381                                 sdev = res->sdev;
3382                                 if (!scsi_device_get(sdev)) {
3383                                         if (!res->add_to_ml)
3384                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3385                                         else
3386                                                 res->del_from_ml = 0;
3387                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3388                                         scsi_remove_device(sdev);
3389                                         scsi_device_put(sdev);
3390                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3391                                 }
3392                                 break;
3393                         }
3394                 }
3395         } while (did_work);
3396
3397         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398                 if (res->add_to_ml) {
3399                         bus = res->bus;
3400                         target = res->target;
3401                         lun = res->lun;
3402                         res->add_to_ml = 0;
3403                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3405                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3406                         goto restart;
3407                 }
3408         }
3409
3410         ioa_cfg->scan_done = 1;
3411         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3413         LEAVE;
3414 }
3415
3416 #ifdef CONFIG_SCSI_IPR_TRACE
3417 /**
3418  * ipr_read_trace - Dump the adapter trace
3419  * @filp:               open sysfs file
3420  * @kobj:               kobject struct
3421  * @bin_attr:           bin_attribute struct
3422  * @buf:                buffer
3423  * @off:                offset
3424  * @count:              buffer size
3425  *
3426  * Return value:
3427  *      number of bytes printed to buffer
3428  **/
3429 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3430                               struct bin_attribute *bin_attr,
3431                               char *buf, loff_t off, size_t count)
3432 {
3433         struct device *dev = container_of(kobj, struct device, kobj);
3434         struct Scsi_Host *shost = class_to_shost(dev);
3435         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436         unsigned long lock_flags = 0;
3437         ssize_t ret;
3438
3439         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3440         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3441                                 IPR_TRACE_SIZE);
3442         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3443
3444         return ret;
3445 }
3446
3447 static struct bin_attribute ipr_trace_attr = {
3448         .attr = {
3449                 .name = "trace",
3450                 .mode = S_IRUGO,
3451         },
3452         .size = 0,
3453         .read = ipr_read_trace,
3454 };
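/*
 * Example (illustrative; host number is a placeholder): the trace buffer
 * is exposed through the SCSI host class device, so it can be snapshotted
 * for offline analysis with:
 *
 *	cat /sys/class/scsi_host/host0/trace > /tmp/ipr_trace.bin
 */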
3455 #endif
3456
3457 /**
3458  * ipr_show_fw_version - Show the firmware version
3459  * @dev:        class device struct
3460  * @buf:        buffer
3461  *
3462  * Return value:
3463  *      number of bytes printed to buffer
3464  **/
3465 static ssize_t ipr_show_fw_version(struct device *dev,
3466                                    struct device_attribute *attr, char *buf)
3467 {
3468         struct Scsi_Host *shost = class_to_shost(dev);
3469         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3470         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3471         unsigned long lock_flags = 0;
3472         int len;
3473
3474         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3475         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3476                        ucode_vpd->major_release, ucode_vpd->card_type,
3477                        ucode_vpd->minor_release[0],
3478                        ucode_vpd->minor_release[1]);
3479         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3480         return len;
3481 }
3482
3483 static struct device_attribute ipr_fw_version_attr = {
3484         .attr = {
3485                 .name =         "fw_version",
3486                 .mode =         S_IRUGO,
3487         },
3488         .show = ipr_show_fw_version,
3489 };
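/*
 * Example (illustrative; the value shown is made up): the version reads
 * back as four hex-packed bytes, %02X each, e.g.:
 *
 *	$ cat /sys/class/scsi_host/host0/fw_version
 *	02080036
 */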
3490
3491 /**
3492  * ipr_show_log_level - Show the adapter's error logging level
3493  * @dev:        class device struct
3494  * @buf:        buffer
3495  *
3496  * Return value:
3497  *      number of bytes printed to buffer
3498  **/
3499 static ssize_t ipr_show_log_level(struct device *dev,
3500                                    struct device_attribute *attr, char *buf)
3501 {
3502         struct Scsi_Host *shost = class_to_shost(dev);
3503         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3504         unsigned long lock_flags = 0;
3505         int len;
3506
3507         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3508         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3509         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3510         return len;
3511 }
3512
3513 /**
3514  * ipr_store_log_level - Change the adapter's error logging level
3515  * @dev:        class device struct
3516  * @buf:        buffer
3517  *
3518  * Return value:
3519  *      number of bytes consumed from the buffer on success
3520  **/
3521 static ssize_t ipr_store_log_level(struct device *dev,
3522                                    struct device_attribute *attr,
3523                                    const char *buf, size_t count)
3524 {
3525         struct Scsi_Host *shost = class_to_shost(dev);
3526         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3527         unsigned long lock_flags = 0;
3528
3529         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3530         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3531         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532         return strlen(buf);
3533 }
3534
3535 static struct device_attribute ipr_log_level_attr = {
3536         .attr = {
3537                 .name =         "log_level",
3538                 .mode =         S_IRUGO | S_IWUSR,
3539         },
3540         .show = ipr_show_log_level,
3541         .store = ipr_store_log_level
3542 };
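/*
 * Example (illustrative): the new level is parsed with simple_strtoul(),
 * so any decimal value is accepted:
 *
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 *	cat /sys/class/scsi_host/host0/log_level
 */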
3543
3544 /**
3545  * ipr_store_diagnostics - IOA Diagnostics interface
3546  * @dev:        device struct
3547  * @buf:        buffer
3548  * @count:      buffer size
3549  *
3550  * This function will reset the adapter and wait a reasonable
3551  * amount of time for any errors that the adapter might log.
3552  *
3553  * Return value:
3554  *      count on success / other on failure
3555  **/
3556 static ssize_t ipr_store_diagnostics(struct device *dev,
3557                                      struct device_attribute *attr,
3558                                      const char *buf, size_t count)
3559 {
3560         struct Scsi_Host *shost = class_to_shost(dev);
3561         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3562         unsigned long lock_flags = 0;
3563         int rc = count;
3564
3565         if (!capable(CAP_SYS_ADMIN))
3566                 return -EACCES;
3567
3568         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3569         while (ioa_cfg->in_reset_reload) {
3570                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3571                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3572                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3573         }
3574
3575         ioa_cfg->errors_logged = 0;
3576         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3577
3578         if (ioa_cfg->in_reset_reload) {
3579                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3581
3582                 /* Wait for a second for any errors to be logged */
3583                 msleep(1000);
3584         } else {
3585                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3586                 return -EIO;
3587         }
3588
3589         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3590         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3591                 rc = -EIO;
3592         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3593
3594         return rc;
3595 }
3596
3597 static struct device_attribute ipr_diagnostics_attr = {
3598         .attr = {
3599                 .name =         "run_diagnostics",
3600                 .mode =         S_IWUSR,
3601         },
3602         .store = ipr_store_diagnostics
3603 };
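/*
 * Example (illustrative; requires CAP_SYS_ADMIN): the written value is
 * ignored, so any write kicks off a NORMAL shutdown reset and reports
 * -EIO if the adapter logged errors during the reload:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */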
3604
3605 /**
3606  * ipr_show_adapter_state - Show the adapter's state
3607  * @dev:        device struct
3608  * @buf:        buffer
3609  *
3610  * Return value:
3611  *      number of bytes printed to buffer
3612  **/
3613 static ssize_t ipr_show_adapter_state(struct device *dev,
3614                                       struct device_attribute *attr, char *buf)
3615 {
3616         struct Scsi_Host *shost = class_to_shost(dev);
3617         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3618         unsigned long lock_flags = 0;
3619         int len;
3620
3621         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3622         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3623                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3624         else
3625                 len = snprintf(buf, PAGE_SIZE, "online\n");
3626         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627         return len;
3628 }
3629
3630 /**
3631  * ipr_store_adapter_state - Change adapter state
3632  * @dev:        device struct
3633  * @buf:        buffer
3634  * @count:      buffer size
3635  *
3636  * This function will change the adapter's state.
3637  *
3638  * Return value:
3639  *      count on success / other on failure
3640  **/
3641 static ssize_t ipr_store_adapter_state(struct device *dev,
3642                                        struct device_attribute *attr,
3643                                        const char *buf, size_t count)
3644 {
3645         struct Scsi_Host *shost = class_to_shost(dev);
3646         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647         unsigned long lock_flags;
3648         int result = count, i;
3649
3650         if (!capable(CAP_SYS_ADMIN))
3651                 return -EACCES;
3652
3653         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3655             !strncmp(buf, "online", 6)) {
3656                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3657                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3658                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3659                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3660                 }
3661                 wmb();
3662                 ioa_cfg->reset_retries = 0;
3663                 ioa_cfg->in_ioa_bringdown = 0;
3664                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3665         }
3666         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3668
3669         return result;
3670 }
3671
3672 static struct device_attribute ipr_ioa_state_attr = {
3673         .attr = {
3674                 .name =         "online_state",
3675                 .mode =         S_IRUGO | S_IWUSR,
3676         },
3677         .show = ipr_show_adapter_state,
3678         .store = ipr_store_adapter_state
3679 };
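/*
 * Example (illustrative): bring a dead adapter back online. Only writes
 * beginning with "online" are acted on, and only while the IOA is marked
 * dead:
 *
 *	echo online > /sys/class/scsi_host/host0/online_state
 */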
3680
3681 /**
3682  * ipr_store_reset_adapter - Reset the adapter
3683  * @dev:        device struct
3684  * @buf:        buffer
3685  * @count:      buffer size
3686  *
3687  * This function will reset the adapter.
3688  *
3689  * Return value:
3690  *      count on success / other on failure
3691  **/
3692 static ssize_t ipr_store_reset_adapter(struct device *dev,
3693                                        struct device_attribute *attr,
3694                                        const char *buf, size_t count)
3695 {
3696         struct Scsi_Host *shost = class_to_shost(dev);
3697         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3698         unsigned long lock_flags;
3699         int result = count;
3700
3701         if (!capable(CAP_SYS_ADMIN))
3702                 return -EACCES;
3703
3704         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3705         if (!ioa_cfg->in_reset_reload)
3706                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3707         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710         return result;
3711 }
3712
3713 static struct device_attribute ipr_ioa_reset_attr = {
3714         .attr = {
3715                 .name =         "reset_host",
3716                 .mode =         S_IWUSR,
3717         },
3718         .store = ipr_store_reset_adapter
3719 };
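/*
 * Example (illustrative; requires CAP_SYS_ADMIN): any write requests a
 * NORMAL shutdown reset and blocks until the reset/reload finishes:
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */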
3720
3721 static int ipr_iopoll(struct irq_poll *iop, int budget);
3722 /**
3723  * ipr_show_iopoll_weight - Show ipr polling mode
3724  * @dev:        class device struct
3725  * @buf:        buffer
3726  *
3727  * Return value:
3728  *      number of bytes printed to buffer
3729  **/
3730 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3731                                    struct device_attribute *attr, char *buf)
3732 {
3733         struct Scsi_Host *shost = class_to_shost(dev);
3734         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3735         unsigned long lock_flags = 0;
3736         int len;
3737
3738         spin_lock_irqsave(shost->host_lock, lock_flags);
3739         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3740         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3741
3742         return len;
3743 }
3744
3745 /**
3746  * ipr_store_iopoll_weight - Change the adapter's polling mode
3747  * @dev:        class device struct
3748  * @buf:        buffer
3749  *
3750  * Return value:
3751  *      number of bytes consumed from the buffer on success
3752  **/
3753 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3754                                         struct device_attribute *attr,
3755                                         const char *buf, size_t count)
3756 {
3757         struct Scsi_Host *shost = class_to_shost(dev);
3758         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3759         unsigned long user_iopoll_weight;
3760         unsigned long lock_flags = 0;
3761         int i;
3762
3763         if (!ioa_cfg->sis64) {
3764                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3765                 return -EINVAL;
3766         }
3767         if (kstrtoul(buf, 10, &user_iopoll_weight))
3768                 return -EINVAL;
3769
3770         if (user_iopoll_weight > 256) {
3771                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3772                 return -EINVAL;
3773         }
3774
3775         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3776                 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged. It is already set to that value\n");
3777                 return strlen(buf);
3778         }
3779
3780         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3781                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3782                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3783         }
3784
3785         spin_lock_irqsave(shost->host_lock, lock_flags);
3786         ioa_cfg->iopoll_weight = user_iopoll_weight;
3787         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3788                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3789                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3790                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3791                 }
3792         }
3793         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3794
3795         return strlen(buf);
3796 }
3797
3798 static struct device_attribute ipr_iopoll_weight_attr = {
3799         .attr = {
3800                 .name =         "iopoll_weight",
3801                 .mode =         S_IRUGO | S_IWUSR,
3802         },
3803         .show = ipr_show_iopoll_weight,
3804         .store = ipr_store_iopoll_weight
3805 };
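/*
 * Example (illustrative): enable irq_poll with a weight of 64 on a SIS64
 * adapter with multiple HRRQ vectors; writing 0 falls back to interrupt
 * mode and values above 256 are rejected:
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */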
3806
3807 /**
3808  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3809  * @buf_len:            buffer length
3810  *
3811  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3812  * list to use for microcode download
3813  *
3814  * Return value:
3815  *      pointer to sglist / NULL on failure
3816  **/
3817 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3818 {
3819         int sg_size, order;
3820         struct ipr_sglist *sglist;
3821
3822         /* Get the minimum size per scatter/gather element */
3823         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3824
3825         /* Get the actual size per element */
3826         order = get_order(sg_size);
3827
3828         /* Allocate a scatter/gather list for the DMA */
3829         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3830         if (sglist == NULL) {
3831                 ipr_trace;
3832                 return NULL;
3833         }
3834         sglist->order = order;
3835         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3836                                               &sglist->num_sg);
3837         if (!sglist->scatterlist) {
3838                 kfree(sglist);
3839                 return NULL;
3840         }
3841
3842         return sglist;
3843 }
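/*
 * Sizing sketch (assumes IPR_MAX_SGLIST is 64 and 4 KB pages, both
 * illustrative here): a 4 MB image gives sg_size = 4 MB / 63 ~= 66576
 * bytes, get_order() rounds that up to an order-5 (128 KB) element, and
 * sgl_alloc_order() builds the matching scatterlist.
 */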
3844
3845 /**
3846  * ipr_free_ucode_buffer - Frees a microcode download buffer
3847  * @sglist:             scatter/gather list pointer
3848  *
3849  * Free a DMA'able ucode download buffer previously allocated with
3850  * ipr_alloc_ucode_buffer
3851  *
3852  * Return value:
3853  *      nothing
3854  **/
3855 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3856 {
3857         sgl_free_order(sglist->scatterlist, sglist->order);
3858         kfree(sglist);
3859 }
3860
3861 /**
3862  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3863  * @sglist:             scatter/gather list pointer
3864  * @buffer:             buffer pointer
3865  * @len:                buffer length
3866  *
3867  * Copy a microcode image from a user buffer into a buffer allocated by
3868  * ipr_alloc_ucode_buffer
3869  *
3870  * Return value:
3871  *      0 on success / other on failure
3872  **/
3873 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3874                                  u8 *buffer, u32 len)
3875 {
3876         int bsize_elem, i, result = 0;
3877         struct scatterlist *scatterlist;
3878         void *kaddr;
3879
3880         /* Determine the actual number of bytes per element */
3881         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3882
3883         scatterlist = sglist->scatterlist;
3884
3885         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3886                 struct page *page = sg_page(&scatterlist[i]);
3887
3888                 kaddr = kmap(page);
3889                 memcpy(kaddr, buffer, bsize_elem);
3890                 kunmap(page);
3891
3892                 scatterlist[i].length = bsize_elem;
3893
3894                 if (result != 0) {      /* currently never taken; result stays 0 above */
3895                         ipr_trace;
3896                         return result;
3897                 }
3898         }
3899
3900         if (len % bsize_elem) {
3901                 struct page *page = sg_page(&scatterlist[i]);
3902
3903                 kaddr = kmap(page);
3904                 memcpy(kaddr, buffer, len % bsize_elem);
3905                 kunmap(page);
3906
3907                 scatterlist[i].length = len % bsize_elem;
3908         }
3909
3910         sglist->buffer_len = len;
3911         return result;
3912 }
3913
3914 /**
3915  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3916  * @ipr_cmd:            ipr command struct
3917  * @sglist:             scatter/gather list
3918  *
3919  * Builds a microcode download IOA data list (IOADL).
3920  *
3921  **/
3922 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3923                                     struct ipr_sglist *sglist)
3924 {
3925         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3926         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3927         struct scatterlist *scatterlist = sglist->scatterlist;
3928         int i;
3929
3930         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3931         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3932         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3933
3934         ioarcb->ioadl_len =
3935                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3936         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3937                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3938                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3939                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3940         }
3941
3942         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3943 }
3944
3945 /**
3946  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3947  * @ipr_cmd:    ipr command struct
3948  * @sglist:             scatter/gather list
3949  *
3950  * Builds a microcode download IOA data list (IOADL).
3951  *
3952  **/
3953 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3954                                   struct ipr_sglist *sglist)
3955 {
3956         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3957         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3958         struct scatterlist *scatterlist = sglist->scatterlist;
3959         int i;
3960
3961         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3962         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3963         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3964
3965         ioarcb->ioadl_len =
3966                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3967
3968         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3969                 ioadl[i].flags_and_data_len =
3970                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3971                 ioadl[i].address =
3972                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3973         }
3974
3975         ioadl[i-1].flags_and_data_len |=
3976                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3977 }
3978
3979 /**
3980  * ipr_update_ioa_ucode - Update IOA's microcode
3981  * @ioa_cfg:    ioa config struct
3982  * @sglist:             scatter/gather list
3983  *
3984  * Initiate an adapter reset to update the IOA's microcode
3985  *
3986  * Return value:
3987  *      0 on success / -EIO on failure
3988  **/
3989 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3990                                 struct ipr_sglist *sglist)
3991 {
3992         unsigned long lock_flags;
3993
3994         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3995         while (ioa_cfg->in_reset_reload) {
3996                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3997                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3998                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3999         }
4000
4001         if (ioa_cfg->ucode_sglist) {
4002                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4003                 dev_err(&ioa_cfg->pdev->dev,
4004                         "Microcode download already in progress\n");
4005                 return -EIO;
4006         }
4007
4008         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4009                                         sglist->scatterlist, sglist->num_sg,
4010                                         DMA_TO_DEVICE);
4011
4012         if (!sglist->num_dma_sg) {
4013                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4014                 dev_err(&ioa_cfg->pdev->dev,
4015                         "Failed to map microcode download buffer!\n");
4016                 return -EIO;
4017         }
4018
4019         ioa_cfg->ucode_sglist = sglist;
4020         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4021         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4022         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4023
4024         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4025         ioa_cfg->ucode_sglist = NULL;
4026         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4027         return 0;
4028 }
4029
4030 /**
4031  * ipr_store_update_fw - Update the firmware on the adapter
4032  * @dev:        device struct
4033  * @buf:        buffer
4034  * @count:      buffer size
4035  *
4036  * This function will update the firmware on the adapter.
4037  *
4038  * Return value:
4039  *      count on success / other on failure
4040  **/
4041 static ssize_t ipr_store_update_fw(struct device *dev,
4042                                    struct device_attribute *attr,
4043                                    const char *buf, size_t count)
4044 {
4045         struct Scsi_Host *shost = class_to_shost(dev);
4046         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4047         struct ipr_ucode_image_header *image_hdr;
4048         const struct firmware *fw_entry;
4049         struct ipr_sglist *sglist;
4050         char fname[100];
4051         char *src;
4052         char *endline;
4053         int result, dnld_size;
4054
4055         if (!capable(CAP_SYS_ADMIN))
4056                 return -EACCES;
4057
4058         snprintf(fname, sizeof(fname), "%s", buf);
4059
4060         endline = strchr(fname, '\n');
4061         if (endline)
4062                 *endline = '\0';
4063
4064         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4065                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4066                 return -EIO;
4067         }
4068
4069         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4070
4071         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4072         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4073         sglist = ipr_alloc_ucode_buffer(dnld_size);
4074
4075         if (!sglist) {
4076                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4077                 release_firmware(fw_entry);
4078                 return -ENOMEM;
4079         }
4080
4081         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4082
4083         if (result) {
4084                 dev_err(&ioa_cfg->pdev->dev,
4085                         "Microcode buffer copy to DMA buffer failed\n");
4086                 goto out;
4087         }
4088
4089         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4090
4091         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4092
4093         if (!result)
4094                 result = count;
4095 out:
4096         ipr_free_ucode_buffer(sglist);
4097         release_firmware(fw_entry);
4098         return result;
4099 }
4100
4101 static struct device_attribute ipr_update_fw_attr = {
4102         .attr = {
4103                 .name =         "update_fw",
4104                 .mode =         S_IWUSR,
4105         },
4106         .store = ipr_store_update_fw
4107 };
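
     /*
      * Example usage (illustrative; the host number and firmware file
      * name below are hypothetical and system dependent):
      *
      *   # cp ibm-adapter-ucode.bin /lib/firmware/
      *   # echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
      *
      * request_firmware() resolves the name against the firmware search
      * path, so only a file name should be written; a trailing newline,
      * if present, is stripped before the lookup.
      */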
4108
4109 /**
4110  * ipr_show_fw_type - Show the adapter's firmware type.
4111  * @dev:        class device struct
      * @attr:       device attribute structure
4112  * @buf:        buffer
4113  *
4114  * Return value:
4115  *      number of bytes printed to buffer
4116  **/
4117 static ssize_t ipr_show_fw_type(struct device *dev,
4118                                 struct device_attribute *attr, char *buf)
4119 {
4120         struct Scsi_Host *shost = class_to_shost(dev);
4121         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4122         unsigned long lock_flags = 0;
4123         int len;
4124
4125         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4126         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4127         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128         return len;
4129 }
4130
4131 static struct device_attribute ipr_ioa_fw_type_attr = {
4132         .attr = {
4133                 .name =         "fw_type",
4134                 .mode =         S_IRUGO,
4135         },
4136         .show = ipr_show_fw_type
4137 };
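
     /*
      * The value read back is the adapter's sis64 flag: 1 indicates a
      * 64-bit SIS adapter, 0 a 32-bit SIS adapter.
      */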
4138
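     /**
      * ipr_read_async_err_log - Read the oldest queued asynchronous error log
      * @filep:              open sysfs file
      * @kobj:               kobject struct
      * @bin_attr:           bin_attribute struct
      * @buf:                buffer
      * @off:                offset
      * @count:              buffer size
      *
      * Return value:
      *      number of bytes read / 0 if no error log entry is queued
      **/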
4139 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4140                                 struct bin_attribute *bin_attr, char *buf,
4141                                 loff_t off, size_t count)
4142 {
4143         struct device *cdev = container_of(kobj, struct device, kobj);
4144         struct Scsi_Host *shost = class_to_shost(cdev);
4145         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4146         struct ipr_hostrcb *hostrcb;
4147         unsigned long lock_flags = 0;
4148         int ret;
4149
4150         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4151         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4152                                         struct ipr_hostrcb, queue);
4153         if (!hostrcb) {
4154                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4155                 return 0;
4156         }
4157         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4158                                 sizeof(hostrcb->hcam));
4159         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160         return ret;
4161 }
4162
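     /**
      * ipr_next_async_err_log - Discard the oldest queued asynchronous error log
      * @filep:              open sysfs file
      * @kobj:               kobject struct
      * @bin_attr:           bin_attribute struct
      * @buf:                buffer (unused)
      * @off:                offset (unused)
      * @count:              buffer size
      *
      * Return value:
      *      count
      **/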
4163 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4164                                 struct bin_attribute *bin_attr, char *buf,
4165                                 loff_t off, size_t count)
4166 {
4167         struct device *cdev = container_of(kobj, struct device, kobj);
4168         struct Scsi_Host *shost = class_to_shost(cdev);
4169         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4170         struct ipr_hostrcb *hostrcb;
4171         unsigned long lock_flags = 0;
4172
4173         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4174         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4175                                         struct ipr_hostrcb, queue);
4176         if (!hostrcb) {
4177                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4178                 return count;
4179         }
4180
4181         /* Reclaim hostrcb before exit */
4182         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4183         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4184         return count;
4185 }
4186
4187 static struct bin_attribute ipr_ioa_async_err_log = {
4188         .attr = {
4189                 .name =         "async_err_log",
4190                 .mode =         S_IRUGO | S_IWUSR,
4191         },
4192         .size = 0,
4193         .read = ipr_read_async_err_log,
4194         .write = ipr_next_async_err_log
4195 };
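
     /*
      * Example usage (illustrative; the host number is hypothetical):
      *
      *   # cat /sys/class/scsi_host/host0/async_err_log      (read oldest entry)
      *   # echo 1 > /sys/class/scsi_host/host0/async_err_log (discard it)
      *
      * A read returns the oldest hostrcb on the report queue without
      * consuming it; any write moves that hostrcb back to the free queue
      * so the next read returns the following entry.
      */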
4196
4197 static struct device_attribute *ipr_ioa_attrs[] = {
4198         &ipr_fw_version_attr,
4199         &ipr_log_level_attr,
4200         &ipr_diagnostics_attr,
4201         &ipr_ioa_state_attr,
4202         &ipr_ioa_reset_attr,
4203         &ipr_update_fw_attr,
4204         &ipr_ioa_fw_type_attr,
4205         &ipr_iopoll_weight_attr,
4206         NULL,
4207 };
4208
4209 #ifdef CONFIG_SCSI_IPR_DUMP
4210 /**
4211  * ipr_read_dump - Dump the adapter
4212  * @filp:               open sysfs file
4213  * @kobj:               kobject struct
4214  * @bin_attr:           bin_attribute struct
4215  * @buf:                buffer
4216  * @off:                offset
4217  * @count:              buffer size
4218  *
4219  * Return value:
4220  *      number of bytes printed to buffer
4221  **/
4222 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4223                              struct bin_attribute *bin_attr,
4224                              char *buf, loff_t off, size_t count)
4225 {
4226         struct device *cdev = container_of(kobj, struct device, kobj);
4227         struct Scsi_Host *shost = class_to_shost(cdev);
4228         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4229         struct ipr_dump *dump;
4230         unsigned long lock_flags = 0;
4231         char *src;
4232         int len, sdt_end;
4233         size_t rc = count;
4234
4235         if (!capable(CAP_SYS_ADMIN))
4236                 return -EACCES;
4237
4238         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4239         dump = ioa_cfg->dump;
4240
4241         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4242                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4243                 return 0;
4244         }
4245         kref_get(&dump->kref);
4246         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4247
4248         if (off > dump->driver_dump.hdr.len) {
4249                 kref_put(&dump->kref, ipr_release_dump);
4250                 return 0;
4251         }
4252
4253         if (off + count > dump->driver_dump.hdr.len) {
4254                 count = dump->driver_dump.hdr.len - off;
4255                 rc = count;
4256         }
4257
4258         if (count && off < sizeof(dump->driver_dump)) {
4259                 if (off + count > sizeof(dump->driver_dump))
4260                         len = sizeof(dump->driver_dump) - off;
4261                 else
4262                         len = count;
4263                 src = (u8 *)&dump->driver_dump + off;
4264                 memcpy(buf, src, len);
4265                 buf += len;
4266                 off += len;
4267                 count -= len;
4268         }
4269
4270         off -= sizeof(dump->driver_dump);
4271
4272         if (ioa_cfg->sis64)
4273                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4274                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4275                            sizeof(struct ipr_sdt_entry));
4276         else
4277                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4278                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4279
4280         if (count && off < sdt_end) {
4281                 if (off + count > sdt_end)
4282                         len = sdt_end - off;
4283                 else
4284                         len = count;
4285                 src = (u8 *)&dump->ioa_dump + off;
4286                 memcpy(buf, src, len);
4287                 buf += len;
4288                 off += len;
4289                 count -= len;
4290         }
4291
4292         off -= sdt_end;
4293
4294         while (count) {
4295                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4296                         len = PAGE_ALIGN(off) - off;
4297                 else
4298                         len = count;
4299                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4300                 src += off & ~PAGE_MASK;
4301                 memcpy(buf, src, len);
4302                 buf += len;
4303                 off += len;
4304                 count -= len;
4305         }
4306
4307         kref_put(&dump->kref, ipr_release_dump);
4308         return rc;
4309 }
4310
4311 /**
4312  * ipr_alloc_dump - Prepare for adapter dump
4313  * @ioa_cfg:    ioa config struct
4314  *
4315  * Return value:
4316  *      0 on success / other on failure
4317  **/
4318 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4319 {
4320         struct ipr_dump *dump;
4321         __be32 **ioa_data;
4322         unsigned long lock_flags = 0;
4323
4324         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4325
4326         if (!dump) {
4327                 ipr_err("Dump memory allocation failed\n");
4328                 return -ENOMEM;
4329         }
4330
4331         if (ioa_cfg->sis64)
4332                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4333         else
4334                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4335
4336         if (!ioa_data) {
4337                 ipr_err("Dump memory allocation failed\n");
4338                 kfree(dump);
4339                 return -ENOMEM;
4340         }
4341
4342         dump->ioa_dump.ioa_data = ioa_data;
4343
4344         kref_init(&dump->kref);
4345         dump->ioa_cfg = ioa_cfg;
4346
4347         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4348
4349         if (INACTIVE != ioa_cfg->sdt_state) {
4350                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4351                 vfree(dump->ioa_dump.ioa_data);
4352                 kfree(dump);
4353                 return 0;
4354         }
4355
4356         ioa_cfg->dump = dump;
4357         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4358         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4359                 ioa_cfg->dump_taken = 1;
4360                 schedule_work(&ioa_cfg->work_q);
4361         }
4362         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4363
4364         return 0;
4365 }
4366
4367 /**
4368  * ipr_free_dump - Free adapter dump memory
4369  * @ioa_cfg:    ioa config struct
4370  *
4371  * Return value:
4372  *      0 on success / other on failure
4373  **/
4374 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4375 {
4376         struct ipr_dump *dump;
4377         unsigned long lock_flags = 0;
4378
4379         ENTER;
4380
4381         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4382         dump = ioa_cfg->dump;
4383         if (!dump) {
4384                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4385                 return 0;
4386         }
4387
4388         ioa_cfg->dump = NULL;
4389         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4390
4391         kref_put(&dump->kref, ipr_release_dump);
4392
4393         LEAVE;
4394         return 0;
4395 }
4396
4397 /**
4398  * ipr_write_dump - Setup dump state of adapter
4399  * @filp:               open sysfs file
4400  * @kobj:               kobject struct
4401  * @bin_attr:           bin_attribute struct
4402  * @buf:                buffer
4403  * @off:                offset
4404  * @count:              buffer size
4405  *
4406  * Return value:
4407  *      count on success / other on failure
4408  **/
4409 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4410                               struct bin_attribute *bin_attr,
4411                               char *buf, loff_t off, size_t count)
4412 {
4413         struct device *cdev = container_of(kobj, struct device, kobj);
4414         struct Scsi_Host *shost = class_to_shost(cdev);
4415         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4416         int rc;
4417
4418         if (!capable(CAP_SYS_ADMIN))
4419                 return -EACCES;
4420
4421         if (buf[0] == '1')
4422                 rc = ipr_alloc_dump(ioa_cfg);
4423         else if (buf[0] == '0')
4424                 rc = ipr_free_dump(ioa_cfg);
4425         else
4426                 return -EINVAL;
4427
4428         if (rc)
4429                 return rc;
4430         else
4431                 return count;
4432 }
4433
4434 static struct bin_attribute ipr_dump_attr = {
4435         .attr = {
4436                 .name = "dump",
4437                 .mode = S_IRUSR | S_IWUSR,
4438         },
4439         .size = 0,
4440         .read = ipr_read_dump,
4441         .write = ipr_write_dump
4442 };
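
     /*
      * Example usage (illustrative; the host number is hypothetical and
      * CONFIG_SCSI_IPR_DUMP must be enabled):
      *
      *   # echo 1 > /sys/class/scsi_host/host0/dump    (prepare dump memory)
      *   # cat /sys/class/scsi_host/host0/dump > ipr.dump
      *   # echo 0 > /sys/class/scsi_host/host0/dump    (free dump memory)
      */
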
4443 #else
4444 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4445 #endif
4446
4447 /**
4448  * ipr_change_queue_depth - Change the device's queue depth
4449  * @sdev:       scsi device struct
4450  * @qdepth:     depth to set
4452  *
4453  * Return value:
4454  *      actual depth set
4455  **/
4456 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4457 {
4458         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4459         struct ipr_resource_entry *res;
4460         unsigned long lock_flags = 0;
4461
4462         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4463         res = (struct ipr_resource_entry *)sdev->hostdata;
4464
4465         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4466                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4467         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4468
4469         scsi_change_queue_depth(sdev, qdepth);
4470         return sdev->queue_depth;
4471 }
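
     /*
      * ipr_change_queue_depth is wired up as the scsi_host_template
      * ->change_queue_depth hook, so it runs when userspace writes the
      * generic queue_depth attribute, e.g. (path illustrative):
      *
      *   # echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
      */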
4472
4473 /**
4474  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4475  * @dev:        device struct
4476  * @attr:       device attribute structure
4477  * @buf:        buffer
4478  *
4479  * Return value:
4480  *      number of bytes printed to buffer
4481  **/
4482 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4483 {
4484         struct scsi_device *sdev = to_scsi_device(dev);
4485         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4486         struct ipr_resource_entry *res;
4487         unsigned long lock_flags = 0;
4488         ssize_t len = -ENXIO;
4489
4490         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4491         res = (struct ipr_resource_entry *)sdev->hostdata;
4492         if (res)
4493                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4494         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4495         return len;
4496 }
4497
4498 static struct device_attribute ipr_adapter_handle_attr = {
4499         .attr = {
4500                 .name =         "adapter_handle",
4501                 .mode =         S_IRUSR,
4502         },
4503         .show = ipr_show_adapter_handle
4504 };
4505
4506 /**
4507  * ipr_show_resource_path - Show the resource path or the resource address for
4508  *                          this device.
4509  * @dev:        device struct
4510  * @attr:       device attribute structure
4511  * @buf:        buffer
4512  *
4513  * Return value:
4514  *      number of bytes printed to buffer
4515  **/
4516 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4517 {
4518         struct scsi_device *sdev = to_scsi_device(dev);
4519         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4520         struct ipr_resource_entry *res;
4521         unsigned long lock_flags = 0;
4522         ssize_t len = -ENXIO;
4523         char buffer[IPR_MAX_RES_PATH_LENGTH];
4524
4525         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4526         res = (struct ipr_resource_entry *)sdev->hostdata;
4527         if (res && ioa_cfg->sis64)
4528                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4529                                __ipr_format_res_path(res->res_path, buffer,
4530                                                      sizeof(buffer)));
4531         else if (res)
4532                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4533                                res->bus, res->target, res->lun);
4534
4535         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4536         return len;
4537 }
4538
4539 static struct device_attribute ipr_resource_path_attr = {
4540         .attr = {
4541                 .name =         "resource_path",
4542                 .mode =         S_IRUGO,
4543         },
4544         .show = ipr_show_resource_path
4545 };
4546
4547 /**
4548  * ipr_show_device_id - Show the device_id for this device.
4549  * @dev:        device struct
4550  * @attr:       device attribute structure
4551  * @buf:        buffer
4552  *
4553  * Return value:
4554  *      number of bytes printed to buffer
4555  **/
4556 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4557 {
4558         struct scsi_device *sdev = to_scsi_device(dev);
4559         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4560         struct ipr_resource_entry *res;
4561         unsigned long lock_flags = 0;
4562         ssize_t len = -ENXIO;
4563
4564         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4565         res = (struct ipr_resource_entry *)sdev->hostdata;
4566         if (res && ioa_cfg->sis64)
4567                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4568         else if (res)
4569                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4570
4571         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4572         return len;
4573 }
4574
4575 static struct device_attribute ipr_device_id_attr = {
4576         .attr = {
4577                 .name =         "device_id",
4578                 .mode =         S_IRUGO,
4579         },
4580         .show = ipr_show_device_id
4581 };
4582
4583 /**
4584  * ipr_show_resource_type - Show the resource type for this device.
4585  * @dev:        device struct
4586  * @attr:       device attribute structure
4587  * @buf:        buffer
4588  *
4589  * Return value:
4590  *      number of bytes printed to buffer
4591  **/
4592 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4593 {
4594         struct scsi_device *sdev = to_scsi_device(dev);
4595         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4596         struct ipr_resource_entry *res;
4597         unsigned long lock_flags = 0;
4598         ssize_t len = -ENXIO;
4599
4600         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4601         res = (struct ipr_resource_entry *)sdev->hostdata;
4602
4603         if (res)
4604                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4605
4606         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4607         return len;
4608 }
4609
4610 static struct device_attribute ipr_resource_type_attr = {
4611         .attr = {
4612                 .name =         "resource_type",
4613                 .mode =         S_IRUGO,
4614         },
4615         .show = ipr_show_resource_type
4616 };
4617
4618 /**
4619  * ipr_show_raw_mode - Show the device's raw mode setting
4620  * @dev:        class device struct
      * @attr:       device attribute structure
4621  * @buf:        buffer
4622  *
4623  * Return value:
4624  *      number of bytes printed to buffer
4625  **/
4626 static ssize_t ipr_show_raw_mode(struct device *dev,
4627                                  struct device_attribute *attr, char *buf)
4628 {
4629         struct scsi_device *sdev = to_scsi_device(dev);
4630         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4631         struct ipr_resource_entry *res;
4632         unsigned long lock_flags = 0;
4633         ssize_t len;
4634
4635         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4636         res = (struct ipr_resource_entry *)sdev->hostdata;
4637         if (res)
4638                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4639         else
4640                 len = -ENXIO;
4641         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4642         return len;
4643 }
4644
4645 /**
4646  * ipr_store_raw_mode - Change the adapter's raw mode
4647  * @dev:        class device struct
4648  * @buf:        buffer
4649  *
4650  * Return value:
4651  *      number of bytes printed to buffer
4652  **/
4653 static ssize_t ipr_store_raw_mode(struct device *dev,
4654                                   struct device_attribute *attr,
4655                                   const char *buf, size_t count)
4656 {
4657         struct scsi_device *sdev = to_scsi_device(dev);
4658         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4659         struct ipr_resource_entry *res;
4660         unsigned long lock_flags = 0;
4661         ssize_t len;
4662
4663         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4664         res = (struct ipr_resource_entry *)sdev->hostdata;
4665         if (res) {
4666                 if (ipr_is_af_dasd_device(res)) {
4667                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4668                         len = strlen(buf);
4669                         if (res->sdev)
4670                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4671                                         res->raw_mode ? "enabled" : "disabled");
4672                 } else
4673                         len = -EINVAL;
4674         } else
4675                 len = -ENXIO;
4676         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4677         return len;
4678 }
4679
4680 static struct device_attribute ipr_raw_mode_attr = {
4681         .attr = {
4682                 .name =         "raw_mode",
4683                 .mode =         S_IRUGO | S_IWUSR,
4684         },
4685         .show = ipr_show_raw_mode,
4686         .store = ipr_store_raw_mode
4687 };
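
     /*
      * Example usage (illustrative; the sysfs device path is hypothetical):
      *
      *   # echo 1 > /sys/bus/scsi/devices/0:2:0:0/raw_mode
      *   # cat /sys/bus/scsi/devices/0:2:0:0/raw_mode
      *   1
      *
      * Raw mode can only be toggled on AF DASD devices; writing the
      * attribute of any other resource type fails with -EINVAL.
      */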
4688
4689 static struct device_attribute *ipr_dev_attrs[] = {
4690         &ipr_adapter_handle_attr,
4691         &ipr_resource_path_attr,
4692         &ipr_device_id_attr,
4693         &ipr_resource_type_attr,
4694         &ipr_raw_mode_attr,
4695         NULL,
4696 };
4697
4698 /**
4699  * ipr_biosparam - Return the HSC mapping
4700  * @sdev:                       scsi device struct
4701  * @block_device:       block device pointer
4702  * @capacity:           capacity of the device
4703  * @parm:                       Array containing returned HSC values.
4704  *
4705  * This function generates the HSC parms that fdisk uses.
4706  * We want to make sure we return something that places partitions
4707  * on 4k boundaries for best performance with the IOA.
4708  *
4709  * Return value:
4710  *      0 on success
4711  **/
4712 static int ipr_biosparam(struct scsi_device *sdev,
4713                          struct block_device *block_device,
4714                          sector_t capacity, int *parm)
4715 {
4716         int heads, sectors;
4717         sector_t cylinders;
4718
4719         heads = 128;
4720         sectors = 32;
4721
4722         cylinders = capacity;
4723         sector_div(cylinders, (128 * 32));
4724
4725         /* return result */
4726         parm[0] = heads;
4727         parm[1] = sectors;
4728         parm[2] = cylinders;
4729
4730         return 0;
4731 }
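
     /*
      * Worked example for the geometry above: 128 heads * 32 sectors/track
      * gives 4096 sectors (2 MiB with 512-byte sectors) per cylinder, so
      * partitioning tools that align partitions to cylinder boundaries
      * place every partition at a multiple of 4096 sectors, which is
      * 4k aligned.
      */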
4732
4733 /**
4734  * ipr_find_starget - Find target based on bus/target.
4735  * @starget:    scsi target struct
4736  *
4737  * Return value:
4738  *      resource entry pointer if found / NULL if not found
4739  **/
4740 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4741 {
4742         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4743         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4744         struct ipr_resource_entry *res;
4745
4746         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4747                 if ((res->bus == starget->channel) &&
4748                     (res->target == starget->id)) {
4749                         return res;
4750                 }
4751         }
4752
4753         return NULL;
4754 }
4755
4756 static struct ata_port_info sata_port_info;
4757
4758 /**
4759  * ipr_target_alloc - Prepare for commands to a SCSI target
4760  * @starget:    scsi target struct
4761  *
4762  * If the device is a SATA device, this function allocates an
4763  * ATA port with libata, else it does nothing.
4764  *
4765  * Return value:
4766  *      0 on success / non-0 on failure
4767  **/
4768 static int ipr_target_alloc(struct scsi_target *starget)
4769 {
4770         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4771         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4772         struct ipr_sata_port *sata_port;
4773         struct ata_port *ap;
4774         struct ipr_resource_entry *res;
4775         unsigned long lock_flags;
4776
4777         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4778         res = ipr_find_starget(starget);
4779         starget->hostdata = NULL;
4780
4781         if (res && ipr_is_gata(res)) {
4782                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4783                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4784                 if (!sata_port)
4785                         return -ENOMEM;
4786
4787                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4788                 if (ap) {
4789                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4790                         sata_port->ioa_cfg = ioa_cfg;
4791                         sata_port->ap = ap;
4792                         sata_port->res = res;
4793
4794                         res->sata_port = sata_port;
4795                         ap->private_data = sata_port;
4796                         starget->hostdata = sata_port;
4797                 } else {
4798                         kfree(sata_port);
4799                         return -ENOMEM;
4800                 }
4801         }
4802         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4803
4804         return 0;
4805 }
4806
4807 /**
4808  * ipr_target_destroy - Destroy a SCSI target
4809  * @starget:    scsi target struct
4810  *
4811  * If the device was a SATA device, this function frees the libata
4812  * ATA port, else it does nothing.
4813  *
4814  **/
4815 static void ipr_target_destroy(struct scsi_target *starget)
4816 {
4817         struct ipr_sata_port *sata_port = starget->hostdata;
4818         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4819         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4820
4821         if (ioa_cfg->sis64) {
4822                 if (!ipr_find_starget(starget)) {
4823                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4824                                 clear_bit(starget->id, ioa_cfg->array_ids);
4825                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4826                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4827                         else if (starget->channel == 0)
4828                                 clear_bit(starget->id, ioa_cfg->target_ids);
4829                 }
4830         }
4831
4832         if (sata_port) {
4833                 starget->hostdata = NULL;
4834                 ata_sas_port_destroy(sata_port->ap);
4835                 kfree(sata_port);
4836         }
4837 }
4838
4839 /**
4840  * ipr_find_sdev - Find device based on bus/target/lun.
4841  * @sdev:       scsi device struct
4842  *
4843  * Return value:
4844  *      resource entry pointer if found / NULL if not found
4845  **/
4846 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4847 {
4848         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4849         struct ipr_resource_entry *res;
4850
4851         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4852                 if ((res->bus == sdev->channel) &&
4853                     (res->target == sdev->id) &&
4854                     (res->lun == sdev->lun))
4855                         return res;
4856         }
4857
4858         return NULL;
4859 }
4860
4861 /**
4862  * ipr_slave_destroy - Unconfigure a SCSI device
4863  * @sdev:       scsi device struct
4864  *
4865  * Return value:
4866  *      nothing
4867  **/
4868 static void ipr_slave_destroy(struct scsi_device *sdev)
4869 {
4870         struct ipr_resource_entry *res;
4871         struct ipr_ioa_cfg *ioa_cfg;
4872         unsigned long lock_flags = 0;
4873
4874         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4875
4876         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4877         res = (struct ipr_resource_entry *) sdev->hostdata;
4878         if (res) {
4879                 if (res->sata_port)
4880                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4881                 sdev->hostdata = NULL;
4882                 res->sdev = NULL;
4883                 res->sata_port = NULL;
4884         }
4885         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4886 }
4887
4888 /**
4889  * ipr_slave_configure - Configure a SCSI device
4890  * @sdev:       scsi device struct
4891  *
4892  * This function configures the specified scsi device.
4893  *
4894  * Return value:
4895  *      0 on success
4896  **/
4897 static int ipr_slave_configure(struct scsi_device *sdev)
4898 {
4899         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4900         struct ipr_resource_entry *res;
4901         struct ata_port *ap = NULL;
4902         unsigned long lock_flags = 0;
4903         char buffer[IPR_MAX_RES_PATH_LENGTH];
4904
4905         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4906         res = sdev->hostdata;
4907         if (res) {
4908                 if (ipr_is_af_dasd_device(res))
4909                         sdev->type = TYPE_RAID;
4910                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4911                         sdev->scsi_level = 4;
4912                         sdev->no_uld_attach = 1;
4913                 }
4914                 if (ipr_is_vset_device(res)) {
4915                         sdev->scsi_level = SCSI_SPC_3;
4916                         sdev->no_report_opcodes = 1;
4917                         blk_queue_rq_timeout(sdev->request_queue,
4918                                              IPR_VSET_RW_TIMEOUT);
4919                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4920                 }
4921                 if (ipr_is_gata(res) && res->sata_port)
4922                         ap = res->sata_port->ap;
4923                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4924
4925                 if (ap) {
4926                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4927                         ata_sas_slave_configure(sdev, ap);
4928                 }
4929
4930                 if (ioa_cfg->sis64)
4931                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4932                                     ipr_format_res_path(ioa_cfg,
4933                                 res->res_path, buffer, sizeof(buffer)));
4934                 return 0;
4935         }
4936         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4937         return 0;
4938 }
4939
4940 /**
4941  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4942  * @sdev:       scsi device struct
4943  *
4944  * This function initializes an ATA port so that future commands
4945  * sent through queuecommand will work.
4946  *
4947  * Return value:
4948  *      0 on success
4949  **/
4950 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4951 {
4952         struct ipr_sata_port *sata_port = NULL;
4953         int rc = -ENXIO;
4954
4955         ENTER;
4956         if (sdev->sdev_target)
4957                 sata_port = sdev->sdev_target->hostdata;
4958         if (sata_port) {
4959                 rc = ata_sas_port_init(sata_port->ap);
4960                 if (rc == 0)
4961                         rc = ata_sas_sync_probe(sata_port->ap);
4962         }
4963
4964         if (rc)
4965                 ipr_slave_destroy(sdev);
4966
4967         LEAVE;
4968         return rc;
4969 }
4970
4971 /**
4972  * ipr_slave_alloc - Prepare for commands to a device.
4973  * @sdev:       scsi device struct
4974  *
4975  * This function saves a pointer to the resource entry
4976  * in the scsi device struct if the device exists. We
4977  * can then use this pointer in ipr_queuecommand when
4978  * handling new commands.
4979  *
4980  * Return value:
4981  *      0 on success / -ENXIO if device does not exist
4982  **/
4983 static int ipr_slave_alloc(struct scsi_device *sdev)
4984 {
4985         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4986         struct ipr_resource_entry *res;
4987         unsigned long lock_flags;
4988         int rc = -ENXIO;
4989
4990         sdev->hostdata = NULL;
4991
4992         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4993
4994         res = ipr_find_sdev(sdev);
4995         if (res) {
4996                 res->sdev = sdev;
4997                 res->add_to_ml = 0;
4998                 res->in_erp = 0;
4999                 sdev->hostdata = res;
5000                 if (!ipr_is_naca_model(res))
5001                         res->needs_sync_complete = 1;
5002                 rc = 0;
5003                 if (ipr_is_gata(res)) {
5004                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5005                         return ipr_ata_slave_alloc(sdev);
5006                 }
5007         }
5008
5009         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5010
5011         return rc;
5012 }
5013
5014 /**
5015  * ipr_match_lun - Match function for specified LUN
5016  * @ipr_cmd:    ipr command struct
5017  * @device:             device to match (sdev)
5018  *
5019  * Returns:
5020  *      1 if command matches sdev / 0 if command does not match sdev
5021  **/
5022 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5023 {
5024         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5025                 return 1;
5026         return 0;
5027 }
5028
5029 /**
5030  * ipr_cmnd_is_free - Check if a command is free or not
5031  * @ipr_cmd:    ipr command struct
5032  *
5033  * Returns:
5034  *      true / false
5035  **/
5036 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5037 {
5038         struct ipr_cmnd *loop_cmd;
5039
5040         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5041                 if (loop_cmd == ipr_cmd)
5042                         return true;
5043         }
5044
5045         return false;
5046 }
5047
5048 /**
5049  * ipr_match_res - Match function for specified resource entry
5050  * @ipr_cmd:    ipr command struct
5051  * @resource:   resource entry to match
5052  *
5053  * Returns:
5054  *      1 if command matches the resource entry / 0 if it does not
5055  **/
5056 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5057 {
5058         struct ipr_resource_entry *res = resource;
5059
5060         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5061                 return 1;
5062         return 0;
5063 }
5064
5065 /**
5066  * ipr_wait_for_ops - Wait for matching commands to complete
5067  * @ioa_cfg:    ioa config struct
5068  * @device:             device to match (sdev)
5069  * @match:              match function to use
5070  *
5071  * Returns:
5072  *      SUCCESS / FAILED
5073  **/
5074 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5075                             int (*match)(struct ipr_cmnd *, void *))
5076 {
5077         struct ipr_cmnd *ipr_cmd;
5078         int wait, i;
5079         unsigned long flags;
5080         struct ipr_hrr_queue *hrrq;
5081         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5082         DECLARE_COMPLETION_ONSTACK(comp);
5083
5084         ENTER;
5085         do {
5086                 wait = 0;
5087
5088                 for_each_hrrq(hrrq, ioa_cfg) {
5089                         spin_lock_irqsave(hrrq->lock, flags);
5090                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5091                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5092                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5093                                         if (match(ipr_cmd, device)) {
5094                                                 ipr_cmd->eh_comp = &comp;
5095                                                 wait++;
5096                                         }
5097                                 }
5098                         }
5099                         spin_unlock_irqrestore(hrrq->lock, flags);
5100                 }
5101
5102                 if (wait) {
5103                         timeout = wait_for_completion_timeout(&comp, timeout);
5104
5105                         if (!timeout) {
5106                                 wait = 0;
5107
5108                                 for_each_hrrq(hrrq, ioa_cfg) {
5109                                         spin_lock_irqsave(hrrq->lock, flags);
5110                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5111                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5112                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5113                                                         if (match(ipr_cmd, device)) {
5114                                                                 ipr_cmd->eh_comp = NULL;
5115                                                                 wait++;
5116                                                         }
5117                                                 }
5118                                         }
5119                                         spin_unlock_irqrestore(hrrq->lock, flags);
5120                                 }
5121
5122                                 if (wait)
5123                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5124                                 LEAVE;
5125                                 return wait ? FAILED : SUCCESS;
5126                         }
5127                 }
5128         } while (wait);
5129
5130         LEAVE;
5131         return SUCCESS;
5132 }
5133
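     /**
      * ipr_eh_host_reset - Reset the host adapter
      * @cmd:        scsi command struct
      *
      * Return value:
      *      SUCCESS / FAILED
      **/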
5134 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5135 {
5136         struct ipr_ioa_cfg *ioa_cfg;
5137         unsigned long lock_flags = 0;
5138         int rc = SUCCESS;
5139
5140         ENTER;
5141         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5142         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5143
5144         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5145                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5146                 dev_err(&ioa_cfg->pdev->dev,
5147                         "Adapter being reset as a result of error recovery.\n");
5148
5149                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5150                         ioa_cfg->sdt_state = GET_DUMP;
5151         }
5152
5153         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5154         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5155         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5156
5157         /* If we got hit with a host reset while we were already resetting
5158          * the adapter for some reason and that reset failed, fail this one too. */
5159         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5160                 ipr_trace;
5161                 rc = FAILED;
5162         }
5163
5164         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5165         LEAVE;
5166         return rc;
5167 }
5168
5169 /**
5170  * ipr_device_reset - Reset the device
5171  * @ioa_cfg:    ioa config struct
5172  * @res:                resource entry struct
5173  *
5174  * This function issues a device reset to the affected device.
5175  * If the device is a SCSI device, a LUN reset will be sent
5176  * to the device first. If that does not work, a target reset
5177  * will be sent. If the device is a SATA device, a PHY reset will
5178  * be sent.
5179  *
5180  * Return value:
5181  *      0 on success / non-zero on failure
5182  **/
5183 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5184                             struct ipr_resource_entry *res)
5185 {
5186         struct ipr_cmnd *ipr_cmd;
5187         struct ipr_ioarcb *ioarcb;
5188         struct ipr_cmd_pkt *cmd_pkt;
5189         struct ipr_ioarcb_ata_regs *regs;
5190         u32 ioasc;
5191
5192         ENTER;
5193         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5194         ioarcb = &ipr_cmd->ioarcb;
5195         cmd_pkt = &ioarcb->cmd_pkt;
5196
5197         if (ipr_cmd->ioa_cfg->sis64) {
5198                 regs = &ipr_cmd->i.ata_ioadl.regs;
5199                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5200         } else
5201                 regs = &ioarcb->u.add_data.u.regs;
5202
5203         ioarcb->res_handle = res->res_handle;
5204         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5205         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5206         if (ipr_is_gata(res)) {
5207                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5208                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5209                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5210         }
5211
5212         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5213         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5214         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5215         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5216                 if (ipr_cmd->ioa_cfg->sis64)
5217                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5218                                sizeof(struct ipr_ioasa_gata));
5219                 else
5220                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5221                                sizeof(struct ipr_ioasa_gata));
5222         }
5223
5224         LEAVE;
5225         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5226 }
5227
5228 /**
5229  * ipr_sata_reset - Reset the SATA port
5230  * @link:       SATA link to reset
5231  * @classes:    class of the attached device
      * @deadline:   unused
5232  *
5233  * This function issues a SATA phy reset to the affected ATA link.
5234  *
5235  * Return value:
5236  *      0 on success / non-zero on failure
5237  **/
5238 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5239                                 unsigned long deadline)
5240 {
5241         struct ipr_sata_port *sata_port = link->ap->private_data;
5242         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5243         struct ipr_resource_entry *res;
5244         unsigned long lock_flags = 0;
5245         int rc = -ENXIO, ret;
5246
5247         ENTER;
5248         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5249         while (ioa_cfg->in_reset_reload) {
5250                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5251                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5252                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5253         }
5254
5255         res = sata_port->res;
5256         if (res) {
5257                 rc = ipr_device_reset(ioa_cfg, res);
5258                 *classes = res->ata_class;
5259                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5260
5261                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5262                 if (ret != SUCCESS) {
5263                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5264                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5265                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5266
5267                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5268                 }
5269         } else
5270                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5271
5272         LEAVE;
5273         return rc;
5274 }
5275
5276 /**
5277  * __ipr_eh_dev_reset - Reset the device
5278  * @scsi_cmd:   scsi command struct
5279  *
5280  * This function issues a device reset to the affected device.
5281  * A LUN reset will be sent to the device first. If that does
5282  * not work, a target reset will be sent.
5283  *
5284  * Return value:
5285  *      SUCCESS / FAILED
5286  **/
5287 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5288 {
5289         struct ipr_cmnd *ipr_cmd;
5290         struct ipr_ioa_cfg *ioa_cfg;
5291         struct ipr_resource_entry *res;
5292         struct ata_port *ap;
5293         int rc = 0, i;
5294         struct ipr_hrr_queue *hrrq;
5295
5296         ENTER;
5297         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5298         res = scsi_cmd->device->hostdata;
5299
5300         /*
5301          * If we are currently going through reset/reload, return failed. This will force the
5302          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5303          * reset to complete
5304          */
5305         if (ioa_cfg->in_reset_reload)
5306                 return FAILED;
5307         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5308                 return FAILED;
5309
5310         for_each_hrrq(hrrq, ioa_cfg) {
5311                 spin_lock(&hrrq->_lock);
5312                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5313                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5314
5315                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5316                                 if (!ipr_cmd->qc)
5317                                         continue;
5318                                 if (ipr_cmnd_is_free(ipr_cmd))
5319                                         continue;
5320
5321                                 ipr_cmd->done = ipr_sata_eh_done;
5322                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5323                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5324                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5325                                 }
5326                         }
5327                 }
5328                 spin_unlock(&hrrq->_lock);
5329         }
5330         res->resetting_device = 1;
5331         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5332
5333         if (ipr_is_gata(res) && res->sata_port) {
5334                 ap = res->sata_port->ap;
5335                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5336                 ata_std_error_handler(ap);
5337                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5338         } else
5339                 rc = ipr_device_reset(ioa_cfg, res);
5340         res->resetting_device = 0;
5341         res->reset_occurred = 1;
5342
5343         LEAVE;
5344         return rc ? FAILED : SUCCESS;
5345 }
5346
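     /**
      * ipr_eh_dev_reset - Reset the device
      * @cmd:        scsi command struct
      *
      * Return value:
      *      SUCCESS / FAILED
      **/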
5347 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5348 {
5349         int rc;
5350         struct ipr_ioa_cfg *ioa_cfg;
5351         struct ipr_resource_entry *res;
5352
5353         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5354         res = cmd->device->hostdata;
5355
5356         if (!res)
5357                 return FAILED;
5358
5359         spin_lock_irq(cmd->device->host->host_lock);
5360         rc = __ipr_eh_dev_reset(cmd);
5361         spin_unlock_irq(cmd->device->host->host_lock);
5362
5363         if (rc == SUCCESS) {
5364                 if (ipr_is_gata(res) && res->sata_port)
5365                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5366                 else
5367                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5368         }
5369
5370         return rc;
5371 }
5372
5373 /**
5374  * ipr_bus_reset_done - Op done function for bus reset.
5375  * @ipr_cmd:    ipr command struct
5376  *
5377  * This function is the op done function for a bus reset
5378  *
5379  * Return value:
5380  *      none
5381  **/
5382 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5383 {
5384         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5385         struct ipr_resource_entry *res;
5386
5387         ENTER;
5388         if (!ioa_cfg->sis64)
5389                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5390                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5391                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5392                                 break;
5393                         }
5394                 }
5395
5396         /*
5397          * If abort has not completed, indicate the reset has, else call the
5398          * abort's done function to wake the sleeping eh thread
5399          */
5400         if (ipr_cmd->sibling->sibling)
5401                 ipr_cmd->sibling->sibling = NULL;
5402         else
5403                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5404
5405         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5406         LEAVE;
5407 }
5408
5409 /**
5410  * ipr_abort_timeout - An abort task has timed out
5411  * @t:          Timer context used to fetch the ipr command struct
5412  *
5413  * This function handles when an abort task times out. If this
5414  * happens we issue a bus reset since we have resources tied
5415  * up that must be freed before returning to the midlayer.
5416  *
5417  * Return value:
5418  *      none
5419  **/
5420 static void ipr_abort_timeout(struct timer_list *t)
5421 {
5422         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5423         struct ipr_cmnd *reset_cmd;
5424         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5425         struct ipr_cmd_pkt *cmd_pkt;
5426         unsigned long lock_flags = 0;
5427
5428         ENTER;
5429         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5430         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5431                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5432                 return;
5433         }
5434
5435         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5436         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5437         ipr_cmd->sibling = reset_cmd;
5438         reset_cmd->sibling = ipr_cmd;
5439         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5440         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5441         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5442         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5443         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5444
5445         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5446         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5447         LEAVE;
5448 }
5449
5450 /**
5451  * ipr_cancel_op - Cancel specified op
5452  * @scsi_cmd:   scsi command struct
5453  *
5454  * This function cancels the specified op.
5455  *
5456  * Return value:
5457  *      SUCCESS / FAILED
5458  **/
5459 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5460 {
5461         struct ipr_cmnd *ipr_cmd;
5462         struct ipr_ioa_cfg *ioa_cfg;
5463         struct ipr_resource_entry *res;
5464         struct ipr_cmd_pkt *cmd_pkt;
5465         u32 ioasc, int_reg;
5466         int i, op_found = 0;
5467         struct ipr_hrr_queue *hrrq;
5468
5469         ENTER;
5470         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5471         res = scsi_cmd->device->hostdata;
5472
5473         /* If we are currently going through reset/reload, return failed.
5474          * This will force the mid-layer to call ipr_eh_host_reset,
5475          * which will then go to sleep and wait for the reset to complete
5476          */
5477         if (ioa_cfg->in_reset_reload ||
5478             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5479                 return FAILED;
5480         if (!res)
5481                 return FAILED;
5482
5483         /*
5484          * If we are aborting a timed out op, chances are that the timeout was caused
5485          * by an EEH error that has not yet been detected. In such cases, reading
5486          * a register will trigger the EEH recovery infrastructure.
5487          */
5488         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5489
5490         if (!ipr_is_gscsi(res))
5491                 return FAILED;
5492
5493         for_each_hrrq(hrrq, ioa_cfg) {
5494                 spin_lock(&hrrq->_lock);
5495                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5496                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5497                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5498                                         op_found = 1;
5499                                         break;
5500                                 }
5501                         }
5502                 }
5503                 spin_unlock(&hrrq->_lock);
5504         }
5505
5506         if (!op_found)
5507                 return SUCCESS;
5508
5509         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5510         ipr_cmd->ioarcb.res_handle = res->res_handle;
5511         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5512         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5513         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5514         ipr_cmd->u.sdev = scsi_cmd->device;
5515
5516         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5517                     scsi_cmd->cmnd[0]);
5518         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5519         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5520
5521         /*
5522          * If the abort task timed out and we sent a bus reset, we will get
5523          * one of the following responses to the abort
5524          */
5525         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5526                 ioasc = 0;
5527                 ipr_trace;
5528         }
5529
5530         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5531         if (!ipr_is_naca_model(res))
5532                 res->needs_sync_complete = 1;
5533
5534         LEAVE;
5535         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5536 }
5537
5538 /**
5539  * ipr_scan_finished - Check if a device scan is complete
5540  * @shost:              scsi host struct
      * @elapsed_time:       elapsed scan time, in jiffies
5541  *
5542  * Return value:
5543  *      0 if scan in progress / 1 if scan is complete
5544  **/
5545 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5546 {
5547         unsigned long lock_flags;
5548         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5549         int rc = 0;
5550
5551         spin_lock_irqsave(shost->host_lock, lock_flags);
5552         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5553                 rc = 1;
5554         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5555                 rc = 1;
5556         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5557         return rc;
5558 }
5559
5560 /**
5561  * ipr_eh_abort - Abort a single op
5562  * @scsi_cmd:   scsi command struct
5563  *
5564  * Return value:
5565  *      SUCCESS / FAILED
5566  **/
5567 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5568 {
5569         unsigned long flags;
5570         int rc;
5571         struct ipr_ioa_cfg *ioa_cfg;
5572
5573         ENTER;
5574
5575         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5576
5577         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5578         rc = ipr_cancel_op(scsi_cmd);
5579         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5580
5581         if (rc == SUCCESS)
5582                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5583         LEAVE;
5584         return rc;
5585 }
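/*
 * Locking note: ipr_cancel_op() above runs under the host lock, but the
 * wait for outstanding ops happens only after the lock is dropped, since
 * ipr_wait_for_ops() needs to sleep.
 */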
5586
5587 /**
5588  * ipr_handle_other_interrupt - Handle "other" interrupts
5589  * @ioa_cfg:    ioa config struct
5590  * @int_reg:    interrupt register
5591  *
5592  * Return value:
5593  *      IRQ_NONE / IRQ_HANDLED
5594  **/
5595 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5596                                               u32 int_reg)
5597 {
5598         irqreturn_t rc = IRQ_HANDLED;
5599         u32 int_mask_reg;
5600
5601         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5602         int_reg &= ~int_mask_reg;
5603
5604         /* If no operational interrupt is pending on the adapter, ignore it.
5605          * On SIS64 adapters, however, also check for a stage change interrupt.
5606          */
5607         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5608                 if (ioa_cfg->sis64) {
5609                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5610                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5611                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5612
5613                                 /* clear stage change */
5614                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5615                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5616                                 list_del(&ioa_cfg->reset_cmd->queue);
5617                                 del_timer(&ioa_cfg->reset_cmd->timer);
5618                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5619                                 return IRQ_HANDLED;
5620                         }
5621                 }
5622
5623                 return IRQ_NONE;
5624         }
5625
5626         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5627                 /* Mask the interrupt */
5628                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5629                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5630
5631                 list_del(&ioa_cfg->reset_cmd->queue);
5632                 del_timer(&ioa_cfg->reset_cmd->timer);
5633                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5634         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5635                 if (ioa_cfg->clear_isr) {
5636                         if (ipr_debug && printk_ratelimit())
5637                                 dev_err(&ioa_cfg->pdev->dev,
5638                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5639                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5640                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5641                         return IRQ_NONE;
5642                 }
5643         } else {
5644                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5645                         ioa_cfg->ioa_unit_checked = 1;
5646                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5647                         dev_err(&ioa_cfg->pdev->dev,
5648                                 "No Host RRQ. 0x%08X\n", int_reg);
5649                 else
5650                         dev_err(&ioa_cfg->pdev->dev,
5651                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5652
5653                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5654                         ioa_cfg->sdt_state = GET_DUMP;
5655
5656                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5657                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5658         }
5659
5660         return rc;
5661 }
5662
5663 /**
5664  * ipr_isr_eh - Interrupt service routine error handler
5665  * @ioa_cfg:    ioa config struct
5666  * @msg:        message to log
5667  * @number:     number to log with the message
5668  * Return value:
5669  *      none
5670  **/
5671 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5672 {
5673         ioa_cfg->errors_logged++;
5674         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5675
5676         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5677                 ioa_cfg->sdt_state = GET_DUMP;
5678
5679         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5680 }
5681
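/**
 * ipr_process_hrrq - Process responses on an HRR queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list to which completed commands are moved
 *
 * Walks the host request response queue, moving each completed command
 * to @doneq so the caller can run the done functions after dropping
 * the queue lock.
 *
 * Return value:
 *      number of responses processed
 **/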
5682 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5683                                                 struct list_head *doneq)
5684 {
5685         u32 ioasc;
5686         u16 cmd_index;
5687         struct ipr_cmnd *ipr_cmd;
5688         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5689         int num_hrrq = 0;
5690
5691         /* If interrupts are disabled, ignore the interrupt */
5692         if (!hrr_queue->allow_interrupts)
5693                 return 0;
5694
5695         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5696                hrr_queue->toggle_bit) {
5697
5698                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5699                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5700                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5701
5702                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5703                              cmd_index < hrr_queue->min_cmd_id)) {
5704                         ipr_isr_eh(ioa_cfg,
5705                                 "Invalid response handle from IOA: ",
5706                                 cmd_index);
5707                         break;
5708                 }
5709
5710                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5711                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5712
5713                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5714
5715                 list_move_tail(&ipr_cmd->queue, doneq);
5716
5717                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5718                         hrr_queue->hrrq_curr++;
5719                 } else {
5720                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5721                         hrr_queue->toggle_bit ^= 1u;
5722                 }
5723                 num_hrrq++;
5724                 if (budget > 0 && num_hrrq >= budget)
5725                         break;
5726         }
5727
5728         return num_hrrq;
5729 }
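/*
 * Illustrative sketch of the toggle-bit handshake used above, assuming a
 * hypothetical four-entry queue: the adapter posts each response word with
 * the current toggle value, and the host flips hrrq->toggle_bit whenever it
 * wraps from hrrq_end back to hrrq_start, so stale entries still carrying
 * the old value no longer match and the walk stops:
 *
 *      [T|handle] [T|handle] [t|stale] [t|stale]
 *                             ^-- hrrq_curr stops here after the wrap
 */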
5730
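/**
 * ipr_iopoll - irq_poll callback for polled completion processing
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     maximum number of completions to process per poll
 *
 * Processes up to @budget responses from the owning HRR queue and runs
 * their fast done functions outside the queue lock. Polling is completed
 * when fewer than @budget responses were found.
 *
 * Return value:
 *      number of operations completed
 **/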
5731 static int ipr_iopoll(struct irq_poll *iop, int budget)
5732 {
5733         struct ipr_ioa_cfg *ioa_cfg;
5734         struct ipr_hrr_queue *hrrq;
5735         struct ipr_cmnd *ipr_cmd, *temp;
5736         unsigned long hrrq_flags;
5737         int completed_ops;
5738         LIST_HEAD(doneq);
5739
5740         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5741         ioa_cfg = hrrq->ioa_cfg;
5742
5743         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5744         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5745
5746         if (completed_ops < budget)
5747                 irq_poll_complete(iop);
5748         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5749
5750         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5751                 list_del(&ipr_cmd->queue);
5752                 del_timer(&ipr_cmd->timer);
5753                 ipr_cmd->fast_done(ipr_cmd);
5754         }
5755
5756         return completed_ops;
5757 }
5758
5759 /**
5760  * ipr_isr - Interrupt service routine
5761  * @irq:        irq number
5762  * @devp:       pointer to hrr queue struct
5763  *
5764  * Return value:
5765  *      IRQ_NONE / IRQ_HANDLED
5766  **/
5767 static irqreturn_t ipr_isr(int irq, void *devp)
5768 {
5769         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5770         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5771         unsigned long hrrq_flags = 0;
5772         u32 int_reg = 0;
5773         int num_hrrq = 0;
5774         int irq_none = 0;
5775         struct ipr_cmnd *ipr_cmd, *temp;
5776         irqreturn_t rc = IRQ_NONE;
5777         LIST_HEAD(doneq);
5778
5779         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5780         /* If interrupts are disabled, ignore the interrupt */
5781         if (!hrrq->allow_interrupts) {
5782                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5783                 return IRQ_NONE;
5784         }
5785
5786         while (1) {
5787                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5788                         rc = IRQ_HANDLED;
5789
5790                         if (!ioa_cfg->clear_isr)
5791                                 break;
5792
5793                         /* Clear the PCI interrupt */
5794                         num_hrrq = 0;
5795                         do {
5796                                 writel(IPR_PCII_HRRQ_UPDATED,
5797                                      ioa_cfg->regs.clr_interrupt_reg32);
5798                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5799                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5800                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5801
5802                 } else if (rc == IRQ_NONE && irq_none == 0) {
5803                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5804                         irq_none++;
5805                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5806                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5807                         ipr_isr_eh(ioa_cfg,
5808                                 "Error clearing HRRQ: ", num_hrrq);
5809                         rc = IRQ_HANDLED;
5810                         break;
5811                 } else
5812                         break;
5813         }
5814
5815         if (unlikely(rc == IRQ_NONE))
5816                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5817
5818         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5819         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5820                 list_del(&ipr_cmd->queue);
5821                 del_timer(&ipr_cmd->timer);
5822                 ipr_cmd->fast_done(ipr_cmd);
5823         }
5824         return rc;
5825 }
5826
5827 /**
5828  * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5829  * @irq:        irq number
5830  * @devp:       pointer to hrr queue struct
5831  *
5832  * Return value:
5833  *      IRQ_NONE / IRQ_HANDLED
5834  **/
5835 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5836 {
5837         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5838         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5839         unsigned long hrrq_flags = 0;
5840         struct ipr_cmnd *ipr_cmd, *temp;
5841         irqreturn_t rc = IRQ_NONE;
5842         LIST_HEAD(doneq);
5843
5844         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5845
5846         /* If interrupts are disabled, ignore the interrupt */
5847         if (!hrrq->allow_interrupts) {
5848                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5849                 return IRQ_NONE;
5850         }
5851
5852         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5853                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5854                        hrrq->toggle_bit) {
5855                         irq_poll_sched(&hrrq->iopoll);
5856                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5857                         return IRQ_HANDLED;
5858                 }
5859         } else {
5860                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5861                         hrrq->toggle_bit)
5862                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5863                                 rc = IRQ_HANDLED;
5865         }
5866
5867         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5868
5869         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5870                 list_del(&ipr_cmd->queue);
5871                 del_timer(&ipr_cmd->timer);
5872                 ipr_cmd->fast_done(ipr_cmd);
5873         }
5874         return rc;
5875 }
5876
5877 /**
5878  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5879  * @ioa_cfg:    ioa config struct
5880  * @ipr_cmd:    ipr command struct
5881  *
5882  * Return value:
5883  *      0 on success / -1 on failure
5884  **/
5885 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5886                              struct ipr_cmnd *ipr_cmd)
5887 {
5888         int i, nseg;
5889         struct scatterlist *sg;
5890         u32 length;
5891         u32 ioadl_flags = 0;
5892         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5893         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5894         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5895
5896         length = scsi_bufflen(scsi_cmd);
5897         if (!length)
5898                 return 0;
5899
5900         nseg = scsi_dma_map(scsi_cmd);
5901         if (nseg < 0) {
5902                 if (printk_ratelimit())
5903                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5904                 return -1;
5905         }
5906
5907         ipr_cmd->dma_use_sg = nseg;
5908
5909         ioarcb->data_transfer_length = cpu_to_be32(length);
5910         ioarcb->ioadl_len =
5911                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5912
5913         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5914                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5915                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5916         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5917                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5918
5919         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5920                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5921                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5922                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5923         }
5924
5925         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5926         return 0;
5927 }
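/*
 * Illustrative only: for a hypothetical two-segment write, the list built
 * above would end up as
 *
 *      ioadl64[0] = { .flags = WRITE,        .data_len = len0, .address = addr0 };
 *      ioadl64[1] = { .flags = WRITE | LAST, .data_len = len1, .address = addr1 };
 *
 * with ioarcb->data_transfer_length covering the whole buffer and
 * ioarcb->ioadl_len sized for two descriptors (all fields big endian).
 */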
5928
5929 /**
5930  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5931  * @ioa_cfg:    ioa config struct
5932  * @ipr_cmd:    ipr command struct
5933  *
5934  * Return value:
5935  *      0 on success / -1 on failure
5936  **/
5937 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5938                            struct ipr_cmnd *ipr_cmd)
5939 {
5940         int i, nseg;
5941         struct scatterlist *sg;
5942         u32 length;
5943         u32 ioadl_flags = 0;
5944         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5945         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5946         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5947
5948         length = scsi_bufflen(scsi_cmd);
5949         if (!length)
5950                 return 0;
5951
5952         nseg = scsi_dma_map(scsi_cmd);
5953         if (nseg < 0) {
5954                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5955                 return -1;
5956         }
5957
5958         ipr_cmd->dma_use_sg = nseg;
5959
5960         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5961                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5962                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5963                 ioarcb->data_transfer_length = cpu_to_be32(length);
5964                 ioarcb->ioadl_len =
5965                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5966         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5967                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5968                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5969                 ioarcb->read_ioadl_len =
5970                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5971         }
5972
5973         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5974                 ioadl = ioarcb->u.add_data.u.ioadl;
5975                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5976                                     offsetof(struct ipr_ioarcb, u.add_data));
5977                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5978         }
5979
5980         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5981                 ioadl[i].flags_and_data_len =
5982                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5983                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5984         }
5985
5986         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5987         return 0;
5988 }
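/*
 * Design note: when the S/G list fits within the IOARCB's add_data area
 * (the ARRAY_SIZE() check above), the descriptors are inlined and the
 * read/write IOADL address points back into the IOARCB itself, which
 * presumably lets the adapter fetch the descriptors along with the
 * request instead of issuing a separate DMA for them.
 */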
5989
5990 /**
5991  * __ipr_erp_done - Process completion of ERP for a device
5992  * @ipr_cmd:            ipr command struct
5993  *
5994  * This function copies the sense buffer into the scsi_cmd
5995  * struct and pushes the scsi_done function.
5996  *
5997  * Return value:
5998  *      nothing
5999  **/
6000 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6001 {
6002         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6003         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6004         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6005
6006         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6007                 scsi_cmd->result |= (DID_ERROR << 16);
6008                 scmd_printk(KERN_ERR, scsi_cmd,
6009                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6010         } else {
6011                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6012                        SCSI_SENSE_BUFFERSIZE);
6013         }
6014
6015         if (res) {
6016                 if (!ipr_is_naca_model(res))
6017                         res->needs_sync_complete = 1;
6018                 res->in_erp = 0;
6019         }
6020         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6021         scsi_cmd->scsi_done(scsi_cmd);
6022         if (ipr_cmd->eh_comp)
6023                 complete(ipr_cmd->eh_comp);
6024         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6025 }
6026
6027 /**
6028  * ipr_erp_done - Process completion of ERP for a device
6029  * @ipr_cmd:            ipr command struct
6030  *
6031  * This function copies the sense buffer into the scsi_cmd
6032  * struct and pushes the scsi_done function.
6033  *
6034  * Return value:
6035  *      nothing
6036  **/
6037 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6038 {
6039         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6040         unsigned long hrrq_flags;
6041
6042         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6043         __ipr_erp_done(ipr_cmd);
6044         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6045 }
6046
6047 /**
6048  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6049  * @ipr_cmd:    ipr command struct
6050  *
6051  * Return value:
6052  *      none
6053  **/
6054 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6055 {
6056         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6057         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6058         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6059
6060         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6061         ioarcb->data_transfer_length = 0;
6062         ioarcb->read_data_transfer_length = 0;
6063         ioarcb->ioadl_len = 0;
6064         ioarcb->read_ioadl_len = 0;
6065         ioasa->hdr.ioasc = 0;
6066         ioasa->hdr.residual_data_len = 0;
6067
6068         if (ipr_cmd->ioa_cfg->sis64)
6069                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6070                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6071         else {
6072                 ioarcb->write_ioadl_addr =
6073                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6074                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6075         }
6076 }
6077
6078 /**
6079  * __ipr_erp_request_sense - Send request sense to a device
6080  * @ipr_cmd:    ipr command struct
6081  *
6082  * This function sends a request sense to a device as a result
6083  * of a check condition.
6084  *
6085  * Return value:
6086  *      nothing
6087  **/
6088 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6089 {
6090         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6091         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6092
6093         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6094                 __ipr_erp_done(ipr_cmd);
6095                 return;
6096         }
6097
6098         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6099
6100         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6101         cmd_pkt->cdb[0] = REQUEST_SENSE;
6102         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6103         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6104         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6105         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6106
6107         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6108                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6109
6110         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6111                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6112 }
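/*
 * For reference, the CDB built above is a standard 6-byte REQUEST SENSE
 * (assuming the usual SPC layout):
 *
 *      cdb[0] = 0x03 (REQUEST_SENSE)
 *      cdb[1..3] = 0
 *      cdb[4] = allocation length (SCSI_SENSE_BUFFERSIZE)
 *      cdb[5] = 0 (control)
 *
 * The returned sense data lands in ipr_cmd->sense_buffer through the
 * single READ_LAST IOADL entry set up by ipr_init_ioadl().
 */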
6113
6114 /**
6115  * ipr_erp_request_sense - Send request sense to a device
6116  * @ipr_cmd:    ipr command struct
6117  *
6118  * This function sends a request sense to a device as a result
6119  * of a check condition.
6120  *
6121  * Return value:
6122  *      nothing
6123  **/
6124 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6125 {
6126         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6127         unsigned long hrrq_flags;
6128
6129         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6130         __ipr_erp_request_sense(ipr_cmd);
6131         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6132 }
6133
6134 /**
6135  * ipr_erp_cancel_all - Send cancel all to a device
6136  * @ipr_cmd:    ipr command struct
6137  *
6138  * This function sends a cancel all to a device to clear the
6139  * queue. If we are running TCQ on the device, QERR is set to 1,
6140  * which means all outstanding ops have been dropped on the floor.
6141  * Cancel all will return them to us.
6142  *
6143  * Return value:
6144  *      nothing
6145  **/
6146 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6147 {
6148         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6149         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6150         struct ipr_cmd_pkt *cmd_pkt;
6151
6152         res->in_erp = 1;
6153
6154         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6155
6156         if (!scsi_cmd->device->simple_tags) {
6157                 __ipr_erp_request_sense(ipr_cmd);
6158                 return;
6159         }
6160
6161         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6162         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6163         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6164
6165         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6166                    IPR_CANCEL_ALL_TIMEOUT);
6167 }
6168
6169 /**
6170  * ipr_dump_ioasa - Dump contents of IOASA
6171  * @ioa_cfg:    ioa config struct
6172  * @ipr_cmd:    ipr command struct
6173  * @res:                resource entry struct
6174  *
6175  * This function is invoked by the interrupt handler when ops
6176  * fail. It will log the IOASA if appropriate. Only called
6177  * for GPDD ops.
6178  *
6179  * Return value:
6180  *      none
6181  **/
6182 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6183                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6184 {
6185         int i;
6186         u16 data_len;
6187         u32 ioasc, fd_ioasc;
6188         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6189         __be32 *ioasa_data = (__be32 *)ioasa;
6190         int error_index;
6191
6192         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6193         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6194
6195         if (0 == ioasc)
6196                 return;
6197
6198         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6199                 return;
6200
6201         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6202                 error_index = ipr_get_error(fd_ioasc);
6203         else
6204                 error_index = ipr_get_error(ioasc);
6205
6206         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6207                 /* Don't log an error if the IOA already logged one */
6208                 if (ioasa->hdr.ilid != 0)
6209                         return;
6210
6211                 if (!ipr_is_gscsi(res))
6212                         return;
6213
6214                 if (ipr_error_table[error_index].log_ioasa == 0)
6215                         return;
6216         }
6217
6218         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6219
6220         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6221         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6222                 data_len = sizeof(struct ipr_ioasa64);
6223         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6224                 data_len = sizeof(struct ipr_ioasa);
6225
6226         ipr_err("IOASA Dump:\n");
6227
6228         for (i = 0; i < data_len / 4; i += 4) {
6229                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6230                         be32_to_cpu(ioasa_data[i]),
6231                         be32_to_cpu(ioasa_data[i+1]),
6232                         be32_to_cpu(ioasa_data[i+2]),
6233                         be32_to_cpu(ioasa_data[i+3]));
6234         }
6235 }
6236
6237 /**
6238  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6239  * @ipr_cmd:    ipr command struct
6241  *
6242  * Return value:
6243  *      none
6244  **/
6245 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6246 {
6247         u32 failing_lba;
6248         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6249         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6250         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6251         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6252
6253         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6254
6255         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6256                 return;
6257
6258         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6259
6260         if (ipr_is_vset_device(res) &&
6261             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6262             ioasa->u.vset.failing_lba_hi != 0) {
6263                 sense_buf[0] = 0x72;
6264                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6265                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6266                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6267
6268                 sense_buf[7] = 12;
6269                 sense_buf[8] = 0;
6270                 sense_buf[9] = 0x0A;
6271                 sense_buf[10] = 0x80;
6272
6273                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6274
6275                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6276                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6277                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6278                 sense_buf[15] = failing_lba & 0x000000ff;
6279
6280                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6281
6282                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6283                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6284                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6285                 sense_buf[19] = failing_lba & 0x000000ff;
6286         } else {
6287                 sense_buf[0] = 0x70;
6288                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6289                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6290                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6291
6292                 /* Illegal request */
6293                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6294                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6295                         sense_buf[7] = 10;      /* additional length */
6296
6297                         /* IOARCB was in error */
6298                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6299                                 sense_buf[15] = 0xC0;
6300                         else    /* Parameter data was invalid */
6301                                 sense_buf[15] = 0x80;
6302
6303                         sense_buf[16] =
6304                             ((IPR_FIELD_POINTER_MASK &
6305                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6306                         sense_buf[17] =
6307                             (IPR_FIELD_POINTER_MASK &
6308                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6309                 } else {
6310                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6311                                 if (ipr_is_vset_device(res))
6312                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6313                                 else
6314                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6315
6316                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6317                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6318                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6319                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6320                                 sense_buf[6] = failing_lba & 0x000000ff;
6321                         }
6322
6323                         sense_buf[7] = 6;       /* additional length */
6324                 }
6325         }
6326 }
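/*
 * Rough sketch of the two sense layouts generated above (per the usual
 * SPC fixed/descriptor formats):
 *
 *      descriptor format (0x72), vset with a 64-bit failing LBA:
 *        byte 0        0x72; bytes 1-3 key/ASC/ASCQ
 *        bytes 8-19    information descriptor (length 0x0A, 0x80 = valid)
 *                      holding the eight-byte failing LBA
 *
 *      fixed format (0x70):
 *        byte 0        0x70, bit 7 set when bytes 3-6 hold a failing LBA
 *        byte 2 key; bytes 12-13 ASC/ASCQ; bytes 15-17 optionally carry
 *        the field pointer for illegal requests
 */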
6327
6328 /**
6329  * ipr_get_autosense - Copy autosense data to sense buffer
6330  * @ipr_cmd:    ipr command struct
6331  *
6332  * This function copies the autosense buffer to the buffer
6333  * in the scsi_cmd, if there is autosense available.
6334  *
6335  * Return value:
6336  *      1 if autosense was available / 0 if not
6337  **/
6338 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6339 {
6340         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6341         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6342
6343         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6344                 return 0;
6345
6346         if (ipr_cmd->ioa_cfg->sis64)
6347                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6348                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6349                            SCSI_SENSE_BUFFERSIZE));
6350         else
6351                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6352                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6353                            SCSI_SENSE_BUFFERSIZE));
6354         return 1;
6355 }
6356
6357 /**
6358  * ipr_erp_start - Process an error response for a SCSI op
6359  * @ioa_cfg:    ioa config struct
6360  * @ipr_cmd:    ipr command struct
6361  *
6362  * This function determines whether or not to initiate ERP
6363  * on the affected device.
6364  *
6365  * Return value:
6366  *      nothing
6367  **/
6368 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6369                               struct ipr_cmnd *ipr_cmd)
6370 {
6371         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6372         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6373         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6374         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6375
6376         if (!res) {
6377                 __ipr_scsi_eh_done(ipr_cmd);
6378                 return;
6379         }
6380
6381         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6382                 ipr_gen_sense(ipr_cmd);
6383
6384         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6385
6386         switch (masked_ioasc) {
6387         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6388                 if (ipr_is_naca_model(res))
6389                         scsi_cmd->result |= (DID_ABORT << 16);
6390                 else
6391                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6392                 break;
6393         case IPR_IOASC_IR_RESOURCE_HANDLE:
6394         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6395                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6396                 break;
6397         case IPR_IOASC_HW_SEL_TIMEOUT:
6398                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6399                 if (!ipr_is_naca_model(res))
6400                         res->needs_sync_complete = 1;
6401                 break;
6402         case IPR_IOASC_SYNC_REQUIRED:
6403                 if (!res->in_erp)
6404                         res->needs_sync_complete = 1;
6405                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6406                 break;
6407         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6408         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6409                 /*
6410                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6411                  * so SCSI mid-layer and upper layers handle it accordingly.
6412                  */
6413                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6414                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6415                 break;
6416         case IPR_IOASC_BUS_WAS_RESET:
6417         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6418                 /*
6419                  * Report the bus reset and ask for a retry. The device
6420                  * will give CC/UA the next command.
6421                  */
6422                 if (!res->resetting_device)
6423                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6424                 scsi_cmd->result |= (DID_ERROR << 16);
6425                 if (!ipr_is_naca_model(res))
6426                         res->needs_sync_complete = 1;
6427                 break;
6428         case IPR_IOASC_HW_DEV_BUS_STATUS:
6429                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6430                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6431                         if (!ipr_get_autosense(ipr_cmd)) {
6432                                 if (!ipr_is_naca_model(res)) {
6433                                         ipr_erp_cancel_all(ipr_cmd);
6434                                         return;
6435                                 }
6436                         }
6437                 }
6438                 if (!ipr_is_naca_model(res))
6439                         res->needs_sync_complete = 1;
6440                 break;
6441         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6442                 break;
6443         case IPR_IOASC_IR_NON_OPTIMIZED:
6444                 if (res->raw_mode) {
6445                         res->raw_mode = 0;
6446                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6447                 } else
6448                         scsi_cmd->result |= (DID_ERROR << 16);
6449                 break;
6450         default:
6451                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6452                         scsi_cmd->result |= (DID_ERROR << 16);
6453                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6454                         res->needs_sync_complete = 1;
6455                 break;
6456         }
6457
6458         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6459         scsi_cmd->scsi_done(scsi_cmd);
6460         if (ipr_cmd->eh_comp)
6461                 complete(ipr_cmd->eh_comp);
6462         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6463 }
6464
6465 /**
6466  * ipr_scsi_done - mid-layer done function
6467  * @ipr_cmd:    ipr command struct
6468  *
6469  * This function is invoked by the interrupt handler for
6470  * ops generated by the SCSI mid-layer
6471  *
6472  * Return value:
6473  *      none
6474  **/
6475 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6476 {
6477         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6478         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6479         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6480         unsigned long lock_flags;
6481
6482         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6483
6484         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6485                 scsi_dma_unmap(scsi_cmd);
6486
6487                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6488                 scsi_cmd->scsi_done(scsi_cmd);
6489                 if (ipr_cmd->eh_comp)
6490                         complete(ipr_cmd->eh_comp);
6491                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6492                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6493         } else {
6494                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6495                 spin_lock(&ipr_cmd->hrrq->_lock);
6496                 ipr_erp_start(ioa_cfg, ipr_cmd);
6497                 spin_unlock(&ipr_cmd->hrrq->_lock);
6498                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6499         }
6500 }
6501
6502 /**
6503  * ipr_queuecommand - Queue a mid-layer request
6504  * @shost:              scsi host struct
6505  * @scsi_cmd:   scsi command struct
6506  *
6507  * This function queues a request generated by the mid-layer.
6508  *
6509  * Return value:
6510  *      0 on success
6511  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6512  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6513  **/
6514 static int ipr_queuecommand(struct Scsi_Host *shost,
6515                             struct scsi_cmnd *scsi_cmd)
6516 {
6517         struct ipr_ioa_cfg *ioa_cfg;
6518         struct ipr_resource_entry *res;
6519         struct ipr_ioarcb *ioarcb;
6520         struct ipr_cmnd *ipr_cmd;
6521         unsigned long hrrq_flags, lock_flags;
6522         int rc;
6523         struct ipr_hrr_queue *hrrq;
6524         int hrrq_id;
6525
6526         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6527
6528         scsi_cmd->result = (DID_OK << 16);
6529         res = scsi_cmd->device->hostdata;
6530
6531         if (ipr_is_gata(res) && res->sata_port) {
6532                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6533                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6534                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6535                 return rc;
6536         }
6537
6538         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6539         hrrq = &ioa_cfg->hrrq[hrrq_id];
6540
6541         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6542         /*
6543          * We are currently blocking all devices due to a host reset.
6544          * We have told the host to stop giving us new requests, but
6545          * ERP ops don't count. FIXME
6546          */
6547         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6548                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6549                 return SCSI_MLQUEUE_HOST_BUSY;
6550         }
6551
6552         /*
6553          * FIXME - Create scsi_set_host_offline interface
6554          *  and the ioa_is_dead check can be removed
6555          */
6556         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6557                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6558                 goto err_nodev;
6559         }
6560
6561         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6562         if (ipr_cmd == NULL) {
6563                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6564                 return SCSI_MLQUEUE_HOST_BUSY;
6565         }
6566         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6567
6568         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6569         ioarcb = &ipr_cmd->ioarcb;
6570
6571         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6572         ipr_cmd->scsi_cmd = scsi_cmd;
6573         ipr_cmd->done = ipr_scsi_eh_done;
6574
6575         if (ipr_is_gscsi(res)) {
6576                 if (scsi_cmd->underflow == 0)
6577                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6578
6579                 if (res->reset_occurred) {
6580                         res->reset_occurred = 0;
6581                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6582                 }
6583         }
6584
6585         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6586                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6587
6588                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6589                 if (scsi_cmd->flags & SCMD_TAGGED)
6590                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6591                 else
6592                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6593         }
6594
6595         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6596             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6597                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6598         }
6599         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6600                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6601
6602                 if (scsi_cmd->underflow == 0)
6603                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6604         }
6605
6606         if (ioa_cfg->sis64)
6607                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6608         else
6609                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6610
6611         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6612         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6613                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6614                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6615                 if (!rc)
6616                         scsi_dma_unmap(scsi_cmd);
6617                 return SCSI_MLQUEUE_HOST_BUSY;
6618         }
6619
6620         if (unlikely(hrrq->ioa_is_dead)) {
6621                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6622                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6623                 scsi_dma_unmap(scsi_cmd);
6624                 goto err_nodev;
6625         }
6626
6627         ioarcb->res_handle = res->res_handle;
6628         if (res->needs_sync_complete) {
6629                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6630                 res->needs_sync_complete = 0;
6631         }
6632         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6633         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6634         ipr_send_command(ipr_cmd);
6635         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6636         return 0;
6637
6638 err_nodev:
6639         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6640         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6641         scsi_cmd->result = (DID_NO_CONNECT << 16);
6642         scsi_cmd->scsi_done(scsi_cmd);
6643         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6644         return 0;
6645 }
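/*
 * Design note on the fast path above: only the per-HRRQ lock is taken,
 * and the scatter/gather list is built with no lock held at all, so
 * allow_cmds is re-checked under the lock right before the command is
 * sent and the command is unwound (freed and unmapped) if a reset
 * started in that window.
 */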
6646
6647 /**
6648  * ipr_ioctl - IOCTL handler
6649  * @sdev:       scsi device struct
6650  * @cmd:        IOCTL cmd
6651  * @arg:        IOCTL arg
6652  *
6653  * Return value:
6654  *      0 on success / other on failure
6655  **/
6656 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6657 {
6658         struct ipr_resource_entry *res;
6659
6660         res = (struct ipr_resource_entry *)sdev->hostdata;
6661         if (res && ipr_is_gata(res)) {
6662                 if (cmd == HDIO_GET_IDENTITY)
6663                         return -ENOTTY;
6664                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6665         }
6666
6667         return -EINVAL;
6668 }
6669
6670 /**
6671  * ipr_ioa_info - Get information about the card/driver
6672  * @host:       scsi host struct
6673  *
6674  * Return value:
6675  *      pointer to buffer with description string
6676  **/
6677 static const char *ipr_ioa_info(struct Scsi_Host *host)
6678 {
6679         static char buffer[512];
6680         struct ipr_ioa_cfg *ioa_cfg;
6681         unsigned long lock_flags = 0;
6682
6683         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6684
6685         spin_lock_irqsave(host->host_lock, lock_flags);
6686         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6687         spin_unlock_irqrestore(host->host_lock, lock_flags);
6688
6689         return buffer;
6690 }
6691
6692 static struct scsi_host_template driver_template = {
6693         .module = THIS_MODULE,
6694         .name = "IPR",
6695         .info = ipr_ioa_info,
6696         .ioctl = ipr_ioctl,
6697         .queuecommand = ipr_queuecommand,
6698         .eh_abort_handler = ipr_eh_abort,
6699         .eh_device_reset_handler = ipr_eh_dev_reset,
6700         .eh_host_reset_handler = ipr_eh_host_reset,
6701         .slave_alloc = ipr_slave_alloc,
6702         .slave_configure = ipr_slave_configure,
6703         .slave_destroy = ipr_slave_destroy,
6704         .scan_finished = ipr_scan_finished,
6705         .target_alloc = ipr_target_alloc,
6706         .target_destroy = ipr_target_destroy,
6707         .change_queue_depth = ipr_change_queue_depth,
6708         .bios_param = ipr_biosparam,
6709         .can_queue = IPR_MAX_COMMANDS,
6710         .this_id = -1,
6711         .sg_tablesize = IPR_MAX_SGLIST,
6712         .max_sectors = IPR_IOA_MAX_SECTORS,
6713         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6714         .use_clustering = ENABLE_CLUSTERING,
6715         .shost_attrs = ipr_ioa_attrs,
6716         .sdev_attrs = ipr_dev_attrs,
6717         .proc_name = IPR_NAME,
6718 };
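/*
 * A minimal sketch of how a scsi_host_template like the one above is
 * typically bound to a PCI function during probe (error handling elided;
 * illustrative only, not necessarily the exact sequence ipr uses):
 *
 *      struct Scsi_Host *shost;
 *
 *      shost = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
 *      if (!shost)
 *              return -ENOMEM;
 *      ...initialize hostdata and request IRQs...
 *      if (scsi_add_host(shost, &pdev->dev))
 *              goto out_host_put;
 *      scsi_scan_host(shost);
 */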
6719
6720 /**
6721  * ipr_ata_phy_reset - libata phy_reset handler
6722  * @ap:         ata port to reset
6723  *
6724  **/
6725 static void ipr_ata_phy_reset(struct ata_port *ap)
6726 {
6727         unsigned long flags;
6728         struct ipr_sata_port *sata_port = ap->private_data;
6729         struct ipr_resource_entry *res = sata_port->res;
6730         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6731         int rc;
6732
6733         ENTER;
6734         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6735         while (ioa_cfg->in_reset_reload) {
6736                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6737                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6738                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6739         }
6740
6741         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6742                 goto out_unlock;
6743
6744         rc = ipr_device_reset(ioa_cfg, res);
6745
6746         if (rc) {
6747                 ap->link.device[0].class = ATA_DEV_NONE;
6748                 goto out_unlock;
6749         }
6750
6751         ap->link.device[0].class = res->ata_class;
6752         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6753                 ap->link.device[0].class = ATA_DEV_NONE;
6754
6755 out_unlock:
6756         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6757         LEAVE;
6758 }
6759
6760 /**
6761  * ipr_ata_post_internal - Cleanup after an internal command
6762  * @qc: ATA queued command
6763  *
6764  * Return value:
6765  *      none
6766  **/
6767 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6768 {
6769         struct ipr_sata_port *sata_port = qc->ap->private_data;
6770         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6771         struct ipr_cmnd *ipr_cmd;
6772         struct ipr_hrr_queue *hrrq;
6773         unsigned long flags;
6774
6775         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6776         while (ioa_cfg->in_reset_reload) {
6777                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6778                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6779                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6780         }
6781
6782         for_each_hrrq(hrrq, ioa_cfg) {
6783                 spin_lock(&hrrq->_lock);
6784                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6785                         if (ipr_cmd->qc == qc) {
6786                                 ipr_device_reset(ioa_cfg, sata_port->res);
6787                                 break;
6788                         }
6789                 }
6790                 spin_unlock(&hrrq->_lock);
6791         }
6792         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6793 }
6794
6795 /**
6796  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6797  * @regs:       destination
6798  * @tf: source ATA taskfile
6799  *
6800  * Return value:
6801  *      none
6802  **/
6803 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6804                              struct ata_taskfile *tf)
6805 {
6806         regs->feature = tf->feature;
6807         regs->nsect = tf->nsect;
6808         regs->lbal = tf->lbal;
6809         regs->lbam = tf->lbam;
6810         regs->lbah = tf->lbah;
6811         regs->device = tf->device;
6812         regs->command = tf->command;
6813         regs->hob_feature = tf->hob_feature;
6814         regs->hob_nsect = tf->hob_nsect;
6815         regs->hob_lbal = tf->hob_lbal;
6816         regs->hob_lbam = tf->hob_lbam;
6817         regs->hob_lbah = tf->hob_lbah;
6818         regs->ctl = tf->ctl;
6819 }
6820
6821 /**
6822  * ipr_sata_done - done function for SATA commands
6823  * @ipr_cmd:    ipr command struct
6824  *
6825  * This function is invoked by the interrupt handler for
6826  * ops generated by the SCSI mid-layer to SATA devices
6827  *
6828  * Return value:
6829  *      none
6830  **/
6831 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6832 {
6833         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6834         struct ata_queued_cmd *qc = ipr_cmd->qc;
6835         struct ipr_sata_port *sata_port = qc->ap->private_data;
6836         struct ipr_resource_entry *res = sata_port->res;
6837         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6838
6839         spin_lock(&ipr_cmd->hrrq->_lock);
6840         if (ipr_cmd->ioa_cfg->sis64)
6841                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6842                        sizeof(struct ipr_ioasa_gata));
6843         else
6844                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6845                        sizeof(struct ipr_ioasa_gata));
6846         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6847
6848         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6849                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6850
6851         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6852                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6853         else
6854                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6855         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6856         spin_unlock(&ipr_cmd->hrrq->_lock);
6857         ata_qc_complete(qc);
6858 }
6859
6860 /**
6861  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6862  * @ipr_cmd:    ipr command struct
6863  * @qc:         ATA queued command
6864  *
6865  **/
6866 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6867                                   struct ata_queued_cmd *qc)
6868 {
6869         u32 ioadl_flags = 0;
6870         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6871         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6872         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6873         int len = qc->nbytes;
6874         struct scatterlist *sg;
6875         unsigned int si;
6876         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6877
6878         if (len == 0)
6879                 return;
6880
6881         if (qc->dma_dir == DMA_TO_DEVICE) {
6882                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6883                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6884         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6885                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6886
6887         ioarcb->data_transfer_length = cpu_to_be32(len);
6888         ioarcb->ioadl_len =
6889                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6890         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6891                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6892
6893         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6894                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6895                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6896                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6897
6898                 last_ioadl64 = ioadl64;
6899                 ioadl64++;
6900         }
6901
6902         if (likely(last_ioadl64))
6903                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6904 }
6905
6906 /**
6907  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6908  * @ipr_cmd:    ipr command struct
6909  * @qc:         ATA queued command
6910  *
6911  **/
6912 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6913                                 struct ata_queued_cmd *qc)
6914 {
6915         u32 ioadl_flags = 0;
6916         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6917         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6918         struct ipr_ioadl_desc *last_ioadl = NULL;
6919         int len = qc->nbytes;
6920         struct scatterlist *sg;
6921         unsigned int si;
6922
6923         if (len == 0)
6924                 return;
6925
6926         if (qc->dma_dir == DMA_TO_DEVICE) {
6927                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6928                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6929                 ioarcb->data_transfer_length = cpu_to_be32(len);
6930                 ioarcb->ioadl_len =
6931                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6932         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6933                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6934                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6935                 ioarcb->read_ioadl_len =
6936                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6937         }
6938
6939         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6940                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6941                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6942
6943                 last_ioadl = ioadl;
6944                 ioadl++;
6945         }
6946
6947         if (likely(last_ioadl))
6948                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6949 }
6950
6951 /**
6952  * ipr_qc_defer - Get a free ipr_cmd
6953  * @qc: queued command
6954  *
6955  * Return value:
6956  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6957  **/
6958 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6959 {
6960         struct ata_port *ap = qc->ap;
6961         struct ipr_sata_port *sata_port = ap->private_data;
6962         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6963         struct ipr_cmnd *ipr_cmd;
6964         struct ipr_hrr_queue *hrrq;
6965         int hrrq_id;
6966
6967         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6968         hrrq = &ioa_cfg->hrrq[hrrq_id];
6969
6970         qc->lldd_task = NULL;
6971         spin_lock(&hrrq->_lock);
6972         if (unlikely(hrrq->ioa_is_dead)) {
6973                 spin_unlock(&hrrq->_lock);
6974                 return 0;
6975         }
6976
6977         if (unlikely(!hrrq->allow_cmds)) {
6978                 spin_unlock(&hrrq->_lock);
6979                 return ATA_DEFER_LINK;
6980         }
6981
6982         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6983         if (ipr_cmd == NULL) {
6984                 spin_unlock(&hrrq->_lock);
6985                 return ATA_DEFER_LINK;
6986         }
6987
6988         qc->lldd_task = ipr_cmd;
6989         spin_unlock(&hrrq->_lock);
6990         return 0;
6991 }
6992
6993 /**
6994  * ipr_qc_issue - Issue a SATA qc to a device
6995  * @qc: queued command
6996  *
6997  * Return value:
6998  *      0 on success / AC_ERR_* error code on failure
6999  **/
7000 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7001 {
7002         struct ata_port *ap = qc->ap;
7003         struct ipr_sata_port *sata_port = ap->private_data;
7004         struct ipr_resource_entry *res = sata_port->res;
7005         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7006         struct ipr_cmnd *ipr_cmd;
7007         struct ipr_ioarcb *ioarcb;
7008         struct ipr_ioarcb_ata_regs *regs;
7009
7010         if (qc->lldd_task == NULL)
7011                 ipr_qc_defer(qc);
7012
7013         ipr_cmd = qc->lldd_task;
7014         if (ipr_cmd == NULL)
7015                 return AC_ERR_SYSTEM;
7016
7017         qc->lldd_task = NULL;
7018         spin_lock(&ipr_cmd->hrrq->_lock);
7019         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7020                         ipr_cmd->hrrq->ioa_is_dead)) {
7021                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7022                 spin_unlock(&ipr_cmd->hrrq->_lock);
7023                 return AC_ERR_SYSTEM;
7024         }
7025
7026         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7027         ioarcb = &ipr_cmd->ioarcb;
7028
7029         if (ioa_cfg->sis64) {
7030                 regs = &ipr_cmd->i.ata_ioadl.regs;
7031                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7032         } else
7033                 regs = &ioarcb->u.add_data.u.regs;
7034
7035         memset(regs, 0, sizeof(*regs));
7036         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7037
7038         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7039         ipr_cmd->qc = qc;
7040         ipr_cmd->done = ipr_sata_done;
7041         ipr_cmd->ioarcb.res_handle = res->res_handle;
7042         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7043         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7044         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7045         ipr_cmd->dma_use_sg = qc->n_elem;
7046
7047         if (ioa_cfg->sis64)
7048                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7049         else
7050                 ipr_build_ata_ioadl(ipr_cmd, qc);
7051
7052         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7053         ipr_copy_sata_tf(regs, &qc->tf);
7054         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7055         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7056
7057         switch (qc->tf.protocol) {
7058         case ATA_PROT_NODATA:
7059         case ATA_PROT_PIO:
7060                 break;
7061
7062         case ATA_PROT_DMA:
7063                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7064                 break;
7065
7066         case ATAPI_PROT_PIO:
7067         case ATAPI_PROT_NODATA:
7068                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7069                 break;
7070
7071         case ATAPI_PROT_DMA:
7072                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7073                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7074                 break;
7075
7076         default:
7077                 WARN_ON(1);
7078                 spin_unlock(&ipr_cmd->hrrq->_lock);
7079                 return AC_ERR_INVALID;
7080         }
7081
7082         ipr_send_command(ipr_cmd);
7083         spin_unlock(&ipr_cmd->hrrq->_lock);
7084
7085         return 0;
7086 }
7087
7088 /**
7089  * ipr_qc_fill_rtf - Read result TF
7090  * @qc: ATA queued command
7091  *
7092  * Return value:
7093  *      true
7094  **/
7095 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7096 {
7097         struct ipr_sata_port *sata_port = qc->ap->private_data;
7098         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7099         struct ata_taskfile *tf = &qc->result_tf;
7100
7101         tf->feature = g->error;
7102         tf->nsect = g->nsect;
7103         tf->lbal = g->lbal;
7104         tf->lbam = g->lbam;
7105         tf->lbah = g->lbah;
7106         tf->device = g->device;
7107         tf->command = g->status;
7108         tf->hob_nsect = g->hob_nsect;
7109         tf->hob_lbal = g->hob_lbal;
7110         tf->hob_lbam = g->hob_lbam;
7111         tf->hob_lbah = g->hob_lbah;
7112
7113         return true;
7114 }
7115
7116 static struct ata_port_operations ipr_sata_ops = {
7117         .phy_reset = ipr_ata_phy_reset,
7118         .hardreset = ipr_sata_reset,
7119         .post_internal_cmd = ipr_ata_post_internal,
7120         .qc_prep = ata_noop_qc_prep,
7121         .qc_defer = ipr_qc_defer,
7122         .qc_issue = ipr_qc_issue,
7123         .qc_fill_rtf = ipr_qc_fill_rtf,
7124         .port_start = ata_sas_port_start,
7125         .port_stop = ata_sas_port_stop
7126 };
7127
7128 static struct ata_port_info sata_port_info = {
7129         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7130                           ATA_FLAG_SAS_HOST,
7131         .pio_mask       = ATA_PIO4_ONLY,
7132         .mwdma_mask     = ATA_MWDMA2,
7133         .udma_mask      = ATA_UDMA6,
7134         .port_ops       = &ipr_sata_ops
7135 };
7136
7137 #ifdef CONFIG_PPC_PSERIES
7138 static const u16 ipr_blocked_processors[] = {
7139         PVR_NORTHSTAR,
7140         PVR_PULSAR,
7141         PVR_POWER4,
7142         PVR_ICESTAR,
7143         PVR_SSTAR,
7144         PVR_POWER4p,
7145         PVR_630,
7146         PVR_630p
7147 };
7148
7149 /**
7150  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7151  * @ioa_cfg:    ioa cfg struct
7152  *
7153  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7154  * certain pSeries hardware. This function determines if the given
7155  * adapter is in one of these configurations or not.
7156  *
7157  * Return value:
7158  *      1 if adapter is not supported / 0 if adapter is supported
7159  **/
7160 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7161 {
7162         int i;
7163
7164         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7165                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7166                         if (pvr_version_is(ipr_blocked_processors[i]))
7167                                 return 1;
7168                 }
7169         }
7170         return 0;
7171 }
7172 #else
7173 #define ipr_invalid_adapter(ioa_cfg) 0
7174 #endif
7175
7176 /**
7177  * ipr_ioa_bringdown_done - IOA bring down completion.
7178  * @ipr_cmd:    ipr command struct
7179  *
7180  * This function processes the completion of an adapter bring down.
7181  * It wakes any reset sleepers.
7182  *
7183  * Return value:
7184  *      IPR_RC_JOB_RETURN
7185  **/
7186 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7187 {
7188         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7189         int i;
7190
7191         ENTER;
7192         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7193                 ipr_trace;
7194                 ioa_cfg->scsi_unblock = 1;
7195                 schedule_work(&ioa_cfg->work_q);
7196         }
7197
7198         ioa_cfg->in_reset_reload = 0;
7199         ioa_cfg->reset_retries = 0;
7200         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7201                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7202                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7203                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7204         }
7205         wmb();
7206
7207         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7208         wake_up_all(&ioa_cfg->reset_wait_q);
7209         LEAVE;
7210
7211         return IPR_RC_JOB_RETURN;
7212 }
7213
7214 /**
7215  * ipr_ioa_reset_done - IOA reset completion.
7216  * @ipr_cmd:    ipr command struct
7217  *
7218  * This function processes the completion of an adapter reset.
7219  * It schedules any necessary mid-layer add/removes and
7220  * wakes any reset sleepers.
7221  *
7222  * Return value:
7223  *      IPR_RC_JOB_RETURN
7224  **/
7225 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7226 {
7227         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7228         struct ipr_resource_entry *res;
7229         int j;
7230
7231         ENTER;
7232         ioa_cfg->in_reset_reload = 0;
7233         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7234                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7235                 ioa_cfg->hrrq[j].allow_cmds = 1;
7236                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7237         }
7238         wmb();
7239         ioa_cfg->reset_cmd = NULL;
7240         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7241
7242         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7243                 if (res->add_to_ml || res->del_from_ml) {
7244                         ipr_trace;
7245                         break;
7246                 }
7247         }
7248         schedule_work(&ioa_cfg->work_q);
7249
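        /*
         * Repost the HCAMs: the first IPR_NUM_LOG_HCAMS host RCBs are
         * used for error log data, the remainder for configuration
         * change notifications.
         */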
7250         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7251                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7252                 if (j < IPR_NUM_LOG_HCAMS)
7253                         ipr_send_hcam(ioa_cfg,
7254                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7255                                 ioa_cfg->hostrcb[j]);
7256                 else
7257                         ipr_send_hcam(ioa_cfg,
7258                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7259                                 ioa_cfg->hostrcb[j]);
7260         }
7261
7262         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7263         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7264
7265         ioa_cfg->reset_retries = 0;
7266         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7267         wake_up_all(&ioa_cfg->reset_wait_q);
7268
7269         ioa_cfg->scsi_unblock = 1;
7270         schedule_work(&ioa_cfg->work_q);
7271         LEAVE;
7272         return IPR_RC_JOB_RETURN;
7273 }
7274
7275 /**
7276  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7277  * @supported_dev:      supported device struct
7278  * @vpids:              vendor product id struct
7279  *
7280  * Return value:
7281  *      none
7282  **/
7283 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7284                                  struct ipr_std_inq_vpids *vpids)
7285 {
7286         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7287         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7288         supported_dev->num_records = 1;
7289         supported_dev->data_length =
7290                 cpu_to_be16(sizeof(struct ipr_supported_device));
7291         supported_dev->reserved = 0;
7292 }
7293
7294 /**
7295  * ipr_set_supported_devs - Send Set Supported Devices for a device
7296  * @ipr_cmd:    ipr command struct
7297  *
7298  * This function sends a Set Supported Devices to the adapter
7299  *
7300  * Return value:
7301  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7302  **/
7303 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7304 {
7305         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7306         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7307         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7308         struct ipr_resource_entry *res = ipr_cmd->u.res;
7309
7310         ipr_cmd->job_step = ipr_ioa_reset_done;
7311
7312         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7313                 if (!ipr_is_scsi_disk(res))
7314                         continue;
7315
7316                 ipr_cmd->u.res = res;
7317                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7318
7319                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7320                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7321                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7322
7323                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7324                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7325                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7326                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7327
7328                 ipr_init_ioadl(ipr_cmd,
7329                                ioa_cfg->vpd_cbs_dma +
7330                                  offsetof(struct ipr_misc_cbs, supp_dev),
7331                                sizeof(struct ipr_supported_device),
7332                                IPR_IOADL_FLAGS_WRITE_LAST);
7333
7334                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7335                            IPR_SET_SUP_DEVICE_TIMEOUT);
7336
7337                 if (!ioa_cfg->sis64)
7338                         ipr_cmd->job_step = ipr_set_supported_devs;
7339                 LEAVE;
7340                 return IPR_RC_JOB_RETURN;
7341         }
7342
7343         LEAVE;
7344         return IPR_RC_JOB_CONTINUE;
7345 }
7346
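/*
 * Illustrative sketch, not part of the driver: the shape shared by every
 * job_step stage of the reset/bringup state machine. A stage points
 * job_step at its successor, then either returns IPR_RC_JOB_CONTINUE to
 * run that successor immediately, or issues an adapter command and
 * returns IPR_RC_JOB_RETURN so the state machine resumes via
 * ipr_reset_ioa_job() on completion. The helper and its work_to_do
 * parameter are hypothetical.
 */
static int __maybe_unused ipr_example_job_stage(struct ipr_cmnd *ipr_cmd,
                                                int work_to_do)
{
        ipr_cmd->job_step = ipr_ioa_reset_done;

        if (!work_to_do)
                return IPR_RC_JOB_CONTINUE;     /* run the next stage now */

        /* ... build the IOARCB for this stage here ... */
        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
                   IPR_INTERNAL_TIMEOUT);
        return IPR_RC_JOB_RETURN;               /* resume on completion */
}
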
7347 /**
7348  * ipr_get_mode_page - Locate specified mode page
7349  * @mode_pages: mode page buffer
7350  * @page_code:  page code to find
7351  * @len:                minimum required length for mode page
7352  *
7353  * Return value:
7354  *      pointer to mode page / NULL on failure
7355  **/
7356 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7357                                u32 page_code, u32 len)
7358 {
7359         struct ipr_mode_page_hdr *mode_hdr;
7360         u32 page_length;
7361         u32 length;
7362
7363         if (!mode_pages || (mode_pages->hdr.length == 0))
7364                 return NULL;
7365
7366         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7367         mode_hdr = (struct ipr_mode_page_hdr *)
7368                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7369
7370         while (length) {
7371                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7372                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7373                                 return mode_hdr;
7374                         break;
7375                 } else {
7376                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7377                                        mode_hdr->page_length);
7378                         length -= page_length;
7379                         mode_hdr = (struct ipr_mode_page_hdr *)
7380                                 ((unsigned long)mode_hdr + page_length);
7381                 }
7382         }
7383         return NULL;
7384 }
7385
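/*
 * Illustrative sketch, not part of the driver: the total size in bytes of
 * a mode page as it sits in a MODE SENSE buffer. The page_length field
 * counts only the bytes after the two-byte page header, which is why the
 * walk in ipr_get_mode_page() adds the header size when stepping to the
 * next page.
 */
static u32 __maybe_unused ipr_example_mode_page_size(struct ipr_mode_page_hdr *hdr)
{
        return sizeof(struct ipr_mode_page_hdr) + hdr->page_length;
}
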
7386 /**
7387  * ipr_check_term_power - Check for term power errors
7388  * @ioa_cfg:    ioa config struct
7389  * @mode_pages: IOAFP mode pages buffer
7390  *
7391  * Check the IOAFP's mode page 28 for term power errors
7392  *
7393  * Return value:
7394  *      nothing
7395  **/
7396 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7397                                  struct ipr_mode_pages *mode_pages)
7398 {
7399         int i;
7400         int entry_length;
7401         struct ipr_dev_bus_entry *bus;
7402         struct ipr_mode_page28 *mode_page;
7403
7404         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7405                                       sizeof(struct ipr_mode_page28));
7406
7407         entry_length = mode_page->entry_length;
7408
7409         bus = mode_page->bus;
7410
7411         for (i = 0; i < mode_page->num_entries; i++) {
7412                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7413                         dev_err(&ioa_cfg->pdev->dev,
7414                                 "Term power is absent on scsi bus %d\n",
7415                                 bus->res_addr.bus);
7416                 }
7417
7418                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7419         }
7420 }
7421
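/*
 * Illustrative sketch, not part of the driver: mode page 28 carries a
 * variable-sized array of bus entries, so walks like the one above must
 * advance by the adapter-reported entry_length rather than sizeof(*bus).
 * A hypothetical helper for that step:
 */
static __maybe_unused struct ipr_dev_bus_entry *
ipr_example_next_bus_entry(struct ipr_dev_bus_entry *bus, int entry_length)
{
        return (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
}
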
7422 /**
7423  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7424  * @ioa_cfg:    ioa config struct
7425  *
7426  * Looks through the config table checking for SES devices. If
7427  * the SES device is in the SES table indicating a maximum SCSI
7428  * bus speed, the speed is limited for the bus.
7429  *
7430  * Return value:
7431  *      none
7432  **/
7433 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7434 {
7435         u32 max_xfer_rate;
7436         int i;
7437
7438         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7439                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7440                                                        ioa_cfg->bus_attr[i].bus_width);
7441
7442                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7443                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7444         }
7445 }
7446
7447 /**
7448  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7449  * @ioa_cfg:    ioa config struct
7450  * @mode_pages: mode page 28 buffer
7451  *
7452  * Updates mode page 28 based on driver configuration
7453  *
7454  * Return value:
7455  *      none
7456  **/
7457 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7458                                           struct ipr_mode_pages *mode_pages)
7459 {
7460         int i, entry_length;
7461         struct ipr_dev_bus_entry *bus;
7462         struct ipr_bus_attributes *bus_attr;
7463         struct ipr_mode_page28 *mode_page;
7464
7465         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7466                                       sizeof(struct ipr_mode_page28));
7467
7468         entry_length = mode_page->entry_length;
7469
7470         /* Loop for each device bus entry */
7471         for (i = 0, bus = mode_page->bus;
7472              i < mode_page->num_entries;
7473              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7474                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7475                         dev_err(&ioa_cfg->pdev->dev,
7476                                 "Invalid resource address reported: 0x%08X\n",
7477                                 IPR_GET_PHYS_LOC(bus->res_addr));
7478                         continue;
7479                 }
7480
7481                 bus_attr = &ioa_cfg->bus_attr[i];
7482                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7483                 bus->bus_width = bus_attr->bus_width;
7484                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7485                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7486                 if (bus_attr->qas_enabled)
7487                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7488                 else
7489                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7490         }
7491 }
7492
7493 /**
7494  * ipr_build_mode_select - Build a mode select command
7495  * @ipr_cmd:    ipr command struct
7496  * @res_handle: resource handle to send command to
7497  * @parm:               Byte 1 of Mode Select command (PF/SP bits)
7498  * @dma_addr:   DMA buffer address
7499  * @xfer_len:   data transfer length
7500  *
7501  * Return value:
7502  *      none
7503  **/
7504 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7505                                   __be32 res_handle, u8 parm,
7506                                   dma_addr_t dma_addr, u8 xfer_len)
7507 {
7508         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7509
7510         ioarcb->res_handle = res_handle;
7511         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7512         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7513         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7514         ioarcb->cmd_pkt.cdb[1] = parm;
7515         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7516
7517         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7518 }
7519
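/*
 * Illustrative note, not part of the driver: the six-byte MODE SELECT CDB
 * built above, with parm = 0x11 as passed by the page 24/28 callers below.
 * In CDB byte 1, bit 4 is PF (page format) and bit 0 is SP (save pages).
 *
 *      cdb[0] = 0x15   MODE_SELECT
 *      cdb[1] = 0x11   PF | SP
 *      cdb[2] = 0x00
 *      cdb[3] = 0x00
 *      cdb[4] = len    parameter list length
 *      cdb[5] = 0x00
 */
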
7520 /**
7521  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7522  * @ipr_cmd:    ipr command struct
7523  *
7524  * This function sets up the SCSI bus attributes and sends
7525  * a Mode Select for Page 28 to activate them.
7526  *
7527  * Return value:
7528  *      IPR_RC_JOB_RETURN
7529  **/
7530 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7531 {
7532         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7533         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7534         int length;
7535
7536         ENTER;
7537         ipr_scsi_bus_speed_limit(ioa_cfg);
7538         ipr_check_term_power(ioa_cfg, mode_pages);
7539         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
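        /*
         * The mode data length field in the parameter header is reserved
         * for MODE SELECT, so send the full buffer length in the CDB and
         * zero out the header field before writing the pages back.
         */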
7540         length = mode_pages->hdr.length + 1;
7541         mode_pages->hdr.length = 0;
7542
7543         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7544                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7545                               length);
7546
7547         ipr_cmd->job_step = ipr_set_supported_devs;
7548         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7549                                     struct ipr_resource_entry, queue);
7550         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7551
7552         LEAVE;
7553         return IPR_RC_JOB_RETURN;
7554 }
7555
7556 /**
7557  * ipr_build_mode_sense - Builds a mode sense command
7558  * @ipr_cmd:    ipr command struct
7559  * @res_handle:         resource handle to send command to
7560  * @parm:               Byte 2 of mode sense command
7561  * @dma_addr:   DMA address of mode sense buffer
7562  * @xfer_len:   Size of DMA buffer
7563  *
7564  * Return value:
7565  *      none
7566  **/
7567 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7568                                  __be32 res_handle,
7569                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7570 {
7571         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7572
7573         ioarcb->res_handle = res_handle;
7574         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7575         ioarcb->cmd_pkt.cdb[2] = parm;
7576         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7577         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7578
7579         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7580 }
7581
7582 /**
7583  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7584  * @ipr_cmd:    ipr command struct
7585  *
7586  * This function handles the failure of an IOA bringup command.
7587  *
7588  * Return value:
7589  *      IPR_RC_JOB_RETURN
7590  **/
7591 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7592 {
7593         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7594         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7595
7596         dev_err(&ioa_cfg->pdev->dev,
7597                 "0x%02X failed with IOASC: 0x%08X\n",
7598                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7599
7600         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7601         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7602         return IPR_RC_JOB_RETURN;
7603 }
7604
7605 /**
7606  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7607  * @ipr_cmd:    ipr command struct
7608  *
7609  * This function handles the failure of a Mode Sense to the IOAFP.
7610  * Some adapters do not handle all mode pages.
7611  *
7612  * Return value:
7613  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7614  **/
7615 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7616 {
7617         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7618         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7619
7620         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7621                 ipr_cmd->job_step = ipr_set_supported_devs;
7622                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7623                                             struct ipr_resource_entry, queue);
7624                 return IPR_RC_JOB_CONTINUE;
7625         }
7626
7627         return ipr_reset_cmd_failed(ipr_cmd);
7628 }
7629
7630 /**
7631  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7632  * @ipr_cmd:    ipr command struct
7633  *
7634  * This function sends a Page 28 mode sense to the IOA to
7635  * retrieve SCSI bus attributes.
7636  *
7637  * Return value:
7638  *      IPR_RC_JOB_RETURN
7639  **/
7640 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7641 {
7642         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7643
7644         ENTER;
7645         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7646                              0x28, ioa_cfg->vpd_cbs_dma +
7647                              offsetof(struct ipr_misc_cbs, mode_pages),
7648                              sizeof(struct ipr_mode_pages));
7649
7650         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7651         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7652
7653         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7654
7655         LEAVE;
7656         return IPR_RC_JOB_RETURN;
7657 }
7658
7659 /**
7660  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7661  * @ipr_cmd:    ipr command struct
7662  *
7663  * This function enables dual IOA RAID support if possible.
7664  *
7665  * Return value:
7666  *      IPR_RC_JOB_RETURN
7667  **/
7668 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7669 {
7670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7671         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7672         struct ipr_mode_page24 *mode_page;
7673         int length;
7674
7675         ENTER;
7676         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7677                                       sizeof(struct ipr_mode_page24));
7678
7679         if (mode_page)
7680                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7681
7682         length = mode_pages->hdr.length + 1;
7683         mode_pages->hdr.length = 0;
7684
7685         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7686                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7687                               length);
7688
7689         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7690         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7691
7692         LEAVE;
7693         return IPR_RC_JOB_RETURN;
7694 }
7695
7696 /**
7697  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7698  * @ipr_cmd:    ipr command struct
7699  *
7700  * This function handles the failure of a Mode Sense to the IOAFP.
7701  * Some adapters do not handle all mode pages.
7702  *
7703  * Return value:
7704  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7705  **/
7706 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7707 {
7708         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7709
7710         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7711                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7712                 return IPR_RC_JOB_CONTINUE;
7713         }
7714
7715         return ipr_reset_cmd_failed(ipr_cmd);
7716 }
7717
7718 /**
7719  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7720  * @ipr_cmd:    ipr command struct
7721  *
7722  * This function sends a mode sense to the IOA to retrieve
7723  * the IOA Advanced Function Control mode page.
7724  *
7725  * Return value:
7726  *      IPR_RC_JOB_RETURN
7727  **/
7728 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7729 {
7730         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7731
7732         ENTER;
7733         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7734                              0x24, ioa_cfg->vpd_cbs_dma +
7735                              offsetof(struct ipr_misc_cbs, mode_pages),
7736                              sizeof(struct ipr_mode_pages));
7737
7738         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7739         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7740
7741         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7742
7743         LEAVE;
7744         return IPR_RC_JOB_RETURN;
7745 }
7746
7747 /**
7748  * ipr_init_res_table - Initialize the resource table
7749  * @ipr_cmd:    ipr command struct
7750  *
7751  * This function looks through the existing resource table, comparing
7752  * it with the config table. This function will take care of old/new
7753  * devices and schedule adding/removing them from the mid-layer
7754  * as appropriate.
7755  *
7756  * Return value:
7757  *      IPR_RC_JOB_CONTINUE
7758  **/
7759 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7760 {
7761         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7762         struct ipr_resource_entry *res, *temp;
7763         struct ipr_config_table_entry_wrapper cfgtew;
7764         int entries, found, flag, i;
7765         LIST_HEAD(old_res);
7766
7767         ENTER;
7768         if (ioa_cfg->sis64)
7769                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7770         else
7771                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7772
7773         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7774                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7775
7776         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7777                 list_move_tail(&res->queue, &old_res);
7778
7779         if (ioa_cfg->sis64)
7780                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7781         else
7782                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7783
7784         for (i = 0; i < entries; i++) {
7785                 if (ioa_cfg->sis64)
7786                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7787                 else
7788                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7789                 found = 0;
7790
7791                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7792                         if (ipr_is_same_device(res, &cfgtew)) {
7793                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7794                                 found = 1;
7795                                 break;
7796                         }
7797                 }
7798
7799                 if (!found) {
7800                         if (list_empty(&ioa_cfg->free_res_q)) {
7801                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7802                                 break;
7803                         }
7804
7805                         found = 1;
7806                         res = list_entry(ioa_cfg->free_res_q.next,
7807                                          struct ipr_resource_entry, queue);
7808                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7809                         ipr_init_res_entry(res, &cfgtew);
7810                         res->add_to_ml = 1;
7811                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7812                         res->sdev->allow_restart = 1;
7813
7814                 if (found)
7815                         ipr_update_res_entry(res, &cfgtew);
7816         }
7817
7818         list_for_each_entry_safe(res, temp, &old_res, queue) {
7819                 if (res->sdev) {
7820                         res->del_from_ml = 1;
7821                         res->res_handle = IPR_INVALID_RES_HANDLE;
7822                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7823                 }
7824         }
7825
7826         list_for_each_entry_safe(res, temp, &old_res, queue) {
7827                 ipr_clear_res_target(res);
7828                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7829         }
7830
7831         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7832                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7833         else
7834                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7835
7836         LEAVE;
7837         return IPR_RC_JOB_CONTINUE;
7838 }
7839
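/*
 * Illustrative sketch, not part of the driver: the reconciliation pattern
 * used by ipr_init_res_table(), reduced to its core. Every known entry is
 * parked on a temporary list; entries that reappear in the new config
 * table are moved back, and whatever remains afterwards is stale and gets
 * released. The helper and its parameters are hypothetical.
 */
static void __maybe_unused ipr_example_reconcile(struct list_head *used,
                                                 struct list_head *free)
{
        struct ipr_resource_entry *res, *temp;
        LIST_HEAD(old);

        list_splice_init(used, &old);   /* park all current entries */

        /* ... for each entry of the new config table: move its match
         * from &old back to used, or take a fresh entry from free ... */

        list_for_each_entry_safe(res, temp, &old, queue)
                list_move_tail(&res->queue, free);      /* stale entries */
}
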
7840 /**
7841  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7842  * @ipr_cmd:    ipr command struct
7843  *
7844  * This function sends a Query IOA Configuration command
7845  * to the adapter to retrieve the IOA configuration table.
7846  *
7847  * Return value:
7848  *      IPR_RC_JOB_RETURN
7849  **/
7850 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7851 {
7852         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7853         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7854         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7855         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7856
7857         ENTER;
7858         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7859                 ioa_cfg->dual_raid = 1;
7860         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7861                  ucode_vpd->major_release, ucode_vpd->card_type,
7862                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7863         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7864         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7865
7866         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7867         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7868         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7869         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7870
7871         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7872                        IPR_IOADL_FLAGS_READ_LAST);
7873
7874         ipr_cmd->job_step = ipr_init_res_table;
7875
7876         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7877
7878         LEAVE;
7879         return IPR_RC_JOB_RETURN;
7880 }
7881
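/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/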
7882 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7883 {
7884         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7885
7886         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7887                 return IPR_RC_JOB_CONTINUE;
7888
7889         return ipr_reset_cmd_failed(ipr_cmd);
7890 }
7891
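/**
 * ipr_build_ioa_service_action - Build an IOA service action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/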
7892 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7893                                          __be32 res_handle, u8 sa_code)
7894 {
7895         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7896
7897         ioarcb->res_handle = res_handle;
7898         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7899         ioarcb->cmd_pkt.cdb[1] = sa_code;
7900         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7901 }
7902
7903 /**
7904  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7905  * @ipr_cmd:    ipr command struct
7906  *
7907  * Return value:
7908  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7909  **/
7910 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7911 {
7912         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7913         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7914         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7915
7916         ENTER;
7917
7918         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7919
7920         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7921                 ipr_build_ioa_service_action(ipr_cmd,
7922                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7923                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7924
7925                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7926
7927                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7928                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7929                            IPR_SET_SUP_DEVICE_TIMEOUT);
7930
7931                 LEAVE;
7932                 return IPR_RC_JOB_RETURN;
7933         }
7934
7935         LEAVE;
7936         return IPR_RC_JOB_CONTINUE;
7937 }
7938
7939 /**
7940  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7941  * @ipr_cmd:    ipr command struct
      * @flags:      inquiry flags (bit 0 = EVPD)
      * @page:       page code to request
      * @dma_addr:   DMA address of the inquiry buffer
      * @xfer_len:   size of the inquiry buffer
7942  *
7943  * This utility function sends an inquiry to the adapter.
7944  *
7945  * Return value:
7946  *      none
7947  **/
7948 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7949                               dma_addr_t dma_addr, u8 xfer_len)
7950 {
7951         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7952
7953         ENTER;
7954         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7955         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7956
7957         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7958         ioarcb->cmd_pkt.cdb[1] = flags;
7959         ioarcb->cmd_pkt.cdb[2] = page;
7960         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7961
7962         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7963
7964         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7965         LEAVE;
7966 }
7967
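/*
 * Illustrative note, not part of the driver: the six-byte INQUIRY CDB
 * built above. flags = 1 sets the EVPD bit in byte 1, so the page code in
 * byte 2 selects a vital product data page instead of the standard
 * inquiry data.
 *
 *      cdb[0] = 0x12   INQUIRY
 *      cdb[1] = flags  bit 0 = EVPD
 *      cdb[2] = page   VPD page code (when EVPD is set)
 *      cdb[4] = len    allocation length
 */
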
7968 /**
7969  * ipr_inquiry_page_supported - Is the given inquiry page supported
7970  * @page0:              inquiry page 0 buffer
7971  * @page:               page code.
7972  *
7973  * This function determines if the specified inquiry page is supported.
7974  *
7975  * Return value:
7976  *      1 if page is supported / 0 if not
7977  **/
7978 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7979 {
7980         int i;
7981
7982         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7983                 if (page0->page[i] == page)
7984                         return 1;
7985
7986         return 0;
7987 }
7988
7989 /**
7990  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7991  * @ipr_cmd:    ipr command struct
7992  *
7993  * This function sends a Page 0xC4 inquiry to the adapter
7994  * to retrieve software VPD information.
7995  *
7996  * Return value:
7997  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7998  **/
7999 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8000 {
8001         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8002         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8003         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8004
8005         ENTER;
8006         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8007         memset(pageC4, 0, sizeof(*pageC4));
8008
8009         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8010                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8011                                   (ioa_cfg->vpd_cbs_dma
8012                                    + offsetof(struct ipr_misc_cbs,
8013                                               pageC4_data)),
8014                                   sizeof(struct ipr_inquiry_pageC4));
8015                 return IPR_RC_JOB_RETURN;
8016         }
8017
8018         LEAVE;
8019         return IPR_RC_JOB_CONTINUE;
8020 }
8021
8022 /**
8023  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8024  * @ipr_cmd:    ipr command struct
8025  *
8026  * This function sends a Page 0xD0 inquiry to the adapter
8027  * to retrieve adapter capabilities.
8028  *
8029  * Return value:
8030  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8031  **/
8032 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8033 {
8034         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8035         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8036         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8037
8038         ENTER;
8039         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8040         memset(cap, 0, sizeof(*cap));
8041
8042         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8043                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8044                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8045                                   sizeof(struct ipr_inquiry_cap));
8046                 return IPR_RC_JOB_RETURN;
8047         }
8048
8049         LEAVE;
8050         return IPR_RC_JOB_CONTINUE;
8051 }
8052
8053 /**
8054  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8055  * @ipr_cmd:    ipr command struct
8056  *
8057  * This function sends a Page 3 inquiry to the adapter
8058  * to retrieve software VPD information.
8059  *
8060  * Return value:
8061  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8062  **/
8063 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8064 {
8065         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8066
8067         ENTER;
8068
8069         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8070
8071         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8072                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8073                           sizeof(struct ipr_inquiry_page3));
8074
8075         LEAVE;
8076         return IPR_RC_JOB_RETURN;
8077 }
8078
8079 /**
8080  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8081  * @ipr_cmd:    ipr command struct
8082  *
8083  * This function sends a Page 0 inquiry to the adapter
8084  * to retrieve supported inquiry pages.
8085  *
8086  * Return value:
8087  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8088  **/
8089 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8090 {
8091         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8092         char type[5];
8093
8094         ENTER;
8095
8096         /* Grab the type out of the VPD and store it away */
8097         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8098         type[4] = '\0';
8099         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8100
8101         if (ipr_invalid_adapter(ioa_cfg)) {
8102                 dev_err(&ioa_cfg->pdev->dev,
8103                         "Adapter not supported in this hardware configuration.\n");
8104
8105                 if (!ipr_testmode) {
8106                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8107                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8108                         list_add_tail(&ipr_cmd->queue,
8109                                         &ioa_cfg->hrrq->hrrq_free_q);
8110                         return IPR_RC_JOB_RETURN;
8111                 }
8112         }
8113
8114         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8115
8116         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8117                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8118                           sizeof(struct ipr_inquiry_page0));
8119
8120         LEAVE;
8121         return IPR_RC_JOB_RETURN;
8122 }
8123
8124 /**
8125  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8126  * @ipr_cmd:    ipr command struct
8127  *
8128  * This function sends a standard inquiry to the adapter.
8129  *
8130  * Return value:
8131  *      IPR_RC_JOB_RETURN
8132  **/
8133 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8134 {
8135         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8136
8137         ENTER;
8138         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8139
8140         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8141                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8142                           sizeof(struct ipr_ioa_vpd));
8143
8144         LEAVE;
8145         return IPR_RC_JOB_RETURN;
8146 }
8147
8148 /**
8149  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8150  * @ipr_cmd:    ipr command struct
8151  *
8152  * This function sends an Identify Host Request Response Queue
8153  * command to establish the HRRQ with the adapter.
8154  *
8155  * Return value:
8156  *      IPR_RC_JOB_RETURN
8157  **/
8158 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8159 {
8160         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8161         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8162         struct ipr_hrr_queue *hrrq;
8163
8164         ENTER;
8165         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8166         if (ioa_cfg->identify_hrrq_index == 0)
8167                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8168
8169         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8170                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8171
8172                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8173                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8174
8175                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8176                 if (ioa_cfg->sis64)
8177                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8178
8179                 if (ioa_cfg->nvectors == 1)
8180                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8181                 else
8182                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8183
8184                 ioarcb->cmd_pkt.cdb[2] =
8185                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8186                 ioarcb->cmd_pkt.cdb[3] =
8187                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8188                 ioarcb->cmd_pkt.cdb[4] =
8189                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8190                 ioarcb->cmd_pkt.cdb[5] =
8191                         ((u64) hrrq->host_rrq_dma) & 0xff;
8192                 ioarcb->cmd_pkt.cdb[7] =
8193                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8194                 ioarcb->cmd_pkt.cdb[8] =
8195                         (sizeof(u32) * hrrq->size) & 0xff;
8196
8197                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8198                         ioarcb->cmd_pkt.cdb[9] =
8199                                         ioa_cfg->identify_hrrq_index;
8200
8201                 if (ioa_cfg->sis64) {
8202                         ioarcb->cmd_pkt.cdb[10] =
8203                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8204                         ioarcb->cmd_pkt.cdb[11] =
8205                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8206                         ioarcb->cmd_pkt.cdb[12] =
8207                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8208                         ioarcb->cmd_pkt.cdb[13] =
8209                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8210                 }
8211
8212                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8213                         ioarcb->cmd_pkt.cdb[14] =
8214                                         ioa_cfg->identify_hrrq_index;
8215
8216                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8217                            IPR_INTERNAL_TIMEOUT);
8218
8219                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8220                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8221
8222                 LEAVE;
8223                 return IPR_RC_JOB_RETURN;
8224         }
8225
8226         LEAVE;
8227         return IPR_RC_JOB_CONTINUE;
8228 }
8229
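/*
 * Illustrative sketch, not part of the driver: the CDB packing used by
 * ipr_ioafp_identify_hrrq() above, pulled into a hypothetical helper. The
 * HRRQ DMA address is split out big-endian, bits 31:0 into cdb[2..5] and,
 * on sis64 adapters, bits 63:32 into cdb[10..13].
 */
static void __maybe_unused ipr_example_pack_hrrq_addr(u8 *cdb, u64 dma)
{
        cdb[2] = (dma >> 24) & 0xff;
        cdb[3] = (dma >> 16) & 0xff;
        cdb[4] = (dma >> 8) & 0xff;
        cdb[5] = dma & 0xff;
        cdb[10] = (dma >> 56) & 0xff;
        cdb[11] = (dma >> 48) & 0xff;
        cdb[12] = (dma >> 40) & 0xff;
        cdb[13] = (dma >> 32) & 0xff;
}
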
8230 /**
8231  * ipr_reset_timer_done - Adapter reset timer function
8232  * @t:          Timer context used to fetch ipr command struct
8233  *
8234  * Description: This function is used in adapter reset processing
8235  * for timing events. If the reset_cmd pointer in the IOA
8236  * config struct no longer points to this command, we are doing nested
8237  * resets and fail_all_ops will take care of freeing the
8238  * command block.
8239  *
8240  * Return value:
8241  *      none
8242  **/
8243 static void ipr_reset_timer_done(struct timer_list *t)
8244 {
8245         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8246         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8247         unsigned long lock_flags = 0;
8248
8249         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8250
8251         if (ioa_cfg->reset_cmd == ipr_cmd) {
8252                 list_del(&ipr_cmd->queue);
8253                 ipr_cmd->done(ipr_cmd);
8254         }
8255
8256         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8257 }
8258
8259 /**
8260  * ipr_reset_start_timer - Start a timer for adapter reset job
8261  * @ipr_cmd:    ipr command struct
8262  * @timeout:    timeout value
8263  *
8264  * Description: This function is used in adapter reset processing
8265  * for timing events. If the reset_cmd pointer in the IOA
8266  * config struct no longer points to this command, we are doing nested
8267  * resets and fail_all_ops will take care of freeing the
8268  * command block.
8269  *
8270  * Return value:
8271  *      none
8272  **/
8273 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8274                                   unsigned long timeout)
8275 {
8276
8277         ENTER;
8278         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8279         ipr_cmd->done = ipr_reset_ioa_job;
8280
8281         ipr_cmd->timer.expires = jiffies + timeout;
8282         ipr_cmd->timer.function = ipr_reset_timer_done;
8283         add_timer(&ipr_cmd->timer);
8284 }
8285
8286 /**
8287  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8288  * @ioa_cfg:    ioa cfg struct
8289  *
8290  * Return value:
8291  *      nothing
8292  **/
8293 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8294 {
8295         struct ipr_hrr_queue *hrrq;
8296
8297         for_each_hrrq(hrrq, ioa_cfg) {
8298                 spin_lock(&hrrq->_lock);
8299                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8300
8301                 /* Initialize Host RRQ pointers */
8302                 hrrq->hrrq_start = hrrq->host_rrq;
8303                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8304                 hrrq->hrrq_curr = hrrq->hrrq_start;
8305                 hrrq->toggle_bit = 1;
8306                 spin_unlock(&hrrq->_lock);
8307         }
8308         wmb();
8309
8310         ioa_cfg->identify_hrrq_index = 0;
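        /*
         * With multiple queues, hrrq[0] (IPR_INIT_HRRQ) is reserved for
         * internal commands, so normal I/O is expected to round-robin
         * starting at index 1.
         */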
8311         if (ioa_cfg->hrrq_num == 1)
8312                 atomic_set(&ioa_cfg->hrrq_index, 0);
8313         else
8314                 atomic_set(&ioa_cfg->hrrq_index, 1);
8315
8316         /* Zero out config table */
8317         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8318 }
8319
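/*
 * Illustrative sketch, not part of the driver: how the toggle bit set up
 * in ipr_init_ioa_mem() is consumed. The adapter writes each response
 * entry with the current toggle value; when the host wraps hrrq_curr past
 * hrrq_end it flips the value it expects, so entries left over from the
 * previous pass are never mistaken for new completions. This mirrors the
 * interrupt-path check elsewhere in this driver.
 */
static int __maybe_unused ipr_example_hrrq_entry_valid(struct ipr_hrr_queue *hrrq)
{
        return (be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
               hrrq->toggle_bit;
}
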
8320 /**
8321  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8322  * @ipr_cmd:    ipr command struct
8323  *
8324  * Return value:
8325  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8326  **/
8327 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8328 {
8329         unsigned long stage, stage_time;
8330         u32 feedback;
8331         volatile u32 int_reg;
8332         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8333         u64 maskval = 0;
8334
8335         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8336         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8337         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8338
8339         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8340
8341         /* sanity check the stage_time value */
8342         if (stage_time == 0)
8343                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8344         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8345                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8346         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8347                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8348
8349         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8350                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8351                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8352                 stage_time = ioa_cfg->transop_timeout;
8353                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8354         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8355                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8356                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8357                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8358                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8359                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8360                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8361                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8362                         return IPR_RC_JOB_CONTINUE;
8363                 }
8364         }
8365
8366         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8367         ipr_cmd->timer.function = ipr_oper_timeout;
8368         ipr_cmd->done = ipr_reset_ioa_job;
8369         add_timer(&ipr_cmd->timer);
8370
8371         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8372
8373         return IPR_RC_JOB_RETURN;
8374 }
8375
8376 /**
8377  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8378  * @ipr_cmd:    ipr command struct
8379  *
8380  * This function reinitializes some control blocks and
8381  * enables destructive diagnostics on the adapter.
8382  *
8383  * Return value:
8384  *      IPR_RC_JOB_RETURN
8385  **/
8386 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8387 {
8388         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8389         volatile u32 int_reg;
8390         volatile u64 maskval;
8391         int i;
8392
8393         ENTER;
8394         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8395         ipr_init_ioa_mem(ioa_cfg);
8396
8397         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8398                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8399                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8400                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8401         }
8402         wmb();
8403         if (ioa_cfg->sis64) {
8404                 /* Set the adapter to the correct endian mode. */
8405                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8406                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8407         }
8408
8409         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8410
8411         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8412                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8413                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8414                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8415                 return IPR_RC_JOB_CONTINUE;
8416         }
8417
8418         /* Enable destructive diagnostics on IOA */
8419         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8420
8421         if (ioa_cfg->sis64) {
8422                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8423                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8424                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8425         } else
8426                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8427
8428         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8429
8430         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8431
8432         if (ioa_cfg->sis64) {
8433                 ipr_cmd->job_step = ipr_reset_next_stage;
8434                 return IPR_RC_JOB_CONTINUE;
8435         }
8436
8437         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8438         ipr_cmd->timer.function = ipr_oper_timeout;
8439         ipr_cmd->done = ipr_reset_ioa_job;
8440         add_timer(&ipr_cmd->timer);
8441         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8442
8443         LEAVE;
8444         return IPR_RC_JOB_RETURN;
8445 }
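/*
 * Note on the sis64 mask handling above: on SIS64 adapters the
 * interrupt mask is a single 64-bit register, so two 32-bit bit
 * definitions are combined before one writeq(), e.g.:
 *
 *      maskval = IPR_PCII_IPL_STAGE_CHANGE;
 *      maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
 *      writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
 *
 * The readl() of the sense mask register that follows each mask update
 * acts as a flush of the posted MMIO write.
 */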
8446
8447 /**
8448  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8449  * @ipr_cmd:    ipr command struct
8450  *
8451  * This function is invoked when an adapter dump has run out
8452  * of processing time.
8453  *
8454  * Return value:
8455  *      IPR_RC_JOB_CONTINUE
8456  **/
8457 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8458 {
8459         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8460
8461         if (ioa_cfg->sdt_state == GET_DUMP)
8462                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8463         else if (ioa_cfg->sdt_state == READ_DUMP)
8464                 ioa_cfg->sdt_state = ABORT_DUMP;
8465
8466         ioa_cfg->dump_timeout = 1;
8467         ipr_cmd->job_step = ipr_reset_alert;
8468
8469         return IPR_RC_JOB_CONTINUE;
8470 }
8471
8472 /**
8473  * ipr_unit_check_no_data - Log a unit check/no data error log
8474  * @ioa_cfg:            ioa config struct
8475  *
8476  * Logs an error indicating the adapter unit checked, but for some
8477  * reason, we were unable to fetch the unit check buffer.
8478  *
8479  * Return value:
8480  *      nothing
8481  **/
8482 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8483 {
8484         ioa_cfg->errors_logged++;
8485         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8486 }
8487
8488 /**
8489  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8490  * @ioa_cfg:            ioa config struct
8491  *
8492  * Fetches the unit check buffer from the adapter by clocking the data
8493  * through the mailbox register.
8494  *
8495  * Return value:
8496  *      nothing
8497  **/
8498 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8499 {
8500         unsigned long mailbox;
8501         struct ipr_hostrcb *hostrcb;
8502         struct ipr_uc_sdt sdt;
8503         int rc, length;
8504         u32 ioasc;
8505
8506         mailbox = readl(ioa_cfg->ioa_mailbox);
8507
8508         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8509                 ipr_unit_check_no_data(ioa_cfg);
8510                 return;
8511         }
8512
8513         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8514         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8515                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8516
8517         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8518             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8519             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8520                 ipr_unit_check_no_data(ioa_cfg);
8521                 return;
8522         }
8523
8524         /* Find length of the first sdt entry (UC buffer) */
8525         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8526                 length = be32_to_cpu(sdt.entry[0].end_token);
8527         else
8528                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8529                           be32_to_cpu(sdt.entry[0].start_token)) &
8530                           IPR_FMT2_MBX_ADDR_MASK;
8531
8532         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8533                              struct ipr_hostrcb, queue);
8534         list_del_init(&hostrcb->queue);
8535         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8536
8537         rc = ipr_get_ldump_data_section(ioa_cfg,
8538                                         be32_to_cpu(sdt.entry[0].start_token),
8539                                         (__be32 *)&hostrcb->hcam,
8540                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8541
8542         if (!rc) {
8543                 ipr_handle_log_data(ioa_cfg, hostrcb);
8544                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8545                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8546                     ioa_cfg->sdt_state == GET_DUMP)
8547                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8548         } else
8549                 ipr_unit_check_no_data(ioa_cfg);
8550
8551         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8552 }
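/*
 * Worked example for the length calculation above: a format 2 SDT entry
 * stores start and end tokens, so with hypothetical tokens
 * start_token = 0x00010000 and end_token = 0x00010200 the unit check
 * buffer length is (0x00010200 - 0x00010000) & IPR_FMT2_MBX_ADDR_MASK
 * = 0x200 bytes. Format 3 SDTs store the length directly in end_token.
 * In either case the copy is capped at sizeof(hostrcb->hcam).
 */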
8553
8554 /**
8555  * ipr_reset_get_unit_check_job - Get the unit check buffer
8556  * @ipr_cmd:    ipr command struct
8557  *
8558  * Description: Fetches the unit check buffer from the adapter and
8558  * schedules the reset alert job step.
8559  *
8560  * Return value:
8561  *      IPR_RC_JOB_RETURN
8562  **/
8563 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8564 {
8565         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8566
8567         ENTER;
8568         ioa_cfg->ioa_unit_checked = 0;
8569         ipr_get_unit_check_buffer(ioa_cfg);
8570         ipr_cmd->job_step = ipr_reset_alert;
8571         ipr_reset_start_timer(ipr_cmd, 0);
8572
8573         LEAVE;
8574         return IPR_RC_JOB_RETURN;
8575 }
8576
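/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS64 adapters, polls until the mailbox register is
 * stable (or the wait time is exhausted) before moving the dump state
 * machine to READ_DUMP and scheduling the dump worker.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/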
8577 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8578 {
8579         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8580
8581         ENTER;
8582
8583         if (ioa_cfg->sdt_state != GET_DUMP)
8584                 return IPR_RC_JOB_RETURN;
8585
8586         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8587             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8588              IPR_PCII_MAILBOX_STABLE)) {
8589
8590                 if (!ipr_cmd->u.time_left)
8591                         dev_err(&ioa_cfg->pdev->dev,
8592                                 "Timed out waiting for Mailbox register.\n");
8593
8594                 ioa_cfg->sdt_state = READ_DUMP;
8595                 ioa_cfg->dump_timeout = 0;
8596                 if (ioa_cfg->sis64)
8597                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8598                 else
8599                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8600                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8601                 schedule_work(&ioa_cfg->work_q);
8602
8603         } else {
8604                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8605                 ipr_reset_start_timer(ipr_cmd,
8606                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8607         }
8608
8609         LEAVE;
8610         return IPR_RC_JOB_RETURN;
8611 }
8612
8613 /**
8614  * ipr_reset_restore_cfg_space - Restore PCI config space.
8615  * @ipr_cmd:    ipr command struct
8616  *
8617  * Description: This function restores the saved PCI config space of
8618  * the adapter, fails all outstanding ops back to the callers, and
8619  * fetches the dump/unit check if applicable to this reset.
8620  *
8621  * Return value:
8622  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8623  **/
8624 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8625 {
8626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8627         u32 int_reg;
8628
8629         ENTER;
8630         ioa_cfg->pdev->state_saved = true;
8631         pci_restore_state(ioa_cfg->pdev);
8632
8633         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8634                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8635                 return IPR_RC_JOB_CONTINUE;
8636         }
8637
8638         ipr_fail_all_ops(ioa_cfg);
8639
8640         if (ioa_cfg->sis64) {
8641                 /* Set the adapter to the correct endian mode. */
8642                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8643                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8644         }
8645
8646         if (ioa_cfg->ioa_unit_checked) {
8647                 if (ioa_cfg->sis64) {
8648                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8649                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8650                         return IPR_RC_JOB_RETURN;
8651                 } else {
8652                         ioa_cfg->ioa_unit_checked = 0;
8653                         ipr_get_unit_check_buffer(ioa_cfg);
8654                         ipr_cmd->job_step = ipr_reset_alert;
8655                         ipr_reset_start_timer(ipr_cmd, 0);
8656                         return IPR_RC_JOB_RETURN;
8657                 }
8658         }
8659
8660         if (ioa_cfg->in_ioa_bringdown) {
8661                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8662         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8663                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8664                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8665         } else {
8666                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8667         }
8668
8669         LEAVE;
8670         return IPR_RC_JOB_CONTINUE;
8671 }
8672
8673 /**
8674  * ipr_reset_bist_done - BIST has completed on the adapter.
8675  * @ipr_cmd:    ipr command struct
8676  *
8677  * Description: Unblock config space and resume the reset process.
8678  *
8679  * Return value:
8680  *      IPR_RC_JOB_CONTINUE
8681  **/
8682 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8683 {
8684         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8685
8686         ENTER;
8687         if (ioa_cfg->cfg_locked)
8688                 pci_cfg_access_unlock(ioa_cfg->pdev);
8689         ioa_cfg->cfg_locked = 0;
8690         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8691         LEAVE;
8692         return IPR_RC_JOB_CONTINUE;
8693 }
8694
8695 /**
8696  * ipr_reset_start_bist - Run BIST on the adapter.
8697  * @ipr_cmd:    ipr command struct
8698  *
8699  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8700  *
8701  * Return value:
8702  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8703  **/
8704 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8705 {
8706         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8707         int rc = PCIBIOS_SUCCESSFUL;
8708
8709         ENTER;
8710         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8711                 writel(IPR_UPROCI_SIS64_START_BIST,
8712                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8713         else
8714                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8715
8716         if (rc == PCIBIOS_SUCCESSFUL) {
8717                 ipr_cmd->job_step = ipr_reset_bist_done;
8718                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8719                 rc = IPR_RC_JOB_RETURN;
8720         } else {
8721                 if (ioa_cfg->cfg_locked)
8722                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8723                 ioa_cfg->cfg_locked = 0;
8724                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8725                 rc = IPR_RC_JOB_CONTINUE;
8726         }
8727
8728         LEAVE;
8729         return rc;
8730 }
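/*
 * Two BIST mechanisms are used above: SIS64 chips with bist_method ==
 * IPR_MMIO start BIST through a microprocessor doorbell write, while
 * older chips use the standard PCI config-space BIST register, e.g.:
 *
 *      pci_write_config_byte(pdev, PCI_BIST, PCI_BIST_START);
 *
 * Either way the job step then waits IPR_WAIT_FOR_BIST_TIMEOUT via the
 * reset timer rather than polling for BIST completion.
 */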
8731
8732 /**
8733  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8734  * @ipr_cmd:    ipr command struct
8735  *
8736  * Description: This clears PCI reset to the adapter and delays two seconds.
8737  *
8738  * Return value:
8739  *      IPR_RC_JOB_RETURN
8740  **/
8741 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8742 {
8743         ENTER;
8744         ipr_cmd->job_step = ipr_reset_bist_done;
8745         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8746         LEAVE;
8747         return IPR_RC_JOB_RETURN;
8748 }
8749
8750 /**
8751  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8752  * @work:       work struct
8753  *
8754  * Description: This pulses warm reset to a slot.
8755  *
8756  **/
8757 static void ipr_reset_reset_work(struct work_struct *work)
8758 {
8759         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8760         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8761         struct pci_dev *pdev = ioa_cfg->pdev;
8762         unsigned long lock_flags = 0;
8763
8764         ENTER;
8765         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8766         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8767         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8768
8769         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8770         if (ioa_cfg->reset_cmd == ipr_cmd)
8771                 ipr_reset_ioa_job(ipr_cmd);
8772         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8773         LEAVE;
8774 }
8775
8776 /**
8777  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8778  * @ipr_cmd:    ipr command struct
8779  *
8780  * Description: This asserts PCI reset to the adapter.
8781  *
8782  * Return value:
8783  *      IPR_RC_JOB_RETURN
8784  **/
8785 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8786 {
8787         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8788
8789         ENTER;
8790         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8791         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8792         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8793         LEAVE;
8794         return IPR_RC_JOB_RETURN;
8795 }
8796
8797 /**
8798  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8799  * @ipr_cmd:    ipr command struct
8800  *
8801  * Description: This attempts to block config access to the IOA.
8802  *
8803  * Return value:
8804  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8805  **/
8806 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8807 {
8808         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8809         int rc = IPR_RC_JOB_CONTINUE;
8810
8811         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8812                 ioa_cfg->cfg_locked = 1;
8813                 ipr_cmd->job_step = ioa_cfg->reset;
8814         } else {
8815                 if (ipr_cmd->u.time_left) {
8816                         rc = IPR_RC_JOB_RETURN;
8817                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8818                         ipr_reset_start_timer(ipr_cmd,
8819                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8820                 } else {
8821                         ipr_cmd->job_step = ioa_cfg->reset;
8822                         dev_err(&ioa_cfg->pdev->dev,
8823                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8824                 }
8825         }
8826
8827         return rc;
8828 }
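/*
 * The retry pattern above is shared by several job steps: when the step
 * cannot make progress, the remaining wait budget in
 * ipr_cmd->u.time_left is decremented by IPR_CHECK_FOR_RESET_TIMEOUT
 * and the timer re-armed for the same interval, so the step re-runs
 * until it succeeds or the budget reaches zero, at which point the
 * reset proceeds anyway.
 */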
8829
8830 /**
8831  * ipr_reset_block_config_access - Block config access to the IOA
8832  * @ipr_cmd:    ipr command struct
8833  *
8834  * Description: This attempts to block config access to the IOA
8835  *
8836  * Return value:
8837  *      IPR_RC_JOB_CONTINUE
8838  **/
8839 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8840 {
8841         ipr_cmd->ioa_cfg->cfg_locked = 0;
8842         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8843         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8844         return IPR_RC_JOB_CONTINUE;
8845 }
8846
8847 /**
8848  * ipr_reset_allowed - Query whether or not IOA can be reset
8849  * @ioa_cfg:    ioa config struct
8850  *
8851  * Return value:
8852  *      0 if reset not allowed / non-zero if reset is allowed
8853  **/
8854 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8855 {
8856         volatile u32 temp_reg;
8857
8858         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8859         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8860 }
8861
8862 /**
8863  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8864  * @ipr_cmd:    ipr command struct
8865  *
8866  * Description: This function waits for adapter permission to run BIST,
8867  * then runs BIST. If the adapter does not give permission after a
8868  * reasonable time, we will reset the adapter anyway. The risk of
8869  * resetting the adapter without warning it is losing the persistent
8870  * error log: if the adapter is reset while writing to its flash, the
8871  * flash segment will have bad ECC and be zeroed.
8873  *
8874  * Return value:
8875  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8876  **/
8877 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8878 {
8879         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8880         int rc = IPR_RC_JOB_RETURN;
8881
8882         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8883                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8884                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8885         } else {
8886                 ipr_cmd->job_step = ipr_reset_block_config_access;
8887                 rc = IPR_RC_JOB_CONTINUE;
8888         }
8889
8890         return rc;
8891 }
8892
8893 /**
8894  * ipr_reset_alert - Alert the adapter of a pending reset
8895  * @ipr_cmd:    ipr command struct
8896  *
8897  * Description: This function alerts the adapter that it will be reset.
8898  * If memory space is not currently enabled, proceed directly
8899  * to running BIST on the adapter. The timer must always be started
8900  * so we guarantee we do not run BIST from ipr_isr.
8901  *
8902  * Return value:
8903  *      IPR_RC_JOB_RETURN
8904  **/
8905 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8906 {
8907         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8908         u16 cmd_reg;
8909         int rc;
8910
8911         ENTER;
8912         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8913
8914         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8915                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8916                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8917                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8918         } else {
8919                 ipr_cmd->job_step = ipr_reset_block_config_access;
8920         }
8921
8922         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8923         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8924
8925         LEAVE;
8926         return IPR_RC_JOB_RETURN;
8927 }
8928
8929 /**
8930  * ipr_reset_quiesce_done - Complete IOA disconnect
8931  * @ipr_cmd:    ipr command struct
8932  *
8933  * Description: Freeze the adapter to complete quiesce processing
8934  *
8935  * Return value:
8936  *      IPR_RC_JOB_CONTINUE
8937  **/
8938 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8939 {
8940         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8941
8942         ENTER;
8943         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8944         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8945         LEAVE;
8946         return IPR_RC_JOB_CONTINUE;
8947 }
8948
8949 /**
8950  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8951  * @ipr_cmd:    ipr command struct
8952  *
8953  * Description: Ensure nothing is outstanding to the IOA and proceed
8954  * with IOA disconnect. Otherwise reset the IOA.
8955  *
8956  * Return value:
8957  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8958  **/
8959 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8960 {
8961         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8962         struct ipr_cmnd *loop_cmd;
8963         struct ipr_hrr_queue *hrrq;
8964         int rc = IPR_RC_JOB_CONTINUE;
8965         int count = 0;
8966
8967         ENTER;
8968         ipr_cmd->job_step = ipr_reset_quiesce_done;
8969
8970         for_each_hrrq(hrrq, ioa_cfg) {
8971                 spin_lock(&hrrq->_lock);
8972                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8973                         count++;
8974                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8975                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8976                         rc = IPR_RC_JOB_RETURN;
8977                         break;
8978                 }
8979                 spin_unlock(&hrrq->_lock);
8980
8981                 if (count)
8982                         break;
8983         }
8984
8985         LEAVE;
8986         return rc;
8987 }
8988
8989 /**
8990  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8991  * @ipr_cmd:    ipr command struct
8992  *
8993  * Description: Cancel any outstanding HCAMs to the IOA.
8994  *
8995  * Return value:
8996  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8997  **/
8998 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8999 {
9000         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9001         int rc = IPR_RC_JOB_CONTINUE;
9002         struct ipr_cmd_pkt *cmd_pkt;
9003         struct ipr_cmnd *hcam_cmd;
9004         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9005
9006         ENTER;
9007         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9008
9009         if (!hrrq->ioa_is_dead) {
9010                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9011                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9012                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9013                                         continue;
9014
9015                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9016                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9017                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9019                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9020                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9021                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9022                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9023                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9024                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9025                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9026                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9027                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9028                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9029
9030                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9031                                            IPR_CANCEL_TIMEOUT);
9032
9033                                 rc = IPR_RC_JOB_RETURN;
9034                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9035                                 break;
9036                         }
9037                 }
9038         } else
9039                 ipr_cmd->job_step = ipr_reset_alert;
9040
9041         LEAVE;
9042         return rc;
9043 }
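/*
 * Worked example for the cancel CDB built above: a hypothetical 64-bit
 * IOARCB DMA address 0x0123456789abcdef is scattered big-endian across
 * the CDB, with the low word in the legacy position:
 *
 *      cdb[10..13] = 01 23 45 67       (address bits 63:32)
 *      cdb[2..5]   = 89 ab cd ef       (address bits 31:0)
 *
 * IPR_CANCEL_64BIT_IOARCB in cdb[1] flags the request as carrying the
 * upper address word in cdb[10..13].
 */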
9044
9045 /**
9046  * ipr_reset_ucode_download_done - Microcode download completion
9047  * @ipr_cmd:    ipr command struct
9048  *
9049  * Description: This function unmaps the microcode download buffer.
9050  *
9051  * Return value:
9052  *      IPR_RC_JOB_CONTINUE
9053  **/
9054 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9055 {
9056         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9057         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9058
9059         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9060                      sglist->num_sg, DMA_TO_DEVICE);
9061
9062         ipr_cmd->job_step = ipr_reset_alert;
9063         return IPR_RC_JOB_CONTINUE;
9064 }
9065
9066 /**
9067  * ipr_reset_ucode_download - Download microcode to the adapter
9068  * @ipr_cmd:    ipr command struct
9069  *
9070  * Description: This function checks to see if there is microcode
9071  * to download to the adapter. If there is, a download is performed.
9072  *
9073  * Return value:
9074  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9075  **/
9076 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9077 {
9078         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9079         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9080
9081         ENTER;
9082         ipr_cmd->job_step = ipr_reset_alert;
9083
9084         if (!sglist)
9085                 return IPR_RC_JOB_CONTINUE;
9086
9087         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9088         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9089         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9090         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9091         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9092         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9093         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9094
9095         if (ioa_cfg->sis64)
9096                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9097         else
9098                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9099         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9100
9101         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9102                    IPR_WRITE_BUFFER_TIMEOUT);
9103
9104         LEAVE;
9105         return IPR_RC_JOB_RETURN;
9106 }
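/*
 * Worked example for the WRITE BUFFER CDB above: the parameter list
 * length is a 24-bit big-endian field in cdb[6..8], so a hypothetical
 * 1 MiB microcode image (buffer_len = 0x100000) is encoded as
 * cdb[6] = 0x10, cdb[7] = 0x00, cdb[8] = 0x00.
 */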
9107
9108 /**
9109  * ipr_reset_shutdown_ioa - Shutdown the adapter
9110  * @ipr_cmd:    ipr command struct
9111  *
9112  * Description: This function issues an adapter shutdown of the
9113  * specified type to the specified adapter as part of the
9114  * adapter reset job.
9115  *
9116  * Return value:
9117  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9118  **/
9119 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9120 {
9121         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9122         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9123         unsigned long timeout;
9124         int rc = IPR_RC_JOB_CONTINUE;
9125
9126         ENTER;
9127         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9128                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9129         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9130                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9131                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9132                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9133                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9134                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9135
9136                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9137                         timeout = IPR_SHUTDOWN_TIMEOUT;
9138                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9139                         timeout = IPR_INTERNAL_TIMEOUT;
9140                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9141                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9142                 else
9143                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9144
9145                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9146
9147                 rc = IPR_RC_JOB_RETURN;
9148                 ipr_cmd->job_step = ipr_reset_ucode_download;
9149         } else
9150                 ipr_cmd->job_step = ipr_reset_alert;
9151
9152         LEAVE;
9153         return rc;
9154 }
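/*
 * Timeout selection above, in order of precedence: NORMAL shutdowns get
 * the full IPR_SHUTDOWN_TIMEOUT, PREPARE_FOR_NORMAL gets
 * IPR_INTERNAL_TIMEOUT, and abbreviated shutdowns get the dual-IOA or
 * single-IOA abbreviated timeout depending on configuration. A QUIESCE
 * shutdown skips the shutdown command entirely and instead cancels
 * outstanding HCAMs before disconnecting.
 */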
9155
9156 /**
9157  * ipr_reset_ioa_job - Adapter reset job
9158  * @ipr_cmd:    ipr command struct
9159  *
9160  * Description: This function is the job router for the adapter reset job.
9161  *
9162  * Return value:
9163  *      none
9164  **/
9165 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9166 {
9167         u32 rc, ioasc;
9168         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9169
9170         do {
9171                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9172
9173                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9174                         /*
9175                          * We are doing nested adapter resets and this is
9176                          * not the current reset job.
9177                          */
9178                         list_add_tail(&ipr_cmd->queue,
9179                                         &ipr_cmd->hrrq->hrrq_free_q);
9180                         return;
9181                 }
9182
9183                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9184                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9185                         if (rc == IPR_RC_JOB_RETURN)
9186                                 return;
9187                 }
9188
9189                 ipr_reinit_ipr_cmnd(ipr_cmd);
9190                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9191                 rc = ipr_cmd->job_step(ipr_cmd);
9192         } while (rc == IPR_RC_JOB_CONTINUE);
9193 }
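/*
 * Each job step run by the loop above either returns
 * IPR_RC_JOB_CONTINUE, in which case the next step executes
 * synchronously, or IPR_RC_JOB_RETURN after arranging its own
 * continuation (a timer, work item, or command completion that
 * re-enters ipr_reset_ioa_job). A minimal sketch of a conforming step,
 * with hypothetical names:
 *
 *      static int example_step(struct ipr_cmnd *ipr_cmd)
 *      {
 *              ipr_cmd->job_step = example_next_step;
 *              if (example_must_wait) {
 *                      ipr_reset_start_timer(ipr_cmd, HZ);
 *                      return IPR_RC_JOB_RETURN;
 *              }
 *              return IPR_RC_JOB_CONTINUE;
 *      }
 */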
9194
9195 /**
9196  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9197  * @ioa_cfg:            ioa config struct
9198  * @job_step:           first job step of reset job
9199  * @shutdown_type:      shutdown type
9200  *
9201  * Description: This function will initiate the reset of the given adapter
9202  * starting at the selected job step.
9203  * If the caller needs to wait on the completion of the reset,
9204  * the caller must sleep on the reset_wait_q.
9205  *
9206  * Return value:
9207  *      none
9208  **/
9209 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9210                                     int (*job_step) (struct ipr_cmnd *),
9211                                     enum ipr_shutdown_type shutdown_type)
9212 {
9213         struct ipr_cmnd *ipr_cmd;
9214         int i;
9215
9216         ioa_cfg->in_reset_reload = 1;
9217         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9218                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9219                 ioa_cfg->hrrq[i].allow_cmds = 0;
9220                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9221         }
9222         wmb();
9223         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9224                 ioa_cfg->scsi_unblock = 0;
9225                 ioa_cfg->scsi_blocked = 1;
9226                 scsi_block_requests(ioa_cfg->host);
9227         }
9228
9229         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9230         ioa_cfg->reset_cmd = ipr_cmd;
9231         ipr_cmd->job_step = job_step;
9232         ipr_cmd->u.shutdown_type = shutdown_type;
9233
9234         ipr_reset_ioa_job(ipr_cmd);
9235 }
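/*
 * Typical usage (a sketch; callers hold the host lock and then block
 * until the reset job completes):
 *
 *      spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *      ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *      spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *      wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */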
9236
9237 /**
9238  * ipr_initiate_ioa_reset - Initiate an adapter reset
9239  * @ioa_cfg:            ioa config struct
9240  * @shutdown_type:      shutdown type
9241  *
9242  * Description: This function will initiate the reset of the given adapter.
9243  * If the caller needs to wait on the completion of the reset,
9244  * the caller must sleep on the reset_wait_q.
9245  *
9246  * Return value:
9247  *      none
9248  **/
9249 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9250                                    enum ipr_shutdown_type shutdown_type)
9251 {
9252         int i;
9253
9254         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9255                 return;
9256
9257         if (ioa_cfg->in_reset_reload) {
9258                 if (ioa_cfg->sdt_state == GET_DUMP)
9259                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9260                 else if (ioa_cfg->sdt_state == READ_DUMP)
9261                         ioa_cfg->sdt_state = ABORT_DUMP;
9262         }
9263
9264         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9265                 dev_err(&ioa_cfg->pdev->dev,
9266                         "IOA taken offline - error recovery failed\n");
9267
9268                 ioa_cfg->reset_retries = 0;
9269                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9270                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9271                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9272                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9273                 }
9274                 wmb();
9275
9276                 if (ioa_cfg->in_ioa_bringdown) {
9277                         ioa_cfg->reset_cmd = NULL;
9278                         ioa_cfg->in_reset_reload = 0;
9279                         ipr_fail_all_ops(ioa_cfg);
9280                         wake_up_all(&ioa_cfg->reset_wait_q);
9281
9282                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9283                                 ioa_cfg->scsi_unblock = 1;
9284                                 schedule_work(&ioa_cfg->work_q);
9285                         }
9286                         return;
9287                 } else {
9288                         ioa_cfg->in_ioa_bringdown = 1;
9289                         shutdown_type = IPR_SHUTDOWN_NONE;
9290                 }
9291         }
9292
9293         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9294                                 shutdown_type);
9295 }
9296
9297 /**
9298  * ipr_reset_freeze - Hold off all I/O activity
9299  * @ipr_cmd:    ipr command struct
9300  *
9301  * Description: If the PCI slot is frozen, hold off all I/O
9302  * activity; then, as soon as the slot is available again,
9303  * initiate an adapter reset.
9304  */
9305 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9306 {
9307         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9308         int i;
9309
9310         /* Disallow new interrupts, avoid loop */
9311         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9312                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9313                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9314                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9315         }
9316         wmb();
9317         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9318         ipr_cmd->done = ipr_reset_ioa_job;
9319         return IPR_RC_JOB_RETURN;
9320 }
9321
9322 /**
9323  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9324  * @pdev:       PCI device struct
9325  *
9326  * Description: This routine is called to tell us that the MMIO
9327  * access to the IOA has been restored
9328  */
9329 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9330 {
9331         unsigned long flags = 0;
9332         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9333
9334         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9335         if (!ioa_cfg->probe_done)
9336                 pci_save_state(pdev);
9337         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9338         return PCI_ERS_RESULT_NEED_RESET;
9339 }
9340
9341 /**
9342  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9343  * @pdev:       PCI device struct
9344  *
9345  * Description: This routine is called to tell us that the PCI bus
9346  * is down. Can't do anything here, except put the device driver
9347  * into a holding pattern, waiting for the PCI bus to come back.
9348  */
9349 static void ipr_pci_frozen(struct pci_dev *pdev)
9350 {
9351         unsigned long flags = 0;
9352         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9353
9354         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9355         if (ioa_cfg->probe_done)
9356                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9357         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9358 }
9359
9360 /**
9361  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9362  * @pdev:       PCI device struct
9363  *
9364  * Description: This routine is called by the pci error recovery
9365  * code after the PCI slot has been reset, just before we
9366  * should resume normal operations.
9367  */
9368 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9369 {
9370         unsigned long flags = 0;
9371         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9372
9373         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9374         if (ioa_cfg->probe_done) {
9375                 if (ioa_cfg->needs_warm_reset)
9376                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9377                 else
9378                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9379                                                 IPR_SHUTDOWN_NONE);
9380         } else
9381                 wake_up_all(&ioa_cfg->eeh_wait_q);
9382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9383         return PCI_ERS_RESULT_RECOVERED;
9384 }
9385
9386 /**
9387  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9388  * @pdev:       PCI device struct
9389  *
9390  * Description: This routine is called when the PCI bus has
9391  * permanently failed.
9392  */
9393 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9394 {
9395         unsigned long flags = 0;
9396         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9397         int i;
9398
9399         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9400         if (ioa_cfg->probe_done) {
9401                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9402                         ioa_cfg->sdt_state = ABORT_DUMP;
9403                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9404                 ioa_cfg->in_ioa_bringdown = 1;
9405                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9406                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9407                         ioa_cfg->hrrq[i].allow_cmds = 0;
9408                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9409                 }
9410                 wmb();
9411                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9412         } else
9413                 wake_up_all(&ioa_cfg->eeh_wait_q);
9414         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9415 }
9416
9417 /**
9418  * ipr_pci_error_detected - Called when a PCI error is detected.
9419  * @pdev:       PCI device struct
9420  * @state:      PCI channel state
9421  *
9422  * Description: Called when a PCI error is detected.
9423  *
9424  * Return value:
9425  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9426  */
9427 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9428                                                pci_channel_state_t state)
9429 {
9430         switch (state) {
9431         case pci_channel_io_frozen:
9432                 ipr_pci_frozen(pdev);
9433                 return PCI_ERS_RESULT_CAN_RECOVER;
9434         case pci_channel_io_perm_failure:
9435                 ipr_pci_perm_failure(pdev);
9436                 return PCI_ERS_RESULT_DISCONNECT;
9438         default:
9439                 break;
9440         }
9441         return PCI_ERS_RESULT_NEED_RESET;
9442 }
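/*
 * How the callbacks above fit the PCI error recovery flow: a frozen
 * channel returns PCI_ERS_RESULT_CAN_RECOVER, so the core next calls
 * ipr_pci_mmio_enabled(), which asks for a reset; after the slot reset
 * the core calls ipr_pci_slot_reset(), which restarts the adapter
 * bring-up. A permanent failure takes the IOA offline instead.
 */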
9443
9444 /**
9445  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9446  * @ioa_cfg:    ioa cfg struct
9447  *
9448  * Description: This is the second phase of adapter initialization.
9449  * This function takes care of initializing the adapter to the point
9450  * where it can accept new commands.
9451  *
9452  * Return value:
9453  *      0 on success / -EIO on failure
9454  **/
9455 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9456 {
9457         int rc = 0;
9458         unsigned long host_lock_flags = 0;
9459
9460         ENTER;
9461         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9462         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9463         ioa_cfg->probe_done = 1;
9464         if (ioa_cfg->needs_hard_reset) {
9465                 ioa_cfg->needs_hard_reset = 0;
9466                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9467         } else
9468                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9469                                         IPR_SHUTDOWN_NONE);
9470         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9471
9472         LEAVE;
9473         return rc;
9474 }
9475
9476 /**
9477  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9478  * @ioa_cfg:    ioa config struct
9479  *
9480  * Return value:
9481  *      none
9482  **/
9483 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9484 {
9485         int i;
9486
9487         if (ioa_cfg->ipr_cmnd_list) {
9488                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9489                         if (ioa_cfg->ipr_cmnd_list[i])
9490                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9491                                               ioa_cfg->ipr_cmnd_list[i],
9492                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9493
9494                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9495                 }
9496         }
9497
9498         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);        /* NULL-safe */
9500
9501         kfree(ioa_cfg->ipr_cmnd_list);
9502         kfree(ioa_cfg->ipr_cmnd_list_dma);
9503         ioa_cfg->ipr_cmnd_list = NULL;
9504         ioa_cfg->ipr_cmnd_list_dma = NULL;
9505         ioa_cfg->ipr_cmd_pool = NULL;
9506 }
9507
9508 /**
9509  * ipr_free_mem - Frees memory allocated for an adapter
9510  * @ioa_cfg:    ioa cfg struct
9511  *
9512  * Return value:
9513  *      nothing
9514  **/
9515 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9516 {
9517         int i;
9518
9519         kfree(ioa_cfg->res_entries);
9520         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9521                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9522         ipr_free_cmd_blks(ioa_cfg);
9523
9524         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9525                 dma_free_coherent(&ioa_cfg->pdev->dev,
9526                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9527                                   ioa_cfg->hrrq[i].host_rrq,
9528                                   ioa_cfg->hrrq[i].host_rrq_dma);
9529
9530         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9531                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9532
9533         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9534                 dma_free_coherent(&ioa_cfg->pdev->dev,
9535                                   sizeof(struct ipr_hostrcb),
9536                                   ioa_cfg->hostrcb[i],
9537                                   ioa_cfg->hostrcb_dma[i]);
9538         }
9539
9540         ipr_free_dump(ioa_cfg);
9541         kfree(ioa_cfg->trace);
9542 }
9543
9544 /**
9545  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9546  * @ioa_cfg:    ioa config struct
9547  *
9548  * This function frees all allocated IRQs for the
9549  * specified adapter.
9550  *
9551  * Return value:
9552  *      none
9553  **/
9554 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9555 {
9556         struct pci_dev *pdev = ioa_cfg->pdev;
9557         int i;
9558
9559         for (i = 0; i < ioa_cfg->nvectors; i++)
9560                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9561         pci_free_irq_vectors(pdev);
9562 }
9563
9564 /**
9565  * ipr_free_all_resources - Free all allocated resources for an adapter.
9566  * @ioa_cfg:    ioa config struct
9567  *
9568  * This function frees all allocated resources for the
9569  * specified adapter.
9570  *
9571  * Return value:
9572  *      none
9573  **/
9574 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9575 {
9576         struct pci_dev *pdev = ioa_cfg->pdev;
9577
9578         ENTER;
9579         ipr_free_irqs(ioa_cfg);
9580         if (ioa_cfg->reset_work_q)
9581                 destroy_workqueue(ioa_cfg->reset_work_q);
9582         iounmap(ioa_cfg->hdw_dma_regs);
9583         pci_release_regions(pdev);
9584         ipr_free_mem(ioa_cfg);
9585         scsi_host_put(ioa_cfg->host);
9586         pci_disable_device(pdev);
9587         LEAVE;
9588 }
9589
9590 /**
9591  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9592  * @ioa_cfg:    ioa config struct
9593  *
9594  * Return value:
9595  *      0 on success / -ENOMEM on allocation failure
9596  **/
9597 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9598 {
9599         struct ipr_cmnd *ipr_cmd;
9600         struct ipr_ioarcb *ioarcb;
9601         dma_addr_t dma_addr;
9602         int i, entries_each_hrrq, hrrq_id = 0;
9603
9604         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9605                                                 sizeof(struct ipr_cmnd), 512, 0);
9606
9607         if (!ioa_cfg->ipr_cmd_pool)
9608                 return -ENOMEM;
9609
9610         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9611         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9612
9613         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9614                 ipr_free_cmd_blks(ioa_cfg);
9615                 return -ENOMEM;
9616         }
9617
9618         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9619                 if (ioa_cfg->hrrq_num > 1) {
9620                         if (i == 0) {
9621                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9622                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9623                                 ioa_cfg->hrrq[i].max_cmd_id =
9624                                         (entries_each_hrrq - 1);
9625                         } else {
9626                                 entries_each_hrrq =
9627                                         IPR_NUM_BASE_CMD_BLKS/
9628                                         (ioa_cfg->hrrq_num - 1);
9629                                 ioa_cfg->hrrq[i].min_cmd_id =
9630                                         IPR_NUM_INTERNAL_CMD_BLKS +
9631                                         (i - 1) * entries_each_hrrq;
9632                                 ioa_cfg->hrrq[i].max_cmd_id =
9633                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9634                                         i * entries_each_hrrq - 1);
9635                         }
9636                 } else {
9637                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9638                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9639                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9640                 }
9641                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9642         }
9643
9644         BUG_ON(ioa_cfg->hrrq_num == 0);
9645
9646         i = IPR_NUM_CMD_BLKS -
9647                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9648         if (i > 0) {
9649                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9650                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9651         }
9652
9653         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9654                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9655                                 GFP_KERNEL, &dma_addr);
9656
9657                 if (!ipr_cmd) {
9658                         ipr_free_cmd_blks(ioa_cfg);
9659                         return -ENOMEM;
9660                 }
9661
9662                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9663                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9664
9665                 ioarcb = &ipr_cmd->ioarcb;
9666                 ipr_cmd->dma_addr = dma_addr;
9667                 if (ioa_cfg->sis64)
9668                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9669                 else
9670                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9671
9672                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9673                 if (ioa_cfg->sis64) {
9674                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9675                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9676                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9677                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9678                 } else {
9679                         ioarcb->write_ioadl_addr =
9680                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9681                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9682                         ioarcb->ioasa_host_pci_addr =
9683                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9684                 }
9685                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9686                 ipr_cmd->cmd_index = i;
9687                 ipr_cmd->ioa_cfg = ioa_cfg;
9688                 ipr_cmd->sense_buffer_dma = dma_addr +
9689                         offsetof(struct ipr_cmnd, sense_buffer);
9690
9691                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9692                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9693                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9694                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9695                         hrrq_id++;
9696         }
9697
9698         return 0;
9699 }
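/*
 * Worked partitioning example for the code above, assuming hypothetical
 * totals of 4 HRRQs, IPR_NUM_INTERNAL_CMD_BLKS = 5 and
 * IPR_NUM_BASE_CMD_BLKS = 100 (so IPR_NUM_CMD_BLKS = 105): HRRQ 0 gets
 * command ids 0..4 for internal use, and each remaining queue gets
 * 100 / 3 = 33 entries (5..37, 38..70, 71..103). The rounding remainder
 * of one block is then folded into the last queue, which ends at
 * IPR_NUM_CMD_BLKS - 1 = 104.
 */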
9700
9701 /**
9702  * ipr_alloc_mem - Allocate memory for an adapter
9703  * @ioa_cfg:    ioa config struct
9704  *
9705  * Return value:
9706  *      0 on success / non-zero for error
9707  **/
9708 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9709 {
9710         struct pci_dev *pdev = ioa_cfg->pdev;
9711         int i, rc = -ENOMEM;
9712
9713         ENTER;
9714         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9715                                        sizeof(struct ipr_resource_entry),
9715                                        GFP_KERNEL);
9716
9717         if (!ioa_cfg->res_entries)
9718                 goto out;
9719
9720         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9721                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9722                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9723         }
9724
9725         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9726                                               sizeof(struct ipr_misc_cbs),
9727                                               &ioa_cfg->vpd_cbs_dma,
9728                                               GFP_KERNEL);
9729
9730         if (!ioa_cfg->vpd_cbs)
9731                 goto out_free_res_entries;
9732
9733         if (ipr_alloc_cmd_blks(ioa_cfg))
9734                 goto out_free_vpd_cbs;
9735
9736         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9737                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9738                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9739                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9740                                         GFP_KERNEL);
9741
9742                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9743                         while (--i >= 0)
9744                                 dma_free_coherent(&pdev->dev,
9745                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9746                                         ioa_cfg->hrrq[i].host_rrq,
9747                                         ioa_cfg->hrrq[i].host_rrq_dma);
9748                         goto out_ipr_free_cmd_blocks;
9749                 }
9750                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9751         }
9752
9753         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9754                                                   ioa_cfg->cfg_table_size,
9755                                                   &ioa_cfg->cfg_table_dma,
9756                                                   GFP_KERNEL);
9757
9758         if (!ioa_cfg->u.cfg_table)
9759                 goto out_free_host_rrq;
9760
9761         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9762                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9763                                                          sizeof(struct ipr_hostrcb),
9764                                                          &ioa_cfg->hostrcb_dma[i],
9765                                                          GFP_KERNEL);
9766
9767                 if (!ioa_cfg->hostrcb[i])
9768                         goto out_free_hostrcb_dma;
9769
9770                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9771                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9772                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9773                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9774         }
9775
9776         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9777                                  sizeof(struct ipr_trace_entry), GFP_KERNEL);
9778
9779         if (!ioa_cfg->trace)
9780                 goto out_free_hostrcb_dma;
9781
9782         rc = 0;
9783 out:
9784         LEAVE;
9785         return rc;
9786
9787 out_free_hostrcb_dma:
9788         while (i-- > 0) {
9789                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9790                                   ioa_cfg->hostrcb[i],
9791                                   ioa_cfg->hostrcb_dma[i]);
9792         }
9793         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9794                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9795 out_free_host_rrq:
9796         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9797                 dma_free_coherent(&pdev->dev,
9798                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9799                                   ioa_cfg->hrrq[i].host_rrq,
9800                                   ioa_cfg->hrrq[i].host_rrq_dma);
9801         }
9802 out_ipr_free_cmd_blocks:
9803         ipr_free_cmd_blks(ioa_cfg);
9804 out_free_vpd_cbs:
9805         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9806                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9807 out_free_res_entries:
9808         kfree(ioa_cfg->res_entries);
9809         goto out;
9810 }
9811
9812 /**
9813  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9814  * @ioa_cfg:    ioa config struct
9815  *
9816  * Return value:
9817  *      none
9818  **/
9819 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9820 {
9821         int i;
9822
9823         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9824                 ioa_cfg->bus_attr[i].bus = i;
9825                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9826                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9827                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9828                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9829                 else
9830                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9831         }
9832 }
9833
9834 /**
9835  * ipr_init_regs - Initialize IOA registers
9836  * @ioa_cfg:    ioa config struct
9837  *
9838  * Return value:
9839  *      none
9840  **/
9841 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9842 {
9843         const struct ipr_interrupt_offsets *p;
9844         struct ipr_interrupts *t;
9845         void __iomem *base;
9846
9847         p = &ioa_cfg->chip_cfg->regs;
9848         t = &ioa_cfg->regs;
9849         base = ioa_cfg->hdw_dma_regs;
9850
9851         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9852         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9853         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9854         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9855         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9856         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9857         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9858         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9859         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9860         t->ioarrin_reg = base + p->ioarrin_reg;
9861         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9862         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9863         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9864         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9865         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9866         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9867
9868         if (ioa_cfg->sis64) {
9869                 t->init_feedback_reg = base + p->init_feedback_reg;
9870                 t->dump_addr_reg = base + p->dump_addr_reg;
9871                 t->dump_data_reg = base + p->dump_data_reg;
9872                 t->endian_swap_reg = base + p->endian_swap_reg;
9873         }
9874 }
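
/*
 * Illustrative sketch (not part of the driver): each supported chip
 * describes its registers as byte offsets, and ipr_init_regs() above
 * resolves them against the ioremapped BAR once at probe time so the hot
 * paths can use precomputed __iomem addresses. All names below are
 * hypothetical.
 */
struct example_reg_offsets {
        unsigned long status;
        unsigned long doorbell;
};

struct example_regs {
        void __iomem *status;
        void __iomem *doorbell;
};

static void example_resolve_regs(struct example_regs *r,
                                 const struct example_reg_offsets *p,
                                 void __iomem *base)
{
        /* Offset arithmetic on the __iomem cookie yields MMIO addresses. */
        r->status = base + p->status;
        r->doorbell = base + p->doorbell;
}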
9875
9876 /**
9877  * ipr_init_ioa_cfg - Initialize IOA config struct
9878  * @ioa_cfg:    ioa config struct
9879  * @host:               scsi host struct
9880  * @pdev:               PCI dev struct
9881  *
9882  * Return value:
9883  *      none
9884  **/
9885 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9886                              struct Scsi_Host *host, struct pci_dev *pdev)
9887 {
9888         int i;
9889
9890         ioa_cfg->host = host;
9891         ioa_cfg->pdev = pdev;
9892         ioa_cfg->log_level = ipr_log_level;
9893         ioa_cfg->doorbell = IPR_DOORBELL;
9894         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9895         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9896         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9897         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9898         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9899         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9900
9901         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9902         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9903         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9904         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9905         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9906         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9907         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9908         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9909         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9910         ioa_cfg->sdt_state = INACTIVE;
9911
9912         ipr_initialize_bus_attr(ioa_cfg);
9913         ioa_cfg->max_devs_supported = ipr_max_devs;
9914
9915         if (ioa_cfg->sis64) {
9916                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9917                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9918                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9919                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9920                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9921                                            + ((sizeof(struct ipr_config_table_entry64)
9922                                                * ioa_cfg->max_devs_supported)));
9923         } else {
9924                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9925                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9926                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9927                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9928                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9929                                            + ((sizeof(struct ipr_config_table_entry)
9930                                                * ioa_cfg->max_devs_supported)));
9931         }
9932
9933         host->max_channel = IPR_VSET_BUS;
9934         host->unique_id = host->host_no;
9935         host->max_cmd_len = IPR_MAX_CDB_LEN;
9936         host->can_queue = ioa_cfg->max_cmds;
9937         pci_set_drvdata(pdev, ioa_cfg);
9938
9939         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9940                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9941                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9942                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9943                 if (i == 0)
9944                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9945                 else
9946                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9947         }
9948 }
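
/*
 * Illustrative sketch (not part of the driver): the loop at the end of
 * ipr_init_ioa_cfg() gives HRRQ 0 the SCSI host lock, since the legacy
 * single-queue paths already run under it, while every other queue gets
 * its own spinlock so per-vector interrupt handlers do not contend. The
 * helper name below is hypothetical; the field names match this file.
 */
static void example_pick_hrrq_lock(struct ipr_hrr_queue *hrrq, int index,
                                   struct Scsi_Host *host)
{
        spin_lock_init(&hrrq->_lock);
        /* Queue 0 shares host_lock; the rest use their private lock. */
        hrrq->lock = index ? &hrrq->_lock : host->host_lock;
}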
9949
9950 /**
9951  * ipr_get_chip_info - Find adapter chip information
9952  * @dev_id:             PCI device id struct
9953  *
9954  * Return value:
9955  *      ptr to chip information on success / NULL on failure
9956  **/
9957 static const struct ipr_chip_t *
9958 ipr_get_chip_info(const struct pci_device_id *dev_id)
9959 {
9960         int i;
9961
9962         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9963                 if (ipr_chip[i].vendor == dev_id->vendor &&
9964                     ipr_chip[i].device == dev_id->device)
9965                         return &ipr_chip[i];
9966         return NULL;
9967 }
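
/*
 * Illustrative sketch (not part of the driver): ipr_get_chip_info() above
 * is a plain linear scan of a static table keyed by PCI vendor/device ID;
 * with only a handful of entries nothing faster is needed. The table
 * below, including its IDs, is a hypothetical stand-in for ipr_chip[].
 */
struct example_chip {
        u16 vendor;
        u16 device;
        const char *name;
};

static const struct example_chip example_chips[] = {
        { 0x1014, 0x028c, "example-A" },        /* hypothetical IDs */
        { 0x1014, 0x02bd, "example-B" },
};

static const struct example_chip *example_find_chip(u16 vendor, u16 device)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(example_chips); i++)
                if (example_chips[i].vendor == vendor &&
                    example_chips[i].device == device)
                        return &example_chips[i];
        return NULL;    /* unknown adapter: the caller fails the probe */
}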
9968
9969 /**
9970  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9971  *                                              during probe time
9972  * @ioa_cfg:    ioa config struct
9973  *
9974  * Return value:
9975  *      None
9976  **/
9977 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9978 {
9979         struct pci_dev *pdev = ioa_cfg->pdev;
9980
9981         if (pci_channel_offline(pdev)) {
9982                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9983                                    !pci_channel_offline(pdev),
9984                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9985                 pci_restore_state(pdev);
9986         }
9987 }
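
/*
 * Illustrative sketch (not part of the driver): probe-time code that can
 * race with EEH recovery checks pci_channel_offline() and sleeps on a
 * wait queue that the PCI error handlers wake, bounded by a timeout,
 * exactly as ipr_wait_for_pci_err_recovery() does above. The 60-second
 * timeout below is a hypothetical stand-in for
 * IPR_PCI_ERROR_RECOVERY_TIMEOUT.
 */
static void example_wait_pci_recovery(struct pci_dev *pdev,
                                      wait_queue_head_t *eeh_wq)
{
        if (pci_channel_offline(pdev)) {
                /* Wakes early once recovery brings the channel back. */
                wait_event_timeout(*eeh_wq, !pci_channel_offline(pdev),
                                   msecs_to_jiffies(60000));
                pci_restore_state(pdev);
        }
}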
9988
9989 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9990 {
9991         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9992
9993         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9994                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9995                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9996                 ioa_cfg->vectors_info[vec_idx].
9997                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9998         }
9999 }
10000
10001 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10002                 struct pci_dev *pdev)
10003 {
10004         int i, rc;
10005
10006         for (i = 1; i < ioa_cfg->nvectors; i++) {
10007                 rc = request_irq(pci_irq_vector(pdev, i),
10008                         ipr_isr_mhrrq,
10009                         0,
10010                         ioa_cfg->vectors_info[i].desc,
10011                         &ioa_cfg->hrrq[i]);
10012                 if (rc) {
10013                         while (--i >= 0)
10014                                 free_irq(pci_irq_vector(pdev, i),
10015                                         &ioa_cfg->hrrq[i]);
10016                         return rc;
10017                 }
10018         }
10019         return 0;
10020 }
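
/*
 * Illustrative sketch (not part of the driver): when requesting one IRQ
 * per allocated vector, a mid-loop failure must free every vector already
 * requested before propagating the error, which is what the
 * "while (--i >= 0)" unwind above does. Handler and cookie here are
 * hypothetical placeholders.
 */
static int example_request_vectors(struct pci_dev *pdev, int nvectors,
                                   irq_handler_t handler, void *cookie)
{
        int i, rc;

        for (i = 0; i < nvectors; i++) {
                rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
                                 "example", cookie);
                if (rc) {
                        /* Unwind in reverse: free what was acquired. */
                        while (--i >= 0)
                                free_irq(pci_irq_vector(pdev, i), cookie);
                        return rc;
                }
        }
        return 0;
}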
10021
10022 /**
10023  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10024  * @irq:		interrupt number
 * @devp:		pointer to ioa config struct
10025  *
10026  * Description: Simply set the msi_received flag to 1 indicating that
10027  * Message Signaled Interrupts are supported.
10028  *
10029  * Return value:
10030  *	IRQ_HANDLED
10031  **/
10032 static irqreturn_t ipr_test_intr(int irq, void *devp)
10033 {
10034         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10035         unsigned long lock_flags = 0;
10036         irqreturn_t rc = IRQ_HANDLED;
10037
10038         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10039         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10040
10041         ioa_cfg->msi_received = 1;
10042         wake_up(&ioa_cfg->msi_wait_q);
10043
10044         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10045         return rc;
10046 }
10047
10048 /**
10049  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10050  * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
10051  *
10052  * Description: This routine sets up and initiates a test interrupt to determine
10053  * if the interrupt is received via the ipr_test_intr() service routine.
10054  * If the test fails, the driver will fall back to LSI.
10055  *
10056  * Return value:
10057  *      0 on success / non-zero on failure
10058  **/
10059 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10060 {
10061         int rc;
10062         volatile u32 int_reg;
10063         unsigned long lock_flags = 0;
10064         int irq = pci_irq_vector(pdev, 0);
10065
10066         ENTER;
10067
10068         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10069         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10070         ioa_cfg->msi_received = 0;
10071         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10072         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10073         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10074         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10075
10076         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10077         if (rc) {
10078                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10079                 return rc;
10080         } else if (ipr_debug)
10081                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10082
10083         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10084         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10085         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10086         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10087         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10088
10089         if (!ioa_cfg->msi_received) {
10090                 /* MSI test failed */
10091                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10092                 rc = -EOPNOTSUPP;
10093         } else if (ipr_debug)
10094                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10095
10096         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10097
10098         free_irq(irq, ioa_cfg);
10099
10100         LEAVE;
10101
10102         return rc;
10103 }
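
/*
 * Illustrative sketch (not part of the driver): the MSI test above follows
 * a common probe-time pattern: clear a flag, install a throwaway handler,
 * poke the hardware so it raises one interrupt, then wait_event_timeout()
 * on the flag the handler sets. This condensed version reuses this file's
 * symbols but omits the locking and register masking the real routine
 * performs.
 */
static bool example_msi_works(struct ipr_ioa_cfg *ioa_cfg, int irq)
{
        ioa_cfg->msi_received = 0;

        if (request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg))
                return false;

        /* Ask the adapter to fire one interrupt (device-specific write). */
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
               ioa_cfg->regs.sense_interrupt_reg32);

        /* Give the vector up to one second to be delivered. */
        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);

        free_irq(irq, ioa_cfg);
        return ioa_cfg->msi_received != 0;
}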
10104
10105 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10106  * @pdev:               PCI device struct
10107  * @dev_id:             PCI device id struct
10108  *
10109  * Return value:
10110  *      0 on success / non-zero on failure
10111  **/
10112 static int ipr_probe_ioa(struct pci_dev *pdev,
10113                          const struct pci_device_id *dev_id)
10114 {
10115         struct ipr_ioa_cfg *ioa_cfg;
10116         struct Scsi_Host *host;
10117         unsigned long ipr_regs_pci;
10118         void __iomem *ipr_regs;
10119         int rc = PCIBIOS_SUCCESSFUL;
10120         volatile u32 mask, uproc, interrupts;
10121         unsigned long lock_flags, driver_lock_flags;
10122         unsigned int irq_flag;
10123
10124         ENTER;
10125
10126         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10127         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10128
10129         if (!host) {
10130                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10131                 rc = -ENOMEM;
10132                 goto out;
10133         }
10134
10135         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10136         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10137         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10138
10139         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10140
10141         if (!ioa_cfg->ipr_chip) {
10142                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10143                         dev_id->vendor, dev_id->device);
10144                 goto out_scsi_host_put;
10145         }
10146
10147         /* set SIS 32 or SIS 64 */
10148         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10149         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10150         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10151         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10152
10153         if (ipr_transop_timeout)
10154                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10155         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10156                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10157         else
10158                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10159
10160         ioa_cfg->revid = pdev->revision;
10161
10162         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10163
10164         ipr_regs_pci = pci_resource_start(pdev, 0);
10165
10166         rc = pci_request_regions(pdev, IPR_NAME);
10167         if (rc < 0) {
10168                 dev_err(&pdev->dev,
10169                         "Couldn't register memory range of registers\n");
10170                 goto out_scsi_host_put;
10171         }
10172
10173         rc = pci_enable_device(pdev);
10174
10175         if (rc || pci_channel_offline(pdev)) {
10176                 if (pci_channel_offline(pdev)) {
10177                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10178                         rc = pci_enable_device(pdev);
10179                 }
10180
10181                 if (rc) {
10182                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10183                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10184                         goto out_release_regions;
10185                 }
10186         }
10187
10188         ipr_regs = pci_ioremap_bar(pdev, 0);
10189
10190         if (!ipr_regs) {
10191                 dev_err(&pdev->dev,
10192                         "Couldn't map memory range of registers\n");
10193                 rc = -ENOMEM;
10194                 goto out_disable;
10195         }
10196
10197         ioa_cfg->hdw_dma_regs = ipr_regs;
10198         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10199         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10200
10201         ipr_init_regs(ioa_cfg);
10202
10203         if (ioa_cfg->sis64) {
10204                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10205                 if (rc < 0) {
10206                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10207                         rc = dma_set_mask_and_coherent(&pdev->dev,
10208                                                        DMA_BIT_MASK(32));
10209                 }
10210         } else
10211                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10212
10213         if (rc < 0) {
10214                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10215                 goto cleanup_nomem;
10216         }
10217
10218         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10219                                    ioa_cfg->chip_cfg->cache_line_size);
10220
10221         if (rc != PCIBIOS_SUCCESSFUL) {
10222                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10223                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10224                 rc = -EIO;
10225                 goto cleanup_nomem;
10226         }
10227
10228         /* Issue MMIO read to ensure card is not in EEH */
10229         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10230         ipr_wait_for_pci_err_recovery(ioa_cfg);
10231
10232         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10233                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10234                         IPR_MAX_MSIX_VECTORS);
10235                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10236         }
10237
10238         irq_flag = PCI_IRQ_LEGACY;
10239         if (ioa_cfg->ipr_chip->has_msi)
10240                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10241         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10242         if (rc < 0) {
10243                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10244                 goto cleanup_nomem;
10245         }
10246         ioa_cfg->nvectors = rc;
10247
10248         if (!pdev->msi_enabled && !pdev->msix_enabled)
10249                 ioa_cfg->clear_isr = 1;
10250
10251         pci_set_master(pdev);
10252
10253         if (pci_channel_offline(pdev)) {
10254                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10255                 pci_set_master(pdev);
10256                 if (pci_channel_offline(pdev)) {
10257                         rc = -EIO;
10258                         goto out_msi_disable;
10259                 }
10260         }
10261
10262         if (pdev->msi_enabled || pdev->msix_enabled) {
10263                 rc = ipr_test_msi(ioa_cfg, pdev);
10264                 switch (rc) {
10265                 case 0:
10266                         dev_info(&pdev->dev,
10267                                 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10268                                 pdev->msix_enabled ? "-X" : "");
10269                         break;
10270                 case -EOPNOTSUPP:
10271                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10272                         pci_free_irq_vectors(pdev);
10273
10274                         ioa_cfg->nvectors = 1;
10275                         ioa_cfg->clear_isr = 1;
10276                         break;
10277                 default:
10278                         goto out_msi_disable;
10279                 }
10280         }
10281
10282         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10283                                 (unsigned int)num_online_cpus(),
10284                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10285
10286         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10287                 goto out_msi_disable;
10288
10289         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10290                 goto out_msi_disable;
10291
10292         rc = ipr_alloc_mem(ioa_cfg);
10293         if (rc < 0) {
10294                 dev_err(&pdev->dev,
10295                         "Couldn't allocate enough memory for device driver!\n");
10296                 goto out_msi_disable;
10297         }
10298
10299         /* Save away PCI config space for use following IOA reset */
10300         rc = pci_save_state(pdev);
10301
10302         if (rc != PCIBIOS_SUCCESSFUL) {
10303                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10304                 rc = -EIO;
10305                 goto cleanup_nolog;
10306         }
10307
10308         /*
10309          * If HRRQ updated interrupt is not masked, or reset alert is set,
10310          * the card is in an unknown state and needs a hard reset
10311          */
10312         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10313         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10314         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10315         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10316                 ioa_cfg->needs_hard_reset = 1;
10317         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10318                 ioa_cfg->needs_hard_reset = 1;
10319         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10320                 ioa_cfg->ioa_unit_checked = 1;
10321
10322         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10323         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10324         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10325
10326         if (pdev->msi_enabled || pdev->msix_enabled) {
10327                 name_msi_vectors(ioa_cfg);
10328                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10329                         ioa_cfg->vectors_info[0].desc,
10330                         &ioa_cfg->hrrq[0]);
10331                 if (!rc)
10332                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10333         } else {
10334                 rc = request_irq(pdev->irq, ipr_isr,
10335                          IRQF_SHARED,
10336                          IPR_NAME, &ioa_cfg->hrrq[0]);
10337         }
10338         if (rc) {
10339                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10340                         pdev->irq, rc);
10341                 goto cleanup_nolog;
10342         }
10343
10344         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10345             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10346                 ioa_cfg->needs_warm_reset = 1;
10347                 ioa_cfg->reset = ipr_reset_slot_reset;
10348
10349                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10350                                                                 WQ_MEM_RECLAIM, host->host_no);
10351
10352                 if (!ioa_cfg->reset_work_q) {
10353                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10354                         rc = -ENOMEM;
10355                         goto out_free_irq;
10356                 }
10357         } else
10358                 ioa_cfg->reset = ipr_reset_start_bist;
10359
10360         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10361         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10362         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10363
10364         LEAVE;
10365 out:
10366         return rc;
10367
10368 out_free_irq:
10369         ipr_free_irqs(ioa_cfg);
10370 cleanup_nolog:
10371         ipr_free_mem(ioa_cfg);
10372 out_msi_disable:
10373         ipr_wait_for_pci_err_recovery(ioa_cfg);
10374         pci_free_irq_vectors(pdev);
10375 cleanup_nomem:
10376         iounmap(ipr_regs);
10377 out_disable:
10378         pci_disable_device(pdev);
10379 out_release_regions:
10380         pci_release_regions(pdev);
10381 out_scsi_host_put:
10382         scsi_host_put(host);
10383         goto out;
10384 }
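
/*
 * Illustrative sketch (not part of the driver): ipr_probe_ioa() above uses
 * the kernel's conventional goto-ladder error handling: every acquisition
 * has a matching label, and a failure jumps to the label that releases
 * only what was already acquired, in reverse order. Reduced to its
 * skeleton (function name hypothetical):
 */
static int example_probe_skeleton(struct pci_dev *pdev)
{
        void __iomem *regs;
        int rc;

        rc = pci_request_regions(pdev, "example");
        if (rc)
                goto out;

        rc = pci_enable_device(pdev);
        if (rc)
                goto out_release_regions;

        regs = pci_ioremap_bar(pdev, 0);
        if (!regs) {
                rc = -ENOMEM;
                goto out_disable;
        }

        return 0;       /* success: resources stay held for the driver */

out_disable:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
out:
        return rc;
}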
10385
10386 /**
10387  * ipr_initiate_ioa_bringdown - Bring down an adapter
10388  * @ioa_cfg:            ioa config struct
10389  * @shutdown_type:      shutdown type
10390  *
10391  * Description: This function will initiate bringing down the adapter.
10392  * This consists of issuing an IOA shutdown to the adapter
10393  * to flush the cache, and running BIST.
10394  * If the caller needs to wait on the completion of the reset,
10395  * the caller must sleep on the reset_wait_q.
10396  *
10397  * Return value:
10398  *      none
10399  **/
10400 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10401                                        enum ipr_shutdown_type shutdown_type)
10402 {
10403         ENTER;
10404         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10405                 ioa_cfg->sdt_state = ABORT_DUMP;
10406         ioa_cfg->reset_retries = 0;
10407         ioa_cfg->in_ioa_bringdown = 1;
10408         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10409         LEAVE;
10410 }
10411
10412 /**
10413  * __ipr_remove - Remove a single adapter
10414  * @pdev:       pci device struct
10415  *
10416  * Adapter hot plug remove entry point.
10417  *
10418  * Return value:
10419  *      none
10420  **/
10421 static void __ipr_remove(struct pci_dev *pdev)
10422 {
10423         unsigned long host_lock_flags = 0;
10424         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10425         int i;
10426         unsigned long driver_lock_flags;
10427         ENTER;
10428
10429         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10430         while (ioa_cfg->in_reset_reload) {
10431                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10432                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10433                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10434         }
10435
10436         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10437                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10438                 ioa_cfg->hrrq[i].removing_ioa = 1;
10439                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10440         }
10441         wmb();
10442         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10443
10444         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10445         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10446         flush_work(&ioa_cfg->work_q);
10447         if (ioa_cfg->reset_work_q)
10448                 flush_workqueue(ioa_cfg->reset_work_q);
10449         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10450         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10451
10452         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10453         list_del(&ioa_cfg->queue);
10454         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10455
10456         if (ioa_cfg->sdt_state == ABORT_DUMP)
10457                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10458         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10459
10460         ipr_free_all_resources(ioa_cfg);
10461
10462         LEAVE;
10463 }
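
/*
 * Illustrative sketch (not part of the driver): __ipr_remove() above shows
 * the quiesce-before-free ordering hot unplug needs: wait out any reset in
 * flight, mark the queues as going away, bring the adapter down, flush
 * deferred work, and only then free resources. The helper below condenses
 * the drop-lock-and-wait idiom; the field names match this file.
 */
static void example_wait_reset_quiesced(struct ipr_ioa_cfg *ioa_cfg,
                                        unsigned long *flags)
{
        /* Caller holds host_lock; release it while sleeping, retake it. */
        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, *flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, *flags);
        }
}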
10464
10465 /**
10466  * ipr_remove - IOA hot plug remove entry point
10467  * @pdev:       pci device struct
10468  *
10469  * Adapter hot plug remove entry point.
10470  *
10471  * Return value:
10472  *      none
10473  **/
10474 static void ipr_remove(struct pci_dev *pdev)
10475 {
10476         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10477
10478         ENTER;
10479
10480         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10481                               &ipr_trace_attr);
10482         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10483                              &ipr_dump_attr);
10484         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10485                         &ipr_ioa_async_err_log);
10486         scsi_remove_host(ioa_cfg->host);
10487
10488         __ipr_remove(pdev);
10489
10490         LEAVE;
10491 }
10492
10493 /**
10494  * ipr_probe - Adapter hot plug add entry point
10495  *
10496  * Return value:
10497  *      0 on success / non-zero on failure
10498  **/
10499 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10500 {
10501         struct ipr_ioa_cfg *ioa_cfg;
10502         unsigned long flags;
10503         int rc, i;
10504
10505         rc = ipr_probe_ioa(pdev, dev_id);
10506
10507         if (rc)
10508                 return rc;
10509
10510         ioa_cfg = pci_get_drvdata(pdev);
10511         rc = ipr_probe_ioa_part2(ioa_cfg);
10512
10513         if (rc) {
10514                 __ipr_remove(pdev);
10515                 return rc;
10516         }
10517
10518         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10519
10520         if (rc) {
10521                 __ipr_remove(pdev);
10522                 return rc;
10523         }
10524
10525         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10526                                    &ipr_trace_attr);
10527
10528         if (rc) {
10529                 scsi_remove_host(ioa_cfg->host);
10530                 __ipr_remove(pdev);
10531                 return rc;
10532         }
10533
10534         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10535                         &ipr_ioa_async_err_log);
10536
10537         if (rc) {
10538                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10539                                 &ipr_dump_attr);
10540                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10541                                 &ipr_trace_attr);
10542                 scsi_remove_host(ioa_cfg->host);
10543                 __ipr_remove(pdev);
10544                 return rc;
10545         }
10546
10547         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10548                                    &ipr_dump_attr);
10549
10550         if (rc) {
10551                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10552                                       &ipr_ioa_async_err_log);
10553                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10554                                       &ipr_trace_attr);
10555                 scsi_remove_host(ioa_cfg->host);
10556                 __ipr_remove(pdev);
10557                 return rc;
10558         }
10559         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10560         ioa_cfg->scan_enabled = 1;
10561         schedule_work(&ioa_cfg->work_q);
10562         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10563
10564         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10565
10566         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10567                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10568                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10569                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10570                 }
10571         }
10572
10573         scsi_scan_host(ioa_cfg->host);
10574
10575         return 0;
10576 }
10577
10578 /**
10579  * ipr_shutdown - Shutdown handler.
10580  * @pdev:       pci device struct
10581  *
10582  * This function is invoked upon system shutdown/reboot. It will issue
10583  * an adapter shutdown to the adapter to flush the write cache.
10584  *
10585  * Return value:
10586  *      none
10587  **/
10588 static void ipr_shutdown(struct pci_dev *pdev)
10589 {
10590         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10591         unsigned long lock_flags = 0;
10592         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10593         int i;
10594
10595         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10596         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10597                 ioa_cfg->iopoll_weight = 0;
10598                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10599                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10600         }
10601
10602         while (ioa_cfg->in_reset_reload) {
10603                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10604                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10605                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10606         }
10607
10608         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10609                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10610
10611         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10612         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10613         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10614         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10615                 ipr_free_irqs(ioa_cfg);
10616                 pci_disable_device(ioa_cfg->pdev);
10617         }
10618 }
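
/*
 * Illustrative sketch (not part of the driver): a pci_driver .shutdown
 * hook such as ipr_shutdown() above must not return while the adapter may
 * still be writing its cache; it therefore blocks on reset_wait_q until
 * the bringdown completes. The minimal shape, omitting the irq_poll and
 * fast-reboot handling of the real routine:
 */
static void example_shutdown(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        unsigned long flags;

        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

        /* Block until the reset job signals completion. */
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}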
10619
10620 static struct pci_device_id ipr_pci_table[] = {
10621         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10622                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10623         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10624                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10625         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10626                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10627         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10628                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10629         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10630                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10631         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10632                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10633         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10634                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10635         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10636                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10637                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10638         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10639               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10640         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10641               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10642               IPR_USE_LONG_TRANSOP_TIMEOUT },
10643         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10644               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10645               IPR_USE_LONG_TRANSOP_TIMEOUT },
10646         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10647               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10648         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10649               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10650               IPR_USE_LONG_TRANSOP_TIMEOUT},
10651         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10652               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10653               IPR_USE_LONG_TRANSOP_TIMEOUT },
10654         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10655               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10656               IPR_USE_LONG_TRANSOP_TIMEOUT },
10657         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10658               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10659         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10660               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10661         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10662               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10663               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10664         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10665                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10666         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10667                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10668         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10670                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10671         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10672                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10673                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10674         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10675                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10676         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10677                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10679                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10681                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10682         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10683                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10685                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10686         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10687                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10688         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10689                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10690         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10691                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10692         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10693                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10694         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10695                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10696         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10697                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10698         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10699                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10700         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10701                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10702         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10703                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10704         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10705                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10707                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10709                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10710         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10711                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10713                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10714         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10716         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10718         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10719                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10720         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10721                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10722         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10726         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10728         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10730         { }
10731 };
10732 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10733
10734 static const struct pci_error_handlers ipr_err_handler = {
10735         .error_detected = ipr_pci_error_detected,
10736         .mmio_enabled = ipr_pci_mmio_enabled,
10737         .slot_reset = ipr_pci_slot_reset,
10738 };
10739
10740 static struct pci_driver ipr_driver = {
10741         .name = IPR_NAME,
10742         .id_table = ipr_pci_table,
10743         .probe = ipr_probe,
10744         .remove = ipr_remove,
10745         .shutdown = ipr_shutdown,
10746         .err_handler = &ipr_err_handler,
10747 };
10748
10749 /**
10750  * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
10751  *
10752  * Return value:
10753  *      none
10754  **/
10755 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10756 {
10757         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10758 }
10759
10760 /**
10761  * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
10762  *
10763  * Return value:
10764  *      NOTIFY_OK on success / NOTIFY_DONE on failure
10765  **/
10766 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10767 {
10768         struct ipr_cmnd *ipr_cmd;
10769         struct ipr_ioa_cfg *ioa_cfg;
10770         unsigned long flags = 0, driver_lock_flags;
10771
10772         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10773                 return NOTIFY_DONE;
10774
10775         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10776
10777         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10778                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10779                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10780                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10781                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10782                         continue;
10783                 }
10784
10785                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10786                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10787                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10788                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10789                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10790
10791                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10792                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10793         }
10794         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10795
10796         return NOTIFY_OK;
10797 }
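
/*
 * Illustrative sketch (not part of the driver): ipr_halt() above runs as
 * a reboot notifier, so it must filter on the event type and return
 * NOTIFY_DONE for anything it ignores. The minimal filter looks like this
 * (function name hypothetical):
 */
static int example_reboot_notify(struct notifier_block *nb,
                                 unsigned long event, void *unused)
{
        if (event != SYS_RESTART && event != SYS_HALT &&
            event != SYS_POWER_OFF)
                return NOTIFY_DONE;

        /* ... issue "shutdown prepare" to each adapter here ... */
        return NOTIFY_OK;
}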
10798
10799 static struct notifier_block ipr_notifier = {
10800         ipr_halt, NULL, 0
10801 };
10802
10803 /**
10804  * ipr_init - Module entry point
10805  *
10806  * Return value:
10807  *      0 on success / negative value on failure
10808  **/
10809 static int __init ipr_init(void)
10810 {
10811         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10812                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10813
10814         register_reboot_notifier(&ipr_notifier);
10815         return pci_register_driver(&ipr_driver);
10816 }
10817
10818 /**
10819  * ipr_exit - Module unload
10820  *
10821  * Module unload entry point.
10822  *
10823  * Return value:
10824  *      none
10825  **/
10826 static void __exit ipr_exit(void)
10827 {
10828         unregister_reboot_notifier(&ipr_notifier);
10829         pci_unregister_driver(&ipr_driver);
10830 }
10831
10832 module_init(ipr_init);
10833 module_exit(ipr_exit);