/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
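
/*
 * Illustrative sketch (not part of the original driver source): at
 * probe time the matching ipr_chip[] entry, and with it one of the
 * register layouts above, is selected by PCI vendor/device ID,
 * roughly:
 *
 *      for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *              if (ipr_chip[i].vendor == pdev->vendor &&
 *                  ipr_chip[i].device == pdev->device)
 *                      return &ipr_chip[i];
 */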

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

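/*
 * Illustrative usage (not from the original source): the parameters
 * above are set at module load time, for example:
 *
 *      modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * Parameters declared with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) can also be changed at runtime through
 * /sys/module/ipr/parameters/.
 */
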
/* A constant array of IOASCs/URCs/Error Messages */
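/*
 * Each entry is {IOASC, log-IOASA flag, HCAM log level, message text};
 * the field definitions live in struct ipr_error_table_t in ipr.h.
 */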
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
305         "No ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4086: SAS Adapter Hardware Configuration Error"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
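
/*
 * In the compare strings above an 'X' marks a product ID byte that
 * must match and any other character (such as '*') is a wildcard; the
 * final field is the maximum allowed bus speed in MB/s for that
 * enclosure. (Summary added for illustration of how the speed-limit
 * lookup consumes this table.)
 */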

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
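
/*
 * Clarifying note on ipr_trc_hook() above: the index is advanced with
 * atomic_add_return() and wrapped by IPR_TRACE_INDEX_MASK, so the
 * trace acts as a lock-free circular buffer whose oldest entries are
 * overwritten once it fills.
 */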

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
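        /* Read a register back to flush the posted MMIO writes above */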
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 * @done:       done function
 * @timeout_func:       timeout function
 * @timeout:    timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

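/**
 * ipr_get_hrrq_index - Select an hrrq index round-robin
 * @ioa_cfg:    ioa config struct
 *
 * Queue 0 (IPR_INIT_HRRQ) is reserved for adapter-initiated and
 * internal commands, so when multiple queues exist the remaining
 * indices are handed out round-robin.
 *
 * Return value:
 *      hrrq index
 **/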
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buf:        buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
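
/*
 * Editor's note -- illustrative usage sketch, not part of the driver.
 * For host 2 and a resource path of 00 01 (0xff terminates the path),
 * the helpers above produce "00-01" and "2/00-01" respectively: the
 * first byte is printed bare and every following byte gets a '-' prefix.
 */
static void example_show_res_path(void)
{
	u8 path[] = { 0x00, 0x01, 0xff };
	char buf[IPR_MAX_RES_PATH_LENGTH];

	__ipr_format_res_path(path, buf, sizeof(buf));
	/* buf now holds "00-01" */
}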
1343
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1408  *
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1449
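        /* is_ndn: assume a new-device notification until the resource
         * handle is matched in the used-resource queue below */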
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration
1507  * change notification host controlled async (HCAM) from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535  * @i:          index into buffer
1536  * @buf:                string to modify
1537  *
1538  * This function will strip all trailing whitespace, pad the end
1539  * of the string with a single space, and NULL terminate the string.
1540  *
1541  * Return value:
1542  *      new length of string
1543  **/
1544 static int strip_and_pad_whitespace(int i, char *buf)
1545 {
1546         while (i && buf[i] == ' ')
1547                 i--;
1548         buf[i+1] = ' ';
1549         buf[i+2] = '\0';
1550         return i + 2;
1551 }
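
/*
 * Editor's note -- illustrative sketch, not part of the driver. Given
 * "IBM" followed by trailing blanks, the helper above backs up over the
 * blanks, leaves exactly one space, terminates the string, and returns
 * the index at which the next field should be copied:
 */
static void example_strip_and_pad(void)
{
	char buf[32] = "IBM     ";
	int next = strip_and_pad_whitespace(7, buf);

	/* buf is now "IBM " and next == 4 */
}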
1552
1553 /**
1554  * ipr_log_vpd_compact - Log the passed VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                 struct ipr_vpd *vpd)
1564 {
1565         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566         int i = 0;
1567
1568         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578 }
1579
1580 /**
1581  * ipr_log_vpd - Log the passed VPD to the error log.
1582  * @vpd:                vendor/product id/sn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_vpd(struct ipr_vpd *vpd)
1588 {
1589         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590                     + IPR_SERIAL_NUM_LEN];
1591
1592         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1594                IPR_PROD_ID_LEN);
1595         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596         ipr_err("Vendor/Product ID: %s\n", buffer);
1597
1598         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1599         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600         ipr_err("    Serial Number: %s\n", buffer);
1601 }
1602
1603 /**
1604  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605  * @prefix:             string to print at start of printk
1606  * @hostrcb:    hostrcb pointer
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613                                     struct ipr_ext_vpd *vpd)
1614 {
1615         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618 }
1619
1620 /**
1621  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622  * @vpd:                vendor/product id/sn/wwn struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628 {
1629         ipr_log_vpd(&vpd->vpd);
1630         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631                 be32_to_cpu(vpd->wwid[1]));
1632 }
1633
1634 /**
1635  * ipr_log_enhanced_cache_error - Log a cache error.
1636  * @ioa_cfg:    ioa config struct
1637  * @hostrcb:    hostrcb struct
1638  *
1639  * Return value:
1640  *      none
1641  **/
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643                                          struct ipr_hostrcb *hostrcb)
1644 {
1645         struct ipr_hostrcb_type_12_error *error;
1646
1647         if (ioa_cfg->sis64)
1648                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649         else
1650                 error = &hostrcb->hcam.u.error.u.type_12_error;
1651
1652         ipr_err("-----Current Configuration-----\n");
1653         ipr_err("Cache Directory Card Information:\n");
1654         ipr_log_ext_vpd(&error->ioa_vpd);
1655         ipr_err("Adapter Card Information:\n");
1656         ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658         ipr_err("-----Expected Configuration-----\n");
1659         ipr_err("Cache Directory Card Information:\n");
1660         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661         ipr_err("Adapter Card Information:\n");
1662         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665                      be32_to_cpu(error->ioa_data[0]),
1666                      be32_to_cpu(error->ioa_data[1]),
1667                      be32_to_cpu(error->ioa_data[2]));
1668 }
1669
1670 /**
1671  * ipr_log_cache_error - Log a cache error.
1672  * @ioa_cfg:    ioa config struct
1673  * @hostrcb:    hostrcb struct
1674  *
1675  * Return value:
1676  *      none
1677  **/
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679                                 struct ipr_hostrcb *hostrcb)
1680 {
1681         struct ipr_hostrcb_type_02_error *error =
1682                 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684         ipr_err("-----Current Configuration-----\n");
1685         ipr_err("Cache Directory Card Information:\n");
1686         ipr_log_vpd(&error->ioa_vpd);
1687         ipr_err("Adapter Card Information:\n");
1688         ipr_log_vpd(&error->cfc_vpd);
1689
1690         ipr_err("-----Expected Configuration-----\n");
1691         ipr_err("Cache Directory Card Information:\n");
1692         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1693         ipr_err("Adapter Card Information:\n");
1694         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1695
1696         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697                      be32_to_cpu(error->ioa_data[0]),
1698                      be32_to_cpu(error->ioa_data[1]),
1699                      be32_to_cpu(error->ioa_data[2]));
1700 }
1701
1702 /**
1703  * ipr_log_enhanced_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                           struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_13_error *error;
1716
1717         error = &hostrcb->hcam.u.error.u.type_13_error;
1718         errors_logged = be32_to_cpu(error->errors_logged);
1719
1720         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721                 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723         dev_entry = error->dev;
1724
1725         for (i = 0; i < errors_logged; i++, dev_entry++) {
1726                 ipr_err_separator;
1727
1728                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                        struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755         struct ipr_hostrcb_type_23_error *error;
1756         char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758         error = &hostrcb->hcam.u.error64.u.type_23_error;
1759         errors_logged = be32_to_cpu(error->errors_logged);
1760
1761         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762                 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764         dev_entry = error->dev;
1765
1766         for (i = 0; i < errors_logged; i++, dev_entry++) {
1767                 ipr_err_separator;
1768
1769                 ipr_err("Device %d : %s", i + 1,
1770                         __ipr_format_res_path(dev_entry->res_path,
1771                                               buffer, sizeof(buffer)));
1772                 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782         }
1783 }
1784
1785 /**
1786  * ipr_log_config_error - Log a configuration error.
1787  * @ioa_cfg:    ioa config struct
1788  * @hostrcb:    hostrcb struct
1789  *
1790  * Return value:
1791  *      none
1792  **/
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794                                  struct ipr_hostrcb *hostrcb)
1795 {
1796         int errors_logged, i;
1797         struct ipr_hostrcb_device_data_entry *dev_entry;
1798         struct ipr_hostrcb_type_03_error *error;
1799
1800         error = &hostrcb->hcam.u.error.u.type_03_error;
1801         errors_logged = be32_to_cpu(error->errors_logged);
1802
1803         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804                 be32_to_cpu(error->errors_detected), errors_logged);
1805
1806         dev_entry = error->dev;
1807
1808         for (i = 0; i < errors_logged; i++, dev_entry++) {
1809                 ipr_err_separator;
1810
1811                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1812                 ipr_log_vpd(&dev_entry->vpd);
1813
1814                 ipr_err("-----New Device Information-----\n");
1815                 ipr_log_vpd(&dev_entry->new_vpd);
1816
1817                 ipr_err("Cache Directory Card Information:\n");
1818                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1819
1820                 ipr_err("Adapter Card Information:\n");
1821                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1822
1823                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824                         be32_to_cpu(dev_entry->ioa_data[0]),
1825                         be32_to_cpu(dev_entry->ioa_data[1]),
1826                         be32_to_cpu(dev_entry->ioa_data[2]),
1827                         be32_to_cpu(dev_entry->ioa_data[3]),
1828                         be32_to_cpu(dev_entry->ioa_data[4]));
1829         }
1830 }
1831
1832 /**
1833  * ipr_log_enhanced_array_error - Log an array configuration error.
1834  * @ioa_cfg:    ioa config struct
1835  * @hostrcb:    hostrcb struct
1836  *
1837  * Return value:
1838  *      none
1839  **/
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841                                          struct ipr_hostrcb *hostrcb)
1842 {
1843         int i, num_entries;
1844         struct ipr_hostrcb_type_14_error *error;
1845         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848         error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850         ipr_err_separator;
1851
1852         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853                 error->protection_level,
1854                 ioa_cfg->host->host_no,
1855                 error->last_func_vset_res_addr.bus,
1856                 error->last_func_vset_res_addr.target,
1857                 error->last_func_vset_res_addr.lun);
1858
1859         ipr_err_separator;
1860
1861         array_entry = error->array_member;
1862         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1863                             ARRAY_SIZE(error->array_member));
1864
1865         for (i = 0; i < num_entries; i++, array_entry++) {
1866                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867                         continue;
1868
1869                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870                         ipr_err("Exposed Array Member %d:\n", i);
1871                 else
1872                         ipr_err("Array Member %d:\n", i);
1873
1874                 ipr_log_ext_vpd(&array_entry->vpd);
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880         }
1881 }
1882
1883 /**
1884  * ipr_log_array_error - Log an array configuration error.
1885  * @ioa_cfg:    ioa config struct
1886  * @hostrcb:    hostrcb struct
1887  *
1888  * Return value:
1889  *      none
1890  **/
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892                                 struct ipr_hostrcb *hostrcb)
1893 {
1894         int i;
1895         struct ipr_hostrcb_type_04_error *error;
1896         struct ipr_hostrcb_array_data_entry *array_entry;
1897         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899         error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901         ipr_err_separator;
1902
1903         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904                 error->protection_level,
1905                 ioa_cfg->host->host_no,
1906                 error->last_func_vset_res_addr.bus,
1907                 error->last_func_vset_res_addr.target,
1908                 error->last_func_vset_res_addr.lun);
1909
1910         ipr_err_separator;
1911
1912         array_entry = error->array_member;
1913
1914         for (i = 0; i < 18; i++) {
1915                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1916                         continue;
1917
1918                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1919                         ipr_err("Exposed Array Member %d:\n", i);
1920                 else
1921                         ipr_err("Array Member %d:\n", i);
1922
1923                 ipr_log_vpd(&array_entry->vpd);
1924
1925                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927                                  "Expected Location");
1928
1929                 ipr_err_separator;
1930
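                /* entries are split across two fixed-size arrays in the
                 * hostrcb, so switch to the second array after entry 9 */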
1931                 if (i == 9)
1932                         array_entry = error->array_member2;
1933                 else
1934                         array_entry++;
1935         }
1936 }
1937
1938 /**
1939  * ipr_log_hex_data - Log additional hex IOA error data.
1940  * @ioa_cfg:    ioa config struct
1941  * @data:               IOA error data
1942  * @len:                data length
1943  *
1944  * Return value:
1945  *      none
1946  **/
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1948 {
1949         int i;
1950
1951         if (len == 0)
1952                 return;
1953
1954         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
1957         for (i = 0; i < len / 4; i += 4) {
1958                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959                         be32_to_cpu(data[i]),
1960                         be32_to_cpu(data[i+1]),
1961                         be32_to_cpu(data[i+2]),
1962                         be32_to_cpu(data[i+3]));
1963         }
1964 }
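
/*
 * Editor's note -- illustrative sketch, not part of the driver. In the
 * loop above, len is in bytes while i counts 32-bit words, so len / 4
 * is the word count, i advances four words per printed line, and i * 4
 * is the byte offset shown in the left-hand column. A userspace
 * analogue (which, like the driver loop, assumes the length is a
 * multiple of 16 bytes) looks like:
 */
#include <stdio.h>
#include <stdint.h>

static void example_hex_dump(const uint32_t *data, int len)
{
	int i;

	for (i = 0; i < len / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       data[i], data[i + 1], data[i + 2], data[i + 3]);
}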
1965
1966 /**
1967  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968  * @ioa_cfg:    ioa config struct
1969  * @hostrcb:    hostrcb struct
1970  *
1971  * Return value:
1972  *      none
1973  **/
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975                                             struct ipr_hostrcb *hostrcb)
1976 {
1977         struct ipr_hostrcb_type_17_error *error;
1978
1979         if (ioa_cfg->sis64)
1980                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981         else
1982                 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
1984         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1985         strim(error->failure_reason);
1986
1987         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1989         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1990         ipr_log_hex_data(ioa_cfg, error->data,
1991                          be32_to_cpu(hostrcb->hcam.length) -
1992                          (offsetof(struct ipr_hostrcb_error, u) +
1993                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1994 }
1995
1996 /**
1997  * ipr_log_dual_ioa_error - Log a dual adapter error.
1998  * @ioa_cfg:    ioa config struct
1999  * @hostrcb:    hostrcb struct
2000  *
2001  * Return value:
2002  *      none
2003  **/
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005                                    struct ipr_hostrcb *hostrcb)
2006 {
2007         struct ipr_hostrcb_type_07_error *error;
2008
2009         error = &hostrcb->hcam.u.error.u.type_07_error;
2010         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2011         strim(error->failure_reason);
2012
2013         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2015         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2016         ipr_log_hex_data(ioa_cfg, error->data,
2017                          be32_to_cpu(hostrcb->hcam.length) -
2018                          (offsetof(struct ipr_hostrcb_error, u) +
2019                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2020 }
2021
2022 static const struct {
2023         u8 active;
2024         char *desc;
2025 } path_active_desc[] = {
2026         { IPR_PATH_NO_INFO, "Path" },
2027         { IPR_PATH_ACTIVE, "Active path" },
2028         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029 };
2030
2031 static const struct {
2032         u8 state;
2033         char *desc;
2034 } path_state_desc[] = {
2035         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036         { IPR_PATH_HEALTHY, "is healthy" },
2037         { IPR_PATH_DEGRADED, "is degraded" },
2038         { IPR_PATH_FAILED, "is failed" }
2039 };
2040
2041 /**
2042  * ipr_log_fabric_path - Log a fabric path error
2043  * @hostrcb:    hostrcb struct
2044  * @fabric:             fabric descriptor
2045  *
2046  * Return value:
2047  *      none
2048  **/
2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050                                 struct ipr_hostrcb_fabric_desc *fabric)
2051 {
2052         int i, j;
2053         u8 path_state = fabric->path_state;
2054         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055         u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
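                        /* 0xff in cascaded_expander or phy is a sentinel
                         * meaning that field is not valid for this path,
                         * which selects one of the four formats below */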
2065                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067                                              path_active_desc[i].desc, path_state_desc[j].desc,
2068                                              fabric->ioa_port);
2069                         } else if (fabric->cascaded_expander == 0xff) {
2070                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071                                              path_active_desc[i].desc, path_state_desc[j].desc,
2072                                              fabric->ioa_port, fabric->phy);
2073                         } else if (fabric->phy == 0xff) {
2074                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075                                              path_active_desc[i].desc, path_state_desc[j].desc,
2076                                              fabric->ioa_port, fabric->cascaded_expander);
2077                         } else {
2078                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079                                              path_active_desc[i].desc, path_state_desc[j].desc,
2080                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081                         }
2082                         return;
2083                 }
2084         }
2085
2086         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088 }
2089
2090 /**
2091  * ipr_log64_fabric_path - Log a fabric path error
2092  * @hostrcb:    hostrcb struct
2093  * @fabric:             fabric descriptor
2094  *
2095  * Return value:
2096  *      none
2097  **/
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099                                   struct ipr_hostrcb64_fabric_desc *fabric)
2100 {
2101         int i, j;
2102         u8 path_state = fabric->path_state;
2103         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104         u8 state = path_state & IPR_PATH_STATE_MASK;
2105         char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108                 if (path_active_desc[i].active != active)
2109                         continue;
2110
2111                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112                         if (path_state_desc[j].state != state)
2113                                 continue;
2114
2115                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116                                      path_active_desc[i].desc, path_state_desc[j].desc,
2117                                      ipr_format_res_path(hostrcb->ioa_cfg,
2118                                                 fabric->res_path,
2119                                                 buffer, sizeof(buffer)));
2120                         return;
2121                 }
2122         }
2123
2124         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2125                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126                                     buffer, sizeof(buffer)));
2127 }
2128
2129 static const struct {
2130         u8 type;
2131         char *desc;
2132 } path_type_desc[] = {
2133         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137 };
2138
2139 static const struct {
2140         u8 status;
2141         char *desc;
2142 } path_status_desc[] = {
2143         { IPR_PATH_CFG_NO_PROB, "Functional" },
2144         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145         { IPR_PATH_CFG_FAILED, "Failed" },
2146         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147         { IPR_PATH_NOT_DETECTED, "Missing" },
2148         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149 };
2150
2151 static const char *link_rate[] = {
2152         "unknown",
2153         "disabled",
2154         "phy reset problem",
2155         "spinup hold",
2156         "port selector",
2157         "unknown",
2158         "unknown",
2159         "unknown",
2160         "1.5Gbps",
2161         "3.0Gbps",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown"
2168 };
2169
2170 /**
2171  * ipr_log_path_elem - Log a fabric path element.
2172  * @hostrcb:    hostrcb struct
2173  * @cfg:                fabric path element struct
2174  *
2175  * Return value:
2176  *      none
2177  **/
2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179                               struct ipr_hostrcb_config_element *cfg)
2180 {
2181         int i, j;
2182         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185         if (type == IPR_PATH_CFG_NOT_EXIST)
2186                 return;
2187
2188         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189                 if (path_type_desc[i].type != type)
2190                         continue;
2191
2192                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193                         if (path_status_desc[j].status != status)
2194                                 continue;
2195
2196                         if (type == IPR_PATH_CFG_IOA_PORT) {
2197                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198                                              path_status_desc[j].desc, path_type_desc[i].desc,
2199                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201                         } else {
2202                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2205                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207                                 } else if (cfg->cascaded_expander == 0xff) {
2208                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2210                                                      path_type_desc[i].desc, cfg->phy,
2211                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213                                 } else if (cfg->phy == 0xff) {
2214                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2216                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2217                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219                                 } else {
2220                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2222                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225                                 }
2226                         }
2227                         return;
2228                 }
2229         }
2230
2231         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235 }
2236
2237 /**
2238  * ipr_log64_path_elem - Log a fabric path element.
2239  * @hostrcb:    hostrcb struct
2240  * @cfg:                fabric path element struct
2241  *
2242  * Return value:
2243  *      none
2244  **/
2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246                                 struct ipr_hostrcb64_config_element *cfg)
2247 {
2248         int i, j;
2249         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252         char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255                 return;
2256
2257         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258                 if (path_type_desc[i].type != type)
2259                         continue;
2260
2261                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262                         if (path_status_desc[j].status != status)
2263                                 continue;
2264
2265                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266                                      path_status_desc[j].desc, path_type_desc[i].desc,
2267                                      ipr_format_res_path(hostrcb->ioa_cfg,
2268                                         cfg->res_path, buffer, sizeof(buffer)),
2269                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270                                         be32_to_cpu(cfg->wwid[0]),
2271                                         be32_to_cpu(cfg->wwid[1]));
2272                         return;
2273                 }
2274         }
2275         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276                      "WWN=%08X%08X\n", cfg->type_status,
2277                      ipr_format_res_path(hostrcb->ioa_cfg,
2278                         cfg->res_path, buffer, sizeof(buffer)),
2279                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2281 }
2282
2283 /**
2284  * ipr_log_fabric_error - Log a fabric error.
2285  * @ioa_cfg:    ioa config struct
2286  * @hostrcb:    hostrcb struct
2287  *
2288  * Return value:
2289  *      none
2290  **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292                                  struct ipr_hostrcb *hostrcb)
2293 {
2294         struct ipr_hostrcb_type_20_error *error;
2295         struct ipr_hostrcb_fabric_desc *fabric;
2296         struct ipr_hostrcb_config_element *cfg;
2297         int i, add_len;
2298
2299         error = &hostrcb->hcam.u.error.u.type_20_error;
2300         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303         add_len = be32_to_cpu(hostrcb->hcam.length) -
2304                 (offsetof(struct ipr_hostrcb_error, u) +
2305                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308                 ipr_log_fabric_path(hostrcb, fabric);
2309                 for_each_fabric_cfg(fabric, cfg)
2310                         ipr_log_path_elem(hostrcb, cfg);
2311
2312                 add_len -= be16_to_cpu(fabric->length);
2313                 fabric = (struct ipr_hostrcb_fabric_desc *)
2314                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315         }
2316
2317         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
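
/*
 * Editor's note -- illustrative sketch, not part of the driver. The
 * loop above walks self-describing, variable-length fabric descriptors
 * by advancing a byte pointer by each descriptor's own length field;
 * whatever remains of the HCAM payload afterwards is hex-dumped. The
 * generic pattern (example_desc is a hypothetical type):
 */
struct example_desc {
	__be16 length;		/* total size in bytes, header included */
	/* payload follows */
};

static struct example_desc *example_next_desc(struct example_desc *d)
{
	return (struct example_desc *)((u8 *)d + be16_to_cpu(d->length));
}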
2319
2320 /**
2321  * ipr_log_sis64_array_error - Log a sis64 array error.
2322  * @ioa_cfg:    ioa config struct
2323  * @hostrcb:    hostrcb struct
2324  *
2325  * Return value:
2326  *      none
2327  **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329                                       struct ipr_hostrcb *hostrcb)
2330 {
2331         int i, num_entries;
2332         struct ipr_hostrcb_type_24_error *error;
2333         struct ipr_hostrcb64_array_data_entry *array_entry;
2334         char buffer[IPR_MAX_RES_PATH_LENGTH];
2335         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337         error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339         ipr_err_separator;
2340
2341         ipr_err("RAID %s Array Configuration: %s\n",
2342                 error->protection_level,
2343                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344                         buffer, sizeof(buffer)));
2345
2346         ipr_err_separator;
2347
2348         array_entry = error->array_member;
2349         num_entries = min_t(u32, error->num_entries,
2350                             ARRAY_SIZE(error->array_member));
2351
2352         for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355                         continue;
2356
2357                 if (error->exposed_mode_adn == i)
2358                         ipr_err("Exposed Array Member %d:\n", i);
2359                 else
2360                         ipr_err("Array Member %d:\n", i);
2361
2362
2363                 ipr_log_ext_vpd(&array_entry->vpd);
2364                 ipr_err("Current Location: %s\n",
2365                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366                                 buffer, sizeof(buffer)));
2367                 ipr_err("Expected Location: %s\n",
2368                          ipr_format_res_path(ioa_cfg,
2369                                 array_entry->expected_res_path,
2370                                 buffer, sizeof(buffer)));
2371
2372                 ipr_err_separator;
2373         }
2374 }
2375
2376 /**
2377  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378  * @ioa_cfg:    ioa config struct
2379  * @hostrcb:    hostrcb struct
2380  *
2381  * Return value:
2382  *      none
2383  **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385                                        struct ipr_hostrcb *hostrcb)
2386 {
2387         struct ipr_hostrcb_type_30_error *error;
2388         struct ipr_hostrcb64_fabric_desc *fabric;
2389         struct ipr_hostrcb64_config_element *cfg;
2390         int i, add_len;
2391
2392         error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397         add_len = be32_to_cpu(hostrcb->hcam.length) -
2398                 (offsetof(struct ipr_hostrcb64_error, u) +
2399                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402                 ipr_log64_fabric_path(hostrcb, fabric);
2403                 for_each_fabric_cfg(fabric, cfg)
2404                         ipr_log64_path_elem(hostrcb, cfg);
2405
2406                 add_len -= be16_to_cpu(fabric->length);
2407                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409         }
2410
2411         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2416  * @ioa_cfg:    ioa config struct
2417  * @hostrcb:    hostrcb struct
2418  *
2419  * Return value:
2420  *      none
2421  **/
2422 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2423                                        struct ipr_hostrcb *hostrcb)
2424 {
2425         struct ipr_hostrcb_type_41_error *error;
2426
2427         error = &hostrcb->hcam.u.error64.u.type_41_error;
2428
2429         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2430         ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2431         ipr_log_hex_data(ioa_cfg, error->data,
2432                          be32_to_cpu(hostrcb->hcam.length) -
2433                          (offsetof(struct ipr_hostrcb_error, u) +
2434                           offsetof(struct ipr_hostrcb_type_41_error, data)));
2435 }
2436 /**
2437  * ipr_log_generic_error - Log an adapter error.
2438  * @ioa_cfg:    ioa config struct
2439  * @hostrcb:    hostrcb struct
2440  *
2441  * Return value:
2442  *      none
2443  **/
2444 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2445                                   struct ipr_hostrcb *hostrcb)
2446 {
2447         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2448                          be32_to_cpu(hostrcb->hcam.length));
2449 }
2450
2451 /**
2452  * ipr_log_sis64_device_error - Log a sis64 device error.
2453  * @ioa_cfg:    ioa config struct
2454  * @hostrcb:    hostrcb struct
2455  *
2456  * Return value:
2457  *      none
2458  **/
2459 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2460                                          struct ipr_hostrcb *hostrcb)
2461 {
2462         struct ipr_hostrcb_type_21_error *error;
2463         char buffer[IPR_MAX_RES_PATH_LENGTH];
2464
2465         error = &hostrcb->hcam.u.error64.u.type_21_error;
2466
2467         ipr_err("-----Failing Device Information-----\n");
2468         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2469                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2470                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2471         ipr_err("Device Resource Path: %s\n",
2472                 __ipr_format_res_path(error->res_path,
2473                                       buffer, sizeof(buffer)));
2474         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2475         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2476         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2477         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2478         ipr_err("SCSI Sense Data:\n");
2479         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2480         ipr_err("SCSI Command Descriptor Block: \n");
2481         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2482
2483         ipr_err("Additional IOA Data:\n");
2484         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2485 }
2486
2487 /**
2488  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2489  * @ioasc:      IOASC
2490  *
2491  * This function will return the index into the ipr_error_table
2492  * for the specified IOASC. If the IOASC is not in the table,
2493  * 0 will be returned, which points to the entry used for unknown errors.
2494  *
2495  * Return value:
2496  *      index into the ipr_error_table
2497  **/
2498 static u32 ipr_get_error(u32 ioasc)
2499 {
2500         int i;
2501
2502         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2503                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2504                         return i;
2505
2506         return 0;
2507 }
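
/*
 * Editor's note -- illustrative sketch, not part of the driver. The
 * lookup above masks off the qualifier bits of the IOASC and, because
 * entry 0 of ipr_error_table is the catch-all entry for unknown errors,
 * a miss still yields a valid index. The same shape in miniature
 * (example_table and its contents are hypothetical):
 */
static const struct {
	u32 key;
	const char *desc;
} example_table[] = {
	{ 0x00000000, "unknown" },	/* index 0: catch-all */
	{ 0x11223300, "some specific error" },
};

static u32 example_lookup(u32 key, u32 mask)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(example_table); i++)
		if (example_table[i].key == (key & mask))
			return i;
	return 0;	/* fall back to the catch-all entry */
}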
2508
2509 /**
2510  * ipr_handle_log_data - Log an adapter error.
2511  * @ioa_cfg:    ioa config struct
2512  * @hostrcb:    hostrcb struct
2513  *
2514  * This function logs an adapter error to the system.
2515  *
2516  * Return value:
2517  *      none
2518  **/
2519 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2520                                 struct ipr_hostrcb *hostrcb)
2521 {
2522         u32 ioasc;
2523         int error_index;
2524         struct ipr_hostrcb_type_21_error *error;
2525
2526         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2527                 return;
2528
2529         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2530                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2531
2532         if (ioa_cfg->sis64)
2533                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2534         else
2535                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2536
2537         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2538             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2539                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2540                 scsi_report_bus_reset(ioa_cfg->host,
2541                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2542         }
2543
2544         error_index = ipr_get_error(ioasc);
2545
2546         if (!ipr_error_table[error_index].log_hcam)
2547                 return;
2548
2549         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2550             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2551                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2552
2553                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2554                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2555                         return;
2556         }
2557
2558         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2559
2560         /* Set indication we have logged an error */
2561         ioa_cfg->errors_logged++;
2562
2563         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2564                 return;
2565         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2566                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2567
2568         switch (hostrcb->hcam.overlay_id) {
2569         case IPR_HOST_RCB_OVERLAY_ID_2:
2570                 ipr_log_cache_error(ioa_cfg, hostrcb);
2571                 break;
2572         case IPR_HOST_RCB_OVERLAY_ID_3:
2573                 ipr_log_config_error(ioa_cfg, hostrcb);
2574                 break;
2575         case IPR_HOST_RCB_OVERLAY_ID_4:
2576         case IPR_HOST_RCB_OVERLAY_ID_6:
2577                 ipr_log_array_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_7:
2580                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_12:
2583                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2584                 break;
2585         case IPR_HOST_RCB_OVERLAY_ID_13:
2586                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2587                 break;
2588         case IPR_HOST_RCB_OVERLAY_ID_14:
2589         case IPR_HOST_RCB_OVERLAY_ID_16:
2590                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2591                 break;
2592         case IPR_HOST_RCB_OVERLAY_ID_17:
2593                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2594                 break;
2595         case IPR_HOST_RCB_OVERLAY_ID_20:
2596                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2597                 break;
2598         case IPR_HOST_RCB_OVERLAY_ID_21:
2599                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2600                 break;
2601         case IPR_HOST_RCB_OVERLAY_ID_23:
2602                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2603                 break;
2604         case IPR_HOST_RCB_OVERLAY_ID_24:
2605         case IPR_HOST_RCB_OVERLAY_ID_26:
2606                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2607                 break;
2608         case IPR_HOST_RCB_OVERLAY_ID_30:
2609                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2610                 break;
2611         case IPR_HOST_RCB_OVERLAY_ID_41:
2612                 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2613                 break;
2614         case IPR_HOST_RCB_OVERLAY_ID_1:
2615         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2616         default:
2617                 ipr_log_generic_error(ioa_cfg, hostrcb);
2618                 break;
2619         }
2620 }
2621
2622 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2623 {
2624         struct ipr_hostrcb *hostrcb;
2625
2626         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2627                                         struct ipr_hostrcb, queue);
2628
2629         if (unlikely(!hostrcb)) {
2630                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2631                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2632                                                 struct ipr_hostrcb, queue);
2633         }
2634
2635         list_del_init(&hostrcb->queue);
2636         return hostrcb;
2637 }
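
/*
 * Editor's note -- illustrative sketch, not part of the driver. This is
 * a free-list-with-fallback pattern: take a buffer from the free queue
 * and, when that is empty, reclaim the oldest buffer still parked on
 * the report queue. As written it relies on at least one of the two
 * queues being non-empty; a fully defensive variant (hypothetical, not
 * the driver's code) would tolerate both being empty:
 */
static struct ipr_hostrcb *example_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
{
	struct ipr_hostrcb *hostrcb;

	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb)
		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
						   struct ipr_hostrcb, queue);
	if (!hostrcb)
		return NULL;	/* both queues empty */

	list_del_init(&hostrcb->queue);
	return hostrcb;
}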
2638
2639 /**
2640  * ipr_process_error - Op done function for an adapter error log.
2641  * @ipr_cmd:    ipr command struct
2642  *
2643  * This function is the op done function for an error log host
2644  * controlled async (HCAM) from the adapter. It will log the error and
2645  * send the HCAM back to the adapter.
2646  *
2647  * Return value:
2648  *      none
2649  **/
2650 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2651 {
2652         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2653         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2654         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2655         u32 fd_ioasc;
2656
2657         if (ioa_cfg->sis64)
2658                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2659         else
2660                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2661
2662         list_del_init(&hostrcb->queue);
2663         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2664
2665         if (!ioasc) {
2666                 ipr_handle_log_data(ioa_cfg, hostrcb);
2667                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2668                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2669         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2670                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2671                 dev_err(&ioa_cfg->pdev->dev,
2672                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2673         }
2674
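             /*
              * Queue the processed buffer for the worker thread to report,
              * then grab a fresh buffer and re-arm the adapter's error log HCAM.
              */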
2675         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2676         schedule_work(&ioa_cfg->work_q);
2677         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2678
2679         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2680 }
2681
2682 /**
2683  * ipr_timeout -  An internally generated op has timed out.
2684  * @t:          Timer context used to fetch ipr command struct
2685  *
2686  * This function blocks host requests and initiates an
2687  * adapter reset.
2688  *
2689  * Return value:
2690  *      none
2691  **/
2692 static void ipr_timeout(struct timer_list *t)
2693 {
2694         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2695         unsigned long lock_flags = 0;
2696         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2697
2698         ENTER;
2699         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2700
2701         ioa_cfg->errors_logged++;
2702         dev_err(&ioa_cfg->pdev->dev,
2703                 "Adapter being reset due to command timeout.\n");
2704
2705         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2706                 ioa_cfg->sdt_state = GET_DUMP;
2707
2708         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2709                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2710
2711         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712         LEAVE;
2713 }
2714
2715 /**
2716  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2717  * @t:          Timer context used to fetch ipr command struct
2718  *
2719  * This function blocks host requests and initiates an
2720  * adapter reset.
2721  *
2722  * Return value:
2723  *      none
2724  **/
2725 static void ipr_oper_timeout(struct timer_list *t)
2726 {
2727         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2728         unsigned long lock_flags = 0;
2729         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2730
2731         ENTER;
2732         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2733
2734         ioa_cfg->errors_logged++;
2735         dev_err(&ioa_cfg->pdev->dev,
2736                 "Adapter timed out transitioning to operational.\n");
2737
2738         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2739                 ioa_cfg->sdt_state = GET_DUMP;
2740
2741         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2742                 if (ipr_fastfail)
2743                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2744                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2745         }
2746
2747         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2748         LEAVE;
2749 }
2750
2751 /**
2752  * ipr_find_ses_entry - Find matching SES in SES table
2753  * @res:        resource entry struct of SES
2754  *
2755  * Return value:
2756  *      pointer to SES table entry / NULL on failure
2757  **/
2758 static const struct ipr_ses_table_entry *
2759 ipr_find_ses_entry(struct ipr_resource_entry *res)
2760 {
2761         int i, j, matches;
2762         struct ipr_std_inq_vpids *vpids;
2763         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2764
2765         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2766                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2767                         if (ste->compare_product_id_byte[j] == 'X') {
2768                                 vpids = &res->std_inq_data.vpids;
2769                                 if (vpids->product_id[j] == ste->product_id[j])
2770                                         matches++;
2771                                 else
2772                                         break;
2773                         } else
2774                                 matches++;
2775                 }
2776
2777                 if (matches == IPR_PROD_ID_LEN)
2778                         return ste;
2779         }
2780
2781         return NULL;
2782 }
2783
2784 /**
2785  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2786  * @ioa_cfg:    ioa config struct
2787  * @bus:                SCSI bus
2788  * @bus_width:  bus width
2789  *
2790  * Return value:
2791  *      SCSI bus speed in units of 100 kHz (e.g. 1600 = 160 MHz).
2792  *      For a 2-byte wide SCSI bus, the maximum transfer speed in
2793  *      MB/sec is twice the bus clock rate (e.g. for a wide enabled
2794  *      bus, max 160 MHz = max 320 MB/sec).
2795  **/
2796 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2797 {
2798         struct ipr_resource_entry *res;
2799         const struct ipr_ses_table_entry *ste;
2800         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2801
2802         /* Loop through each config table entry in the config table buffer */
2803         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2804                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2805                         continue;
2806
2807                 if (bus != res->bus)
2808                         continue;
2809
2810                 if (!(ste = ipr_find_ses_entry(res)))
2811                         continue;
2812
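                     /*
                      * max_bus_speed_limit is presumably in MB/sec; scale it
                      * to a bus clock in 100 kHz units, halved for wide
                      * (2-byte) buses.
                      */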
2813                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2814         }
2815
2816         return max_xfer_rate;
2817 }
2818
2819 /**
2820  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2821  * @ioa_cfg:            ioa config struct
2822  * @max_delay:          max delay in micro-seconds to wait
2823  *
2824  * Waits for an IODEBUG ACK from the IOA by busy-waiting.
2825  *
2826  * Return value:
2827  *      0 on success / other on failure
2828  **/
2829 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2830 {
2831         volatile u32 pcii_reg;
2832         int delay = 1;
2833
2834         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2835         while (delay < max_delay) {
2836                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2837
2838                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2839                         return 0;
2840
2841                 /* udelay cannot be used if delay is more than a few milliseconds */
2842                 if ((delay / 1000) > MAX_UDELAY_MS)
2843                         mdelay(delay / 1000);
2844                 else
2845                         udelay(delay);
2846
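                     /* Exponential back-off: double the wait on each iteration */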
2847                 delay += delay;
2848         }
2849         return -EIO;
2850 }
2851
2852 /**
2853  * ipr_get_sis64_dump_data_section - Dump IOA memory
2854  * @ioa_cfg:                    ioa config struct
2855  * @start_addr:                 adapter address to dump
2856  * @dest:                       destination kernel buffer
2857  * @length_in_words:            length to dump in 4 byte words
2858  *
2859  * Return value:
2860  *      0 on success
2861  **/
2862 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2863                                            u32 start_addr,
2864                                            __be32 *dest, u32 length_in_words)
2865 {
2866         int i;
2867
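             /*
              * Fetch one 32-bit word at a time: write the adapter address to
              * the dump address register, then read the word back from the
              * dump data register.
              */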
2868         for (i = 0; i < length_in_words; i++) {
2869                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2870                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2871                 dest++;
2872         }
2873
2874         return 0;
2875 }
2876
2877 /**
2878  * ipr_get_ldump_data_section - Dump IOA memory
2879  * @ioa_cfg:                    ioa config struct
2880  * @start_addr:                 adapter address to dump
2881  * @dest:                               destination kernel buffer
2882  * @length_in_words:    length to dump in 4 byte words
2883  *
2884  * Return value:
2885  *      0 on success / -EIO on failure
2886  **/
2887 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2888                                       u32 start_addr,
2889                                       __be32 *dest, u32 length_in_words)
2890 {
2891         volatile u32 temp_pcii_reg;
2892         int i, delay = 0;
2893
2894         if (ioa_cfg->sis64)
2895                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2896                                                        dest, length_in_words);
2897
2898         /* Write IOA interrupt reg starting LDUMP state  */
2899         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2900                ioa_cfg->regs.set_uproc_interrupt_reg32);
2901
2902         /* Wait for IO debug acknowledge */
2903         if (ipr_wait_iodbg_ack(ioa_cfg,
2904                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2905                 dev_err(&ioa_cfg->pdev->dev,
2906                         "IOA dump long data transfer timeout\n");
2907                 return -EIO;
2908         }
2909
2910         /* Signal LDUMP interlocked - clear IO debug ack */
2911         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912                ioa_cfg->regs.clr_interrupt_reg);
2913
2914         /* Write Mailbox with starting address */
2915         writel(start_addr, ioa_cfg->ioa_mailbox);
2916
2917         /* Signal address valid - clear IOA Reset alert */
2918         writel(IPR_UPROCI_RESET_ALERT,
2919                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2920
2921         for (i = 0; i < length_in_words; i++) {
2922                 /* Wait for IO debug acknowledge */
2923                 if (ipr_wait_iodbg_ack(ioa_cfg,
2924                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2925                         dev_err(&ioa_cfg->pdev->dev,
2926                                 "IOA dump short data transfer timeout\n");
2927                         return -EIO;
2928                 }
2929
2930                 /* Read data from mailbox and increment destination pointer */
2931                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2932                 dest++;
2933
2934                 /* For all but the last word of data, signal data received */
2935                 if (i < (length_in_words - 1)) {
2936                         /* Signal dump data received - Clear IO debug Ack */
2937                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2938                                ioa_cfg->regs.clr_interrupt_reg);
2939                 }
2940         }
2941
2942         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2943         writel(IPR_UPROCI_RESET_ALERT,
2944                ioa_cfg->regs.set_uproc_interrupt_reg32);
2945
2946         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2947                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2948
2949         /* Signal dump data received - Clear IO debug Ack */
2950         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2951                ioa_cfg->regs.clr_interrupt_reg);
2952
2953         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2954         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2955                 temp_pcii_reg =
2956                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2957
2958                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2959                         return 0;
2960
2961                 udelay(10);
2962                 delay += 10;
2963         }
2964
2965         return 0;
2966 }
2967
2968 #ifdef CONFIG_SCSI_IPR_DUMP
2969 /**
2970  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2971  * @ioa_cfg:            ioa config struct
2972  * @pci_address:        adapter address
2973  * @length:                     length of data to copy
2974  *
2975  * Copy data from PCI adapter to kernel buffer.
2976  * Note: length MUST be a 4 byte multiple
2977  * Return value:
2978  *      0 on success / other on failure
2979  **/
2980 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2981                         unsigned long pci_address, u32 length)
2982 {
2983         int bytes_copied = 0;
2984         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2985         __be32 *page;
2986         unsigned long lock_flags = 0;
2987         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2988
2989         if (ioa_cfg->sis64)
2990                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2991         else
2992                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2993
2994         while (bytes_copied < length &&
2995                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2996                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2997                     ioa_dump->page_offset == 0) {
2998                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2999
3000                         if (!page) {
3001                                 ipr_trace;
3002                                 return bytes_copied;
3003                         }
3004
3005                         ioa_dump->page_offset = 0;
3006                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
3007                         ioa_dump->next_page_index++;
3008                 } else
3009                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
3010
3011                 rem_len = length - bytes_copied;
3012                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
3013                 cur_len = min(rem_len, rem_page_len);
3014
3015                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3016                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3017                         rc = -EIO;
3018                 } else {
3019                         rc = ipr_get_ldump_data_section(ioa_cfg,
3020                                                         pci_address + bytes_copied,
3021                                                         &page[ioa_dump->page_offset / 4],
3022                                                         (cur_len / sizeof(u32)));
3023                 }
3024                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3025
3026                 if (!rc) {
3027                         ioa_dump->page_offset += cur_len;
3028                         bytes_copied += cur_len;
3029                 } else {
3030                         ipr_trace;
3031                         break;
3032                 }
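                     /* Yield between sections; a full IOA dump can take a while */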
3033                 schedule();
3034         }
3035
3036         return bytes_copied;
3037 }
3038
3039 /**
3040  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3041  * @hdr:        dump entry header struct
3042  *
3043  * Return value:
3044  *      nothing
3045  **/
3046 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3047 {
3048         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3049         hdr->num_elems = 1;
3050         hdr->offset = sizeof(*hdr);
3051         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3052 }
3053
3054 /**
3055  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3056  * @ioa_cfg:    ioa config struct
3057  * @driver_dump:        driver dump struct
3058  *
3059  * Return value:
3060  *      nothing
3061  **/
3062 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3063                                    struct ipr_driver_dump *driver_dump)
3064 {
3065         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3066
3067         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3068         driver_dump->ioa_type_entry.hdr.len =
3069                 sizeof(struct ipr_dump_ioa_type_entry) -
3070                 sizeof(struct ipr_dump_entry_header);
3071         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3072         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3073         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3074         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3075                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3076                 ucode_vpd->minor_release[1];
3077         driver_dump->hdr.num_entries++;
3078 }
3079
3080 /**
3081  * ipr_dump_version_data - Fill in the driver version in the dump.
3082  * @ioa_cfg:    ioa config struct
3083  * @driver_dump:        driver dump struct
3084  *
3085  * Return value:
3086  *      nothing
3087  **/
3088 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3089                                   struct ipr_driver_dump *driver_dump)
3090 {
3091         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3092         driver_dump->version_entry.hdr.len =
3093                 sizeof(struct ipr_dump_version_entry) -
3094                 sizeof(struct ipr_dump_entry_header);
3095         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3096         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3097         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3098         driver_dump->hdr.num_entries++;
3099 }
3100
3101 /**
3102  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3103  * @ioa_cfg:    ioa config struct
3104  * @driver_dump:        driver dump struct
3105  *
3106  * Return value:
3107  *      nothing
3108  **/
3109 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3110                                    struct ipr_driver_dump *driver_dump)
3111 {
3112         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3113         driver_dump->trace_entry.hdr.len =
3114                 sizeof(struct ipr_dump_trace_entry) -
3115                 sizeof(struct ipr_dump_entry_header);
3116         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3117         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3118         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3119         driver_dump->hdr.num_entries++;
3120 }
3121
3122 /**
3123  * ipr_dump_location_data - Fill in the IOA location in the dump.
3124  * @ioa_cfg:    ioa config struct
3125  * @driver_dump:        driver dump struct
3126  *
3127  * Return value:
3128  *      nothing
3129  **/
3130 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3131                                    struct ipr_driver_dump *driver_dump)
3132 {
3133         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3134         driver_dump->location_entry.hdr.len =
3135                 sizeof(struct ipr_dump_location_entry) -
3136                 sizeof(struct ipr_dump_entry_header);
3137         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3138         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3139         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3140         driver_dump->hdr.num_entries++;
3141 }
3142
3143 /**
3144  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3145  * @ioa_cfg:    ioa config struct
3146  * @dump:               dump struct
3147  *
3148  * Return value:
3149  *      nothing
3150  **/
3151 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3152 {
3153         unsigned long start_addr, sdt_word;
3154         unsigned long lock_flags = 0;
3155         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3156         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3157         u32 num_entries, max_num_entries, start_off, end_off;
3158         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3159         struct ipr_sdt *sdt;
3160         int valid = 1;
3161         int i;
3162
3163         ENTER;
3164
3165         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3166
3167         if (ioa_cfg->sdt_state != READ_DUMP) {
3168                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3169                 return;
3170         }
3171
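             /*
              * On SIS-64 adapters, presumably allow the IOA time to populate
              * the smart dump table before the mailbox is read below.
              */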
3172         if (ioa_cfg->sis64) {
3173                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174                 ssleep(IPR_DUMP_DELAY_SECONDS);
3175                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3176         }
3177
3178         start_addr = readl(ioa_cfg->ioa_mailbox);
3179
3180         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3181                 dev_err(&ioa_cfg->pdev->dev,
3182                         "Invalid dump table format: %lx\n", start_addr);
3183                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184                 return;
3185         }
3186
3187         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3188
3189         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3190
3191         /* Initialize the overall dump header */
3192         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3193         driver_dump->hdr.num_entries = 1;
3194         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3195         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3196         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3197         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3198
3199         ipr_dump_version_data(ioa_cfg, driver_dump);
3200         ipr_dump_location_data(ioa_cfg, driver_dump);
3201         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3202         ipr_dump_trace_data(ioa_cfg, driver_dump);
3203
3204         /* Update dump_header */
3205         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3206
3207         /* IOA Dump entry */
3208         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3209         ioa_dump->hdr.len = 0;
3210         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3211         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3212
3213         /* First entries in sdt are actually a list of dump addresses and
3214          * lengths to gather the real dump data.  sdt represents the pointer
3215          * to the ioa generated dump table.  Dump data will be extracted based
3216          * on entries in this table */
3217         sdt = &ioa_dump->sdt;
3218
3219         if (ioa_cfg->sis64) {
3220                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3221                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3222         } else {
3223                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3224                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3225         }
3226
3227         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3228                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3229         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3230                                         bytes_to_copy / sizeof(__be32));
3231
3232         /* Smart Dump table is ready to use and the first entry is valid */
3233         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3234             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3235                 dev_err(&ioa_cfg->pdev->dev,
3236                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3237                         rc, be32_to_cpu(sdt->hdr.state));
3238                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3239                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3240                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241                 return;
3242         }
3243
3244         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3245
3246         if (num_entries > max_num_entries)
3247                 num_entries = max_num_entries;
3248
3249         /* Update dump length to the actual data to be copied */
3250         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3251         if (ioa_cfg->sis64)
3252                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3253         else
3254                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3255
3256         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257
3258         for (i = 0; i < num_entries; i++) {
3259                 if (ioa_dump->hdr.len > max_dump_size) {
3260                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3261                         break;
3262                 }
3263
3264                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3265                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3266                         if (ioa_cfg->sis64)
3267                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3268                         else {
3269                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3270                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3271
3272                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3273                                         bytes_to_copy = end_off - start_off;
3274                                 else
3275                                         valid = 0;
3276                         }
3277                         if (valid) {
3278                                 if (bytes_to_copy > max_dump_size) {
3279                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3280                                         continue;
3281                                 }
3282
3283                                 /* Copy data from adapter to driver buffers */
3284                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3285                                                             bytes_to_copy);
3286
3287                                 ioa_dump->hdr.len += bytes_copied;
3288
3289                                 if (bytes_copied != bytes_to_copy) {
3290                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3291                                         break;
3292                                 }
3293                         }
3294                 }
3295         }
3296
3297         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3298
3299         /* Update dump_header */
3300         driver_dump->hdr.len += ioa_dump->hdr.len;
3301         wmb();
3302         ioa_cfg->sdt_state = DUMP_OBTAINED;
3303         LEAVE;
3304 }
3305
3306 #else
3307 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3308 #endif
3309
3310 /**
3311  * ipr_release_dump - Free adapter dump memory
3312  * @kref:       kref struct
3313  *
3314  * Return value:
3315  *      nothing
3316  **/
3317 static void ipr_release_dump(struct kref *kref)
3318 {
3319         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3320         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3321         unsigned long lock_flags = 0;
3322         int i;
3323
3324         ENTER;
3325         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326         ioa_cfg->dump = NULL;
3327         ioa_cfg->sdt_state = INACTIVE;
3328         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329
3330         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3331                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3332
3333         vfree(dump->ioa_dump.ioa_data);
3334         kfree(dump);
3335         LEAVE;
3336 }
3337
3338 static void ipr_add_remove_thread(struct work_struct *work)
3339 {
3340         unsigned long lock_flags;
3341         struct ipr_resource_entry *res;
3342         struct scsi_device *sdev;
3343         struct ipr_ioa_cfg *ioa_cfg =
3344                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3345         u8 bus, target, lun;
3346         int did_work;
3347
3348         ENTER;
3349         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350
3351 restart:
3352         do {
3353                 did_work = 0;
3354                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3355                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356                         return;
3357                 }
3358
3359                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3360                         if (res->del_from_ml && res->sdev) {
3361                                 did_work = 1;
3362                                 sdev = res->sdev;
3363                                 if (!scsi_device_get(sdev)) {
3364                                         if (!res->add_to_ml)
3365                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3366                                         else
3367                                                 res->del_from_ml = 0;
3368                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369                                         scsi_remove_device(sdev);
3370                                         scsi_device_put(sdev);
3371                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372                                 }
3373                                 break;
3374                         }
3375                 }
3376         } while (did_work);
3377
3378         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379                 if (res->add_to_ml) {
3380                         bus = res->bus;
3381                         target = res->target;
3382                         lun = res->lun;
3383                         res->add_to_ml = 0;
3384                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3386                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
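                             /*
                              * The host lock was dropped around
                              * scsi_add_device(), so the resource list may
                              * have changed; rescan it.
                              */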
3387                         goto restart;
3388                 }
3389         }
3390
3391         ioa_cfg->scan_done = 1;
3392         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3394         LEAVE;
3395 }
3396
3397 /**
3398  * ipr_worker_thread - Worker thread
3399  * @work:               ioa config struct
3400  *
3401  * Called at task level from a work thread. This function takes care
3402  * of adding and removing devices from the mid-layer as configuration
3403  * changes are detected by the adapter.
3404  *
3405  * Return value:
3406  *      nothing
3407  **/
3408 static void ipr_worker_thread(struct work_struct *work)
3409 {
3410         unsigned long lock_flags;
3411         struct ipr_dump *dump;
3412         struct ipr_ioa_cfg *ioa_cfg =
3413                 container_of(work, struct ipr_ioa_cfg, work_q);
3414
3415         ENTER;
3416         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417
3418         if (ioa_cfg->sdt_state == READ_DUMP) {
3419                 dump = ioa_cfg->dump;
3420                 if (!dump) {
3421                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422                         return;
3423                 }
3424                 kref_get(&dump->kref);
3425                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426                 ipr_get_ioa_dump(ioa_cfg, dump);
3427                 kref_put(&dump->kref, ipr_release_dump);
3428
3429                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3430                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3431                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3432                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433                 return;
3434         }
3435
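             /*
              * Drop the host lock around scsi_unblock_requests(); if a reset
              * blocked requests again in the meantime, re-block below.
              */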
3436         if (ioa_cfg->scsi_unblock) {
3437                 ioa_cfg->scsi_unblock = 0;
3438                 ioa_cfg->scsi_blocked = 0;
3439                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440                 scsi_unblock_requests(ioa_cfg->host);
3441                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3442                 if (ioa_cfg->scsi_blocked)
3443                         scsi_block_requests(ioa_cfg->host);
3444         }
3445
3446         if (!ioa_cfg->scan_enabled) {
3447                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3448                 return;
3449         }
3450
3451         schedule_work(&ioa_cfg->scsi_add_work_q);
3452
3453         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454         LEAVE;
3455 }
3456
3457 #ifdef CONFIG_SCSI_IPR_TRACE
3458 /**
3459  * ipr_read_trace - Dump the adapter trace
3460  * @filp:               open sysfs file
3461  * @kobj:               kobject struct
3462  * @bin_attr:           bin_attribute struct
3463  * @buf:                buffer
3464  * @off:                offset
3465  * @count:              buffer size
3466  *
3467  * Return value:
3468  *      number of bytes printed to buffer
3469  **/
3470 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3471                               struct bin_attribute *bin_attr,
3472                               char *buf, loff_t off, size_t count)
3473 {
3474         struct device *dev = container_of(kobj, struct device, kobj);
3475         struct Scsi_Host *shost = class_to_shost(dev);
3476         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3477         unsigned long lock_flags = 0;
3478         ssize_t ret;
3479
3480         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3481         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3482                                 IPR_TRACE_SIZE);
3483         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484
3485         return ret;
3486 }
3487
3488 static struct bin_attribute ipr_trace_attr = {
3489         .attr = {
3490                 .name = "trace",
3491                 .mode = S_IRUGO,
3492         },
3493         .size = 0,
3494         .read = ipr_read_trace,
3495 };
3496 #endif
3497
3498 /**
3499  * ipr_show_fw_version - Show the firmware version
3500  * @dev:        class device struct
3501  * @buf:        buffer
3502  *
3503  * Return value:
3504  *      number of bytes printed to buffer
3505  **/
3506 static ssize_t ipr_show_fw_version(struct device *dev,
3507                                    struct device_attribute *attr, char *buf)
3508 {
3509         struct Scsi_Host *shost = class_to_shost(dev);
3510         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3512         unsigned long lock_flags = 0;
3513         int len;
3514
3515         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3516         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3517                        ucode_vpd->major_release, ucode_vpd->card_type,
3518                        ucode_vpd->minor_release[0],
3519                        ucode_vpd->minor_release[1]);
3520         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3521         return len;
3522 }
3523
3524 static struct device_attribute ipr_fw_version_attr = {
3525         .attr = {
3526                 .name =         "fw_version",
3527                 .mode =         S_IRUGO,
3528         },
3529         .show = ipr_show_fw_version,
3530 };
3531
3532 /**
3533  * ipr_show_log_level - Show the adapter's error logging level
3534  * @dev:        class device struct
3535  * @buf:        buffer
3536  *
3537  * Return value:
3538  *      number of bytes printed to buffer
3539  **/
3540 static ssize_t ipr_show_log_level(struct device *dev,
3541                                    struct device_attribute *attr, char *buf)
3542 {
3543         struct Scsi_Host *shost = class_to_shost(dev);
3544         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3545         unsigned long lock_flags = 0;
3546         int len;
3547
3548         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3549         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3550         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3551         return len;
3552 }
3553
3554 /**
3555  * ipr_store_log_level - Change the adapter's error logging level
3556  * @dev:        class device struct
3557  * @buf:        buffer
3558  *
3559  * Return value:
3560  *      number of bytes consumed from buffer
3561  **/
3562 static ssize_t ipr_store_log_level(struct device *dev,
3563                                    struct device_attribute *attr,
3564                                    const char *buf, size_t count)
3565 {
3566         struct Scsi_Host *shost = class_to_shost(dev);
3567         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3568         unsigned long lock_flags = 0;
3569
3570         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3572         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573         return strlen(buf);
3574 }
3575
3576 static struct device_attribute ipr_log_level_attr = {
3577         .attr = {
3578                 .name =         "log_level",
3579                 .mode =         S_IRUGO | S_IWUSR,
3580         },
3581         .show = ipr_show_log_level,
3582         .store = ipr_store_log_level
3583 };
3584
3585 /**
3586  * ipr_store_diagnostics - IOA Diagnostics interface
3587  * @dev:        device struct
3588  * @buf:        buffer
3589  * @count:      buffer size
3590  *
3591  * This function will reset the adapter and wait a reasonable
3592  * amount of time for any errors that the adapter might log.
3593  *
3594  * Return value:
3595  *      count on success / other on failure
3596  **/
3597 static ssize_t ipr_store_diagnostics(struct device *dev,
3598                                      struct device_attribute *attr,
3599                                      const char *buf, size_t count)
3600 {
3601         struct Scsi_Host *shost = class_to_shost(dev);
3602         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3603         unsigned long lock_flags = 0;
3604         int rc = count;
3605
3606         if (!capable(CAP_SYS_ADMIN))
3607                 return -EACCES;
3608
3609         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3610         while (ioa_cfg->in_reset_reload) {
3611                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3613                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3614         }
3615
3616         ioa_cfg->errors_logged = 0;
3617         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3618
3619         if (ioa_cfg->in_reset_reload) {
3620                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3622
3623                 /* Wait for a second for any errors to be logged */
3624                 msleep(1000);
3625         } else {
3626                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627                 return -EIO;
3628         }
3629
3630         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3631         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3632                 rc = -EIO;
3633         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3634
3635         return rc;
3636 }
3637
3638 static struct device_attribute ipr_diagnostics_attr = {
3639         .attr = {
3640                 .name =         "run_diagnostics",
3641                 .mode =         S_IWUSR,
3642         },
3643         .store = ipr_store_diagnostics
3644 };
3645
3646 /**
3647  * ipr_show_adapter_state - Show the adapter's state
3648  * @dev:        device struct
3649  * @buf:        buffer
3650  *
3651  * Return value:
3652  *      number of bytes printed to buffer
3653  **/
3654 static ssize_t ipr_show_adapter_state(struct device *dev,
3655                                       struct device_attribute *attr, char *buf)
3656 {
3657         struct Scsi_Host *shost = class_to_shost(dev);
3658         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3659         unsigned long lock_flags = 0;
3660         int len;
3661
3662         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3663         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3664                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3665         else
3666                 len = snprintf(buf, PAGE_SIZE, "online\n");
3667         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668         return len;
3669 }
3670
3671 /**
3672  * ipr_store_adapter_state - Change adapter state
3673  * @dev:        device struct
3674  * @buf:        buffer
3675  * @count:      buffer size
3676  *
3677  * This function will change the adapter's state.
3678  *
3679  * Return value:
3680  *      count on success / other on failure
3681  **/
3682 static ssize_t ipr_store_adapter_state(struct device *dev,
3683                                        struct device_attribute *attr,
3684                                        const char *buf, size_t count)
3685 {
3686         struct Scsi_Host *shost = class_to_shost(dev);
3687         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3688         unsigned long lock_flags;
3689         int result = count, i;
3690
3691         if (!capable(CAP_SYS_ADMIN))
3692                 return -EACCES;
3693
3694         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3695         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3696             !strncmp(buf, "online", 6)) {
3697                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3698                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3699                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3700                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3701                 }
3702                 wmb();
3703                 ioa_cfg->reset_retries = 0;
3704                 ioa_cfg->in_ioa_bringdown = 0;
3705                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3706         }
3707         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710         return result;
3711 }
3712
3713 static struct device_attribute ipr_ioa_state_attr = {
3714         .attr = {
3715                 .name =         "online_state",
3716                 .mode =         S_IRUGO | S_IWUSR,
3717         },
3718         .show = ipr_show_adapter_state,
3719         .store = ipr_store_adapter_state
3720 };
3721
3722 /**
3723  * ipr_store_reset_adapter - Reset the adapter
3724  * @dev:        device struct
3725  * @buf:        buffer
3726  * @count:      buffer size
3727  *
3728  * This function will reset the adapter.
3729  *
3730  * Return value:
3731  *      count on success / other on failure
3732  **/
3733 static ssize_t ipr_store_reset_adapter(struct device *dev,
3734                                        struct device_attribute *attr,
3735                                        const char *buf, size_t count)
3736 {
3737         struct Scsi_Host *shost = class_to_shost(dev);
3738         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3739         unsigned long lock_flags;
3740         int result = count;
3741
3742         if (!capable(CAP_SYS_ADMIN))
3743                 return -EACCES;
3744
3745         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3746         if (!ioa_cfg->in_reset_reload)
3747                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3748         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3749         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3750
3751         return result;
3752 }
3753
3754 static struct device_attribute ipr_ioa_reset_attr = {
3755         .attr = {
3756                 .name =         "reset_host",
3757                 .mode =         S_IWUSR,
3758         },
3759         .store = ipr_store_reset_adapter
3760 };
3761
3762 static int ipr_iopoll(struct irq_poll *iop, int budget);
3763 /**
3764  * ipr_show_iopoll_weight - Show ipr polling mode
3765  * @dev:        class device struct
3766  * @buf:        buffer
3767  *
3768  * Return value:
3769  *      number of bytes printed to buffer
3770  **/
3771 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3772                                    struct device_attribute *attr, char *buf)
3773 {
3774         struct Scsi_Host *shost = class_to_shost(dev);
3775         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3776         unsigned long lock_flags = 0;
3777         int len;
3778
3779         spin_lock_irqsave(shost->host_lock, lock_flags);
3780         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3781         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3782
3783         return len;
3784 }
3785
3786 /**
3787  * ipr_store_iopoll_weight - Change the adapter's polling mode
3788  * @dev:        class device struct
3789  * @buf:        buffer
3790  *
3791  * Return value:
3792  *      number of bytes consumed from buffer
3793  **/
3794 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3795                                         struct device_attribute *attr,
3796                                         const char *buf, size_t count)
3797 {
3798         struct Scsi_Host *shost = class_to_shost(dev);
3799         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3800         unsigned long user_iopoll_weight;
3801         unsigned long lock_flags = 0;
3802         int i;
3803
3804         if (!ioa_cfg->sis64) {
3805                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3806                 return -EINVAL;
3807         }
3808         if (kstrtoul(buf, 10, &user_iopoll_weight))
3809                 return -EINVAL;
3810
3811         if (user_iopoll_weight > 256) {
3812                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3813                 return -EINVAL;
3814         }
3815
3816         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3817                 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight is already set to that value\n");
3818                 return strlen(buf);
3819         }
3820
3821         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3822                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3823                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3824         }
3825
3826         spin_lock_irqsave(shost->host_lock, lock_flags);
3827         ioa_cfg->iopoll_weight = user_iopoll_weight;
3828         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3829                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3830                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3831                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3832                 }
3833         }
3834         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3835
3836         return strlen(buf);
3837 }
3838
3839 static struct device_attribute ipr_iopoll_weight_attr = {
3840         .attr = {
3841                 .name =         "iopoll_weight",
3842                 .mode =         S_IRUGO | S_IWUSR,
3843         },
3844         .show = ipr_show_iopoll_weight,
3845         .store = ipr_store_iopoll_weight
3846 };
3847
3848 /**
3849  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3850  * @buf_len:            buffer length
3851  *
3852  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3853  * list to use for microcode download
3854  *
3855  * Return value:
3856  *      pointer to sglist / NULL on failure
3857  **/
3858 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3859 {
3860         int sg_size, order;
3861         struct ipr_sglist *sglist;
3862
3863         /* Get the minimum size per scatter/gather element */
3864         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3865
3866         /* Get the actual size per element */
3867         order = get_order(sg_size);
3868
3869         /* Allocate a scatter/gather list for the DMA */
3870         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3871         if (sglist == NULL) {
3872                 ipr_trace;
3873                 return NULL;
3874         }
3875         sglist->order = order;
3876         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3877                                               &sglist->num_sg);
3878         if (!sglist->scatterlist) {
3879                 kfree(sglist);
3880                 return NULL;
3881         }
3882
3883         return sglist;
3884 }
3885
3886 /**
3887  * ipr_free_ucode_buffer - Frees a microcode download buffer
3888  * @sglist:             scatter/gather list pointer
3889  *
3890  * Free a DMA'able ucode download buffer previously allocated with
3891  * ipr_alloc_ucode_buffer
3892  *
3893  * Return value:
3894  *      nothing
3895  **/
3896 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3897 {
3898         sgl_free_order(sglist->scatterlist, sglist->order);
3899         kfree(sglist);
3900 }
3901
3902 /**
3903  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3904  * @sglist:             scatter/gather list pointer
3905  * @buffer:             buffer pointer
3906  * @len:                buffer length
3907  *
3908  * Copy a microcode image from a user buffer into a buffer allocated by
3909  * ipr_alloc_ucode_buffer
3910  *
3911  * Return value:
3912  *      0 on success / other on failure
3913  **/
3914 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3915                                  u8 *buffer, u32 len)
3916 {
3917         int bsize_elem, i;
3918         struct scatterlist *scatterlist;
3919         void *kaddr;
3920
3921         /* Determine the actual number of bytes per element */
3922         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3923
3924         scatterlist = sglist->scatterlist;
3925
3926         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3927                 struct page *page = sg_page(&scatterlist[i]);
3928
3929                 kaddr = kmap(page);
3930                 memcpy(kaddr, buffer, bsize_elem);
3931                 kunmap(page);
3932
3933                 scatterlist[i].length = bsize_elem;
3934
3940
3941         if (len % bsize_elem) {
3942                 struct page *page = sg_page(&scatterlist[i]);
3943
3944                 kaddr = kmap(page);
3945                 memcpy(kaddr, buffer, len % bsize_elem);
3946                 kunmap(page);
3947
3948                 scatterlist[i].length = len % bsize_elem;
3949         }
3950
3951         sglist->buffer_len = len;
3952         return 0;
3953 }
3954
3955 /**
3956  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3957  * @ipr_cmd:            ipr command struct
3958  * @sglist:             scatter/gather list
3959  *
3960  * Builds a microcode download IOA data list (IOADL).
3961  *
3962  **/
3963 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3964                                     struct ipr_sglist *sglist)
3965 {
3966         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3967         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3968         struct scatterlist *scatterlist = sglist->scatterlist;
3969         int i;
3970
3971         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3972         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3973         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3974
3975         ioarcb->ioadl_len =
3976                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3977         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3978                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3979                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3980                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3981         }
3982
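             /* Flag the final descriptor so the IOA knows where the list ends */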
3983         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3984 }
3985
3986 /**
3987  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3988  * @ipr_cmd:    ipr command struct
3989  * @sglist:             scatter/gather list
3990  *
3991  * Builds a microcode download IOA data list (IOADL).
3992  *
3993  **/
3994 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3995                                   struct ipr_sglist *sglist)
3996 {
3997         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3998         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3999         struct scatterlist *scatterlist = sglist->scatterlist;
4000         int i;
4001
4002         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4003         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4004         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4005
4006         ioarcb->ioadl_len =
4007                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4008
4009         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4010                 ioadl[i].flags_and_data_len =
4011                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4012                 ioadl[i].address =
4013                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4014         }
4015
4016         ioadl[i-1].flags_and_data_len |=
4017                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4018 }
4019
4020 /**
4021  * ipr_update_ioa_ucode - Update IOA's microcode
4022  * @ioa_cfg:    ioa config struct
4023  * @sglist:             scatter/gather list
4024  *
4025  * Initiate an adapter reset to update the IOA's microcode
4026  *
4027  * Return value:
4028  *      0 on success / -EIO on failure
4029  **/
4030 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4031                                 struct ipr_sglist *sglist)
4032 {
4033         unsigned long lock_flags;
4034
4035         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4036         while (ioa_cfg->in_reset_reload) {
4037                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4039                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4040         }
4041
4042         if (ioa_cfg->ucode_sglist) {
4043                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044                 dev_err(&ioa_cfg->pdev->dev,
4045                         "Microcode download already in progress\n");
4046                 return -EIO;
4047         }
4048
4049         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4050                                         sglist->scatterlist, sglist->num_sg,
4051                                         DMA_TO_DEVICE);
4052
4053         if (!sglist->num_dma_sg) {
4054                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055                 dev_err(&ioa_cfg->pdev->dev,
4056                         "Failed to map microcode download buffer!\n");
4057                 return -EIO;
4058         }
4059
4060         ioa_cfg->ucode_sglist = sglist;
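             /* The download itself is performed as part of the adapter reset */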
4061         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4062         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4063         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4064
4065         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066         ioa_cfg->ucode_sglist = NULL;
4067         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4068         return 0;
4069 }
4070
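/*
 * Note on the synchronization in ipr_update_ioa_ucode() above: the host
 * lock only guards the in_reset_reload and ucode_sglist state. The
 * download itself is driven by the adapter reset job, so this thread
 * merely publishes the scatter/gather list, kicks off a normal
 * shutdown/reset, and sleeps on reset_wait_q until the reload is done.
 */
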
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	char *endline;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};

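/*
 * Usage sketch for the update_fw attribute defined above. The host
 * number and firmware file name are hypothetical; the path follows
 * the standard scsi_host sysfs layout:
 *
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The named file must be visible to request_firmware(), e.g. installed
 * under /lib/firmware.
 */
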
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;
	int ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return ret;
}

static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	/* Reclaim hostrcb before exit */
	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_ioa_async_err_log = {
	.attr = {
		.name =		"async_err_log",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_async_err_log,
	.write = ipr_next_async_err_log
};

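/*
 * Usage sketch for async_err_log. The host number is hypothetical and
 * the path is an assumption based on the class device used in the
 * handlers above. A read returns the oldest unreported HCAM buffer; a
 * write of any value moves that entry back to the free queue so the
 * next read returns the following record:
 *
 *	# dd if=/sys/class/scsi_host/host0/async_err_log bs=4k count=1
 *	# echo 1 > /sys/class/scsi_host/host0/async_err_log
 */
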
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));
	else
		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
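
/*
 * Usage sketch for the dump attribute. The host number is hypothetical
 * and the path is an assumption based on the class device used in the
 * handlers above. Writing '1' allocates dump memory and arms dump
 * collection, reading streams the dump once it has been obtained, and
 * writing '0' frees it:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# cat /sys/class/scsi_host/host0/dump > ioa.dump
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */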
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};

/**
 * ipr_show_raw_mode - Show the device's raw mode
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_raw_mode(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_raw_mode - Change the device's raw mode
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes consumed on success / error code on failure
 **/
static ssize_t ipr_store_raw_mode(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res)) {
			res->raw_mode = simple_strtoul(buf, NULL, 10);
			len = strlen(buf);
			if (res->sdev)
				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
					res->raw_mode ? "enabled" : "disabled");
		} else
			len = -EINVAL;
	} else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_raw_mode_attr = {
	.attr = {
		.name =		"raw_mode",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_raw_mode,
	.store = ipr_store_raw_mode
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	&ipr_raw_mode_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
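
/*
 * Worked example for ipr_biosparam(): with the fixed 128-head,
 * 32-sector geometry, one cylinder is 128 * 32 = 4096 sectors. Since
 * partitioning tools align partitions to cylinder boundaries, every
 * partition starts at a multiple of 4096 512-byte sectors (2 MiB) and
 * is therefore 4k-aligned. A hypothetical 70,000,000-sector disk would
 * report 70000000 / 4096 = 17089 cylinders (integer division).
 */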

/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->scsi_level = SCSI_SPC_3;
			sdev->no_report_opcodes = 1;
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		}

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}

/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 *	true / false
 **/
static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *loop_cmd;

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
		if (loop_cmd == ipr_cmd)
			return true;
	}

	return false;
}

/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 *	1 if command matches the resource entry / 0 if it does not
 **/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
	struct ipr_resource_entry *res = resource;

	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
		return 1;
	return 0;
}

/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
 * @device:	device to match (sdev)
 * @match:	match function to use
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
			    int (*match)(struct ipr_cmnd *, void *))
{
	struct ipr_cmnd *ipr_cmd;
	int wait, i;
	unsigned long flags;
	struct ipr_hrr_queue *hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	ENTER;
	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
				if (!ipr_cmnd_is_free(ipr_cmd)) {
					if (match(ipr_cmd, device)) {
						ipr_cmd->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
						if (!ipr_cmnd_is_free(ipr_cmd)) {
							if (match(ipr_cmd, device)) {
								ipr_cmd->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}

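/*
 * A note on the eh_comp handshake in ipr_wait_for_ops() above: each
 * matching outstanding command is tagged with the on-stack completion,
 * which the command's done path completes when the op finishes. On a
 * timeout the tags are cleared again under the hrrq locks, so a late
 * completion cannot reference the stack of a thread that has moved on.
 */
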
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/*
	 * If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, and the reset failed, fail the
	 * host reset as well.
	 */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_trace;
		rc = FAILED;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	deadline in jiffies for the reset
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
				unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO, ret;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		if (ret != SUCCESS) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		}
	} else
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	LEAVE;
	return rc;
}

/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0, i;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (!ipr_cmd->qc)
					continue;
				if (ipr_cmnd_is_free(ipr_cmd))
					continue;

				ipr_cmd->done = ipr_sata_eh_done;
				if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;
	res->reset_occurred = 1;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	res = cmd->device->hostdata;

	if (!res)
		return FAILED;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	if (rc == SUCCESS) {
		if (ipr_is_gata(res) && res->sata_port)
			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		else
			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
	}

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @t:	timer context used to fetch the ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

5493 /**
5494  * ipr_cancel_op - Cancel specified op
5495  * @scsi_cmd:   scsi command struct
5496  *
5497  * This function cancels specified op.
5498  *
5499  * Return value:
5500  *      SUCCESS / FAILED
5501  **/
5502 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5503 {
5504         struct ipr_cmnd *ipr_cmd;
5505         struct ipr_ioa_cfg *ioa_cfg;
5506         struct ipr_resource_entry *res;
5507         struct ipr_cmd_pkt *cmd_pkt;
5508         u32 ioasc, int_reg;
5509         int i, op_found = 0;
5510         struct ipr_hrr_queue *hrrq;
5511
5512         ENTER;
5513         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5514         res = scsi_cmd->device->hostdata;
5515
5516         /* If we are currently going through reset/reload, return failed.
5517          * This will force the mid-layer to call ipr_eh_host_reset,
5518          * which will then go to sleep and wait for the reset to complete
5519          */
5520         if (ioa_cfg->in_reset_reload ||
5521             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5522                 return FAILED;
5523         if (!res)
5524                 return FAILED;
5525
5526         /*
5527          * If we are aborting a timed out op, chances are that the timeout was caused
5528          * by a still not detected EEH error. In such cases, reading a register will
5529          * trigger the EEH recovery infrastructure.
5530          */
5531         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5532
5533         if (!ipr_is_gscsi(res))
5534                 return FAILED;
5535
5536         for_each_hrrq(hrrq, ioa_cfg) {
5537                 spin_lock(&hrrq->_lock);
5538                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5539                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5540                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5541                                         op_found = 1;
5542                                         break;
5543                                 }
5544                         }
5545                 }
5546                 spin_unlock(&hrrq->_lock);
5547         }
5548
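        /* The op already completed on the adapter, so there is nothing left to abort */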
5549         if (!op_found)
5550                 return SUCCESS;
5551
5552         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5553         ipr_cmd->ioarcb.res_handle = res->res_handle;
5554         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5555         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5556         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5557         ipr_cmd->u.sdev = scsi_cmd->device;
5558
5559         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5560                     scsi_cmd->cmnd[0]);
5561         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5562         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5563
5564         /*
5565          * If the abort task timed out and we sent a bus reset, we will get
5566          * one of the following responses to the abort
5567          */
5568         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5569                 ioasc = 0;
5570                 ipr_trace;
5571         }
5572
5573         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5574         if (!ipr_is_naca_model(res))
5575                 res->needs_sync_complete = 1;
5576
5577         LEAVE;
5578         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5579 }
5580
5581 /**
5582  * ipr_scan_finished - Report whether the device scan is complete
5583  * @shost:      scsi host struct
5584  * @elapsed_time:       elapsed time of the scan in jiffies
5585  * Return value:
5586  *      0 if scan in progress / 1 if scan is complete
5587  **/
5588 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5589 {
5590         unsigned long lock_flags;
5591         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5592         int rc = 0;
5593
5594         spin_lock_irqsave(shost->host_lock, lock_flags);
5595         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5596                 rc = 1;
5597         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5598                 rc = 1;
5599         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5600         return rc;
5601 }
5602
5603 /**
5604  * ipr_eh_abort - Abort a single op
5605  * @scsi_cmd:   scsi command struct
5606  *
5607  * Return value:
5608  *      SUCCESS / FAILED
5609  **/
5610 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5611 {
5612         unsigned long flags;
5613         int rc;
5614         struct ipr_ioa_cfg *ioa_cfg;
5615
5616         ENTER;
5617
5618         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5619
5620         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5621         rc = ipr_cancel_op(scsi_cmd);
5622         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5623
5624         if (rc == SUCCESS)
5625                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5626         LEAVE;
5627         return rc;
5628 }
5629
5630 /**
5631  * ipr_handle_other_interrupt - Handle "other" interrupts
5632  * @ioa_cfg:    ioa config struct
5633  * @int_reg:    interrupt register
5634  *
5635  * Return value:
5636  *      IRQ_NONE / IRQ_HANDLED
5637  **/
5638 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5639                                               u32 int_reg)
5640 {
5641         irqreturn_t rc = IRQ_HANDLED;
5642         u32 int_mask_reg;
5643
5644         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5645         int_reg &= ~int_mask_reg;
5646
5647         /* If an interrupt on the adapter did not occur, ignore it.
5648          * Or in the case of SIS 64, check for a stage change interrupt.
5649          */
5650         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5651                 if (ioa_cfg->sis64) {
5652                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5653                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5654                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5655
5656                                 /* clear stage change */
5657                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5658                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5659                                 list_del(&ioa_cfg->reset_cmd->queue);
5660                                 del_timer(&ioa_cfg->reset_cmd->timer);
5661                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5662                                 return IRQ_HANDLED;
5663                         }
5664                 }
5665
5666                 return IRQ_NONE;
5667         }
5668
5669         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5670                 /* Mask the interrupt */
5671                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5672                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5673
5674                 list_del(&ioa_cfg->reset_cmd->queue);
5675                 del_timer(&ioa_cfg->reset_cmd->timer);
5676                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5677         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5678                 if (ioa_cfg->clear_isr) {
5679                         if (ipr_debug && printk_ratelimit())
5680                                 dev_err(&ioa_cfg->pdev->dev,
5681                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5682                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5683                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5684                         return IRQ_NONE;
5685                 }
5686         } else {
5687                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5688                         ioa_cfg->ioa_unit_checked = 1;
5689                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5690                         dev_err(&ioa_cfg->pdev->dev,
5691                                 "No Host RRQ. 0x%08X\n", int_reg);
5692                 else
5693                         dev_err(&ioa_cfg->pdev->dev,
5694                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5695
5696                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5697                         ioa_cfg->sdt_state = GET_DUMP;
5698
5699                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5700                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5701         }
5702
5703         return rc;
5704 }
5705
5706 /**
5707  * ipr_isr_eh - Interrupt service routine error handler
5708  * @ioa_cfg:    ioa config struct
5709  * @msg:        message to log
5710  * @number:     various meanings depending on the caller/message
5711  * Return value:
5712  *      none
5713  **/
5714 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5715 {
5716         ioa_cfg->errors_logged++;
5717         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5718
5719         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5720                 ioa_cfg->sdt_state = GET_DUMP;
5721
5722         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5723 }
5724
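/**
 * ipr_process_hrrq - Process responses posted to a Host Request Response Queue
 * @hrr_queue:  hrr queue to drain
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list to which completed commands are moved for later completion
 *
 * Return value:
 *      number of responses processed
 **/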
5725 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5726                                                 struct list_head *doneq)
5727 {
5728         u32 ioasc;
5729         u16 cmd_index;
5730         struct ipr_cmnd *ipr_cmd;
5731         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5732         int num_hrrq = 0;
5733
5734         /* If interrupts are disabled, ignore the interrupt */
5735         if (!hrr_queue->allow_interrupts)
5736                 return 0;
5737
5738         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5739                hrr_queue->toggle_bit) {
5740
5741                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5742                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5743                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5744
5745                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5746                              cmd_index < hrr_queue->min_cmd_id)) {
5747                         ipr_isr_eh(ioa_cfg,
5748                                 "Invalid response handle from IOA: ",
5749                                 cmd_index);
5750                         break;
5751                 }
5752
5753                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5754                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5755
5756                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5757
5758                 list_move_tail(&ipr_cmd->queue, doneq);
5759
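                /*
                 * Advance to the next response entry; when wrapping back to the
                 * start of the queue, flip the expected toggle bit so entries
                 * left over from the previous pass are not seen as new responses.
                 */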
5760                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5761                         hrr_queue->hrrq_curr++;
5762                 } else {
5763                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5764                         hrr_queue->toggle_bit ^= 1u;
5765                 }
5766                 num_hrrq++;
5767                 if (budget > 0 && num_hrrq >= budget)
5768                         break;
5769         }
5770
5771         return num_hrrq;
5772 }
5773
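/**
 * ipr_iopoll - irq_poll handler used to drain an HRRQ outside hard irq context
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     maximum number of completions to process in one poll
 *
 * Return value:
 *      number of operations completed
 **/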
5774 static int ipr_iopoll(struct irq_poll *iop, int budget)
5775 {
5776         struct ipr_ioa_cfg *ioa_cfg;
5777         struct ipr_hrr_queue *hrrq;
5778         struct ipr_cmnd *ipr_cmd, *temp;
5779         unsigned long hrrq_flags;
5780         int completed_ops;
5781         LIST_HEAD(doneq);
5782
5783         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5784         ioa_cfg = hrrq->ioa_cfg;
5785
5786         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5787         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5788
5789         if (completed_ops < budget)
5790                 irq_poll_complete(iop);
5791         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5792
5793         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5794                 list_del(&ipr_cmd->queue);
5795                 del_timer(&ipr_cmd->timer);
5796                 ipr_cmd->fast_done(ipr_cmd);
5797         }
5798
5799         return completed_ops;
5800 }
5801
5802 /**
5803  * ipr_isr - Interrupt service routine
5804  * @irq:        irq number
5805  * @devp:       pointer to ioa config struct
5806  *
5807  * Return value:
5808  *      IRQ_NONE / IRQ_HANDLED
5809  **/
5810 static irqreturn_t ipr_isr(int irq, void *devp)
5811 {
5812         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5813         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5814         unsigned long hrrq_flags = 0;
5815         u32 int_reg = 0;
5816         int num_hrrq = 0;
5817         int irq_none = 0;
5818         struct ipr_cmnd *ipr_cmd, *temp;
5819         irqreturn_t rc = IRQ_NONE;
5820         LIST_HEAD(doneq);
5821
5822         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5823         /* If interrupts are disabled, ignore the interrupt */
5824         if (!hrrq->allow_interrupts) {
5825                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5826                 return IRQ_NONE;
5827         }
5828
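        /*
         * Drain the HRRQ until no new responses arrive. On adapters that
         * require it (clear_isr), explicitly clear and re-read the PCI
         * interrupt, retrying a bounded number of times.
         */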
5829         while (1) {
5830                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5831                         rc = IRQ_HANDLED;
5832
5833                         if (!ioa_cfg->clear_isr)
5834                                 break;
5835
5836                         /* Clear the PCI interrupt */
5837                         num_hrrq = 0;
5838                         do {
5839                                 writel(IPR_PCII_HRRQ_UPDATED,
5840                                      ioa_cfg->regs.clr_interrupt_reg32);
5841                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5842                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5843                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5844
5845                 } else if (rc == IRQ_NONE && irq_none == 0) {
5846                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5847                         irq_none++;
5848                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5849                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5850                         ipr_isr_eh(ioa_cfg,
5851                                 "Error clearing HRRQ: ", num_hrrq);
5852                         rc = IRQ_HANDLED;
5853                         break;
5854                 } else
5855                         break;
5856         }
5857
5858         if (unlikely(rc == IRQ_NONE))
5859                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5860
5861         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5862         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5863                 list_del(&ipr_cmd->queue);
5864                 del_timer(&ipr_cmd->timer);
5865                 ipr_cmd->fast_done(ipr_cmd);
5866         }
5867         return rc;
5868 }
5869
5870 /**
5871  * ipr_isr_mhrrq - Interrupt service routine
5872  * @irq:        irq number
5873  * @devp:       pointer to ioa config struct
5874  *
5875  * Return value:
5876  *      IRQ_NONE / IRQ_HANDLED
5877  **/
5878 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5879 {
5880         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5881         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5882         unsigned long hrrq_flags = 0;
5883         struct ipr_cmnd *ipr_cmd, *temp;
5884         irqreturn_t rc = IRQ_NONE;
5885         LIST_HEAD(doneq);
5886
5887         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5888
5889         /* If interrupts are disabled, ignore the interrupt */
5890         if (!hrrq->allow_interrupts) {
5891                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5892                 return IRQ_NONE;
5893         }
5894
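        /*
         * With iopoll enabled on SIS-64 and multiple vectors, hand the queue
         * to irq_poll for deferred processing; otherwise drain it right here.
         */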
5895         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5896                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5897                        hrrq->toggle_bit) {
5898                         irq_poll_sched(&hrrq->iopoll);
5899                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5900                         return IRQ_HANDLED;
5901                 }
5902         } else {
5903                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5904                         hrrq->toggle_bit)
5906                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5907                                 rc = IRQ_HANDLED;
5908         }
5909
5910         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5911
5912         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5913                 list_del(&ipr_cmd->queue);
5914                 del_timer(&ipr_cmd->timer);
5915                 ipr_cmd->fast_done(ipr_cmd);
5916         }
5917         return rc;
5918 }
5919
5920 /**
5921  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5922  * @ioa_cfg:    ioa config struct
5923  * @ipr_cmd:    ipr command struct
5924  *
5925  * Return value:
5926  *      0 on success / -1 on failure
5927  **/
5928 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5929                              struct ipr_cmnd *ipr_cmd)
5930 {
5931         int i, nseg;
5932         struct scatterlist *sg;
5933         u32 length;
5934         u32 ioadl_flags = 0;
5935         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5936         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5937         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5938
5939         length = scsi_bufflen(scsi_cmd);
5940         if (!length)
5941                 return 0;
5942
5943         nseg = scsi_dma_map(scsi_cmd);
5944         if (nseg < 0) {
5945                 if (printk_ratelimit())
5946                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5947                 return -1;
5948         }
5949
5950         ipr_cmd->dma_use_sg = nseg;
5951
5952         ioarcb->data_transfer_length = cpu_to_be32(length);
5953         ioarcb->ioadl_len =
5954                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5955
5956         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5957                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5958                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5959         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5960                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5961
5962         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5963                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5964                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5965                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5966         }
5967
5968         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5969         return 0;
5970 }
5971
5972 /**
5973  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5974  * @ioa_cfg:    ioa config struct
5975  * @ipr_cmd:    ipr command struct
5976  *
5977  * Return value:
5978  *      0 on success / -1 on failure
5979  **/
5980 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5981                            struct ipr_cmnd *ipr_cmd)
5982 {
5983         int i, nseg;
5984         struct scatterlist *sg;
5985         u32 length;
5986         u32 ioadl_flags = 0;
5987         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5988         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5989         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5990
5991         length = scsi_bufflen(scsi_cmd);
5992         if (!length)
5993                 return 0;
5994
5995         nseg = scsi_dma_map(scsi_cmd);
5996         if (nseg < 0) {
5997                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5998                 return -1;
5999         }
6000
6001         ipr_cmd->dma_use_sg = nseg;
6002
6003         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6004                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6005                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6006                 ioarcb->data_transfer_length = cpu_to_be32(length);
6007                 ioarcb->ioadl_len =
6008                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6009         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6010                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6011                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6012                 ioarcb->read_ioadl_len =
6013                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6014         }
6015
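        /*
         * A short scatter/gather list fits in the spare space within the
         * IOARCB itself, letting the adapter fetch it along with the request
         * instead of through a separate DMA read.
         */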
6016         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6017                 ioadl = ioarcb->u.add_data.u.ioadl;
6018                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6019                                     offsetof(struct ipr_ioarcb, u.add_data));
6020                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6021         }
6022
6023         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6024                 ioadl[i].flags_and_data_len =
6025                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6026                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6027         }
6028
6029         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6030         return 0;
6031 }
6032
6033 /**
6034  * __ipr_erp_done - Process completion of ERP for a device
6035  * @ipr_cmd:            ipr command struct
6036  *
6037  * This function copies the sense buffer into the scsi_cmd
6038  * struct and invokes the scsi_done function.
6039  *
6040  * Return value:
6041  *      nothing
6042  **/
6043 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6044 {
6045         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6046         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6047         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6048
6049         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6050                 scsi_cmd->result |= (DID_ERROR << 16);
6051                 scmd_printk(KERN_ERR, scsi_cmd,
6052                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6053         } else {
6054                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6055                        SCSI_SENSE_BUFFERSIZE);
6056         }
6057
6058         if (res) {
6059                 if (!ipr_is_naca_model(res))
6060                         res->needs_sync_complete = 1;
6061                 res->in_erp = 0;
6062         }
6063         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6064         scsi_cmd->scsi_done(scsi_cmd);
6065         if (ipr_cmd->eh_comp)
6066                 complete(ipr_cmd->eh_comp);
6067         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6068 }
6069
6070 /**
6071  * ipr_erp_done - Process completion of ERP for a device
6072  * @ipr_cmd:            ipr command struct
6073  *
6074  * This function copies the sense buffer into the scsi_cmd
6075  * struct and invokes the scsi_done function.
6076  *
6077  * Return value:
6078  *      nothing
6079  **/
6080 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6081 {
6082         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6083         unsigned long hrrq_flags;
6084
6085         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6086         __ipr_erp_done(ipr_cmd);
6087         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6088 }
6089
6090 /**
6091  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6092  * @ipr_cmd:    ipr command struct
6093  *
6094  * Return value:
6095  *      none
6096  **/
6097 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6098 {
6099         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6100         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6101         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6102
6103         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6104         ioarcb->data_transfer_length = 0;
6105         ioarcb->read_data_transfer_length = 0;
6106         ioarcb->ioadl_len = 0;
6107         ioarcb->read_ioadl_len = 0;
6108         ioasa->hdr.ioasc = 0;
6109         ioasa->hdr.residual_data_len = 0;
6110
6111         if (ipr_cmd->ioa_cfg->sis64)
6112                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6113                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6114         else {
6115                 ioarcb->write_ioadl_addr =
6116                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6117                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6118         }
6119 }
6120
6121 /**
6122  * __ipr_erp_request_sense - Send request sense to a device
6123  * @ipr_cmd:    ipr command struct
6124  *
6125  * This function sends a request sense to a device as a result
6126  * of a check condition.
6127  *
6128  * Return value:
6129  *      nothing
6130  **/
6131 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6132 {
6133         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6134         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6135
6136         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6137                 __ipr_erp_done(ipr_cmd);
6138                 return;
6139         }
6140
6141         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6142
6143         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6144         cmd_pkt->cdb[0] = REQUEST_SENSE;
6145         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6146         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6147         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6148         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6149
6150         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6151                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6152
6153         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6154                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6155 }
6156
6157 /**
6158  * ipr_erp_request_sense - Send request sense to a device
6159  * @ipr_cmd:    ipr command struct
6160  *
6161  * This function sends a request sense to a device as a result
6162  * of a check condition.
6163  *
6164  * Return value:
6165  *      nothing
6166  **/
6167 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6168 {
6169         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6170         unsigned long hrrq_flags;
6171
6172         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6173         __ipr_erp_request_sense(ipr_cmd);
6174         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6175 }
6176
6177 /**
6178  * ipr_erp_cancel_all - Send cancel all to a device
6179  * @ipr_cmd:    ipr command struct
6180  *
6181  * This function sends a cancel all to a device to clear the
6182  * queue. If we are running TCQ on the device, QERR is set to 1,
6183  * which means all outstanding ops have been dropped on the floor.
6184  * Cancel all will return them to us.
6185  *
6186  * Return value:
6187  *      nothing
6188  **/
6189 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6190 {
6191         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6192         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6193         struct ipr_cmd_pkt *cmd_pkt;
6194
6195         res->in_erp = 1;
6196
6197         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6198
6199         if (!scsi_cmd->device->simple_tags) {
6200                 __ipr_erp_request_sense(ipr_cmd);
6201                 return;
6202         }
6203
6204         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6205         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6206         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6207
6208         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6209                    IPR_CANCEL_ALL_TIMEOUT);
6210 }
6211
6212 /**
6213  * ipr_dump_ioasa - Dump contents of IOASA
6214  * @ioa_cfg:    ioa config struct
6215  * @ipr_cmd:    ipr command struct
6216  * @res:                resource entry struct
6217  *
6218  * This function is invoked by the interrupt handler when ops
6219  * fail. It will log the IOASA if appropriate. Only called
6220  * for GPDD ops.
6221  *
6222  * Return value:
6223  *      none
6224  **/
6225 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6226                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6227 {
6228         int i;
6229         u16 data_len;
6230         u32 ioasc, fd_ioasc;
6231         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6232         __be32 *ioasa_data = (__be32 *)ioasa;
6233         int error_index;
6234
6235         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6236         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6237
6238         if (0 == ioasc)
6239                 return;
6240
6241         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6242                 return;
6243
6244         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6245                 error_index = ipr_get_error(fd_ioasc);
6246         else
6247                 error_index = ipr_get_error(ioasc);
6248
6249         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6250                 /* Don't log an error if the IOA already logged one */
6251                 if (ioasa->hdr.ilid != 0)
6252                         return;
6253
6254                 if (!ipr_is_gscsi(res))
6255                         return;
6256
6257                 if (ipr_error_table[error_index].log_ioasa == 0)
6258                         return;
6259         }
6260
6261         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6262
6263         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6264         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6265                 data_len = sizeof(struct ipr_ioasa64);
6266         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6267                 data_len = sizeof(struct ipr_ioasa);
6268
6269         ipr_err("IOASA Dump:\n");
6270
6271         for (i = 0; i < data_len / 4; i += 4) {
6272                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6273                         be32_to_cpu(ioasa_data[i]),
6274                         be32_to_cpu(ioasa_data[i+1]),
6275                         be32_to_cpu(ioasa_data[i+2]),
6276                         be32_to_cpu(ioasa_data[i+3]));
6277         }
6278 }
6279
6280 /**
6281  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6282  * @ipr_cmd:    ipr command struct
6283  *
6285  * Return value:
6286  *      none
6287  **/
6288 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6289 {
6290         u32 failing_lba;
6291         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6292         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6293         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6294         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6295
6296         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6297
6298         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6299                 return;
6300
6301         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6302
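        /*
         * A failing LBA wider than 32 bits cannot be encoded in the fixed
         * sense format, so use descriptor format (0x72) when the high LBA
         * word is non-zero and fixed format (0x70) otherwise.
         */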
6303         if (ipr_is_vset_device(res) &&
6304             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6305             ioasa->u.vset.failing_lba_hi != 0) {
6306                 sense_buf[0] = 0x72;
6307                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6308                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6309                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6310
6311                 sense_buf[7] = 12;
6312                 sense_buf[8] = 0;
6313                 sense_buf[9] = 0x0A;
6314                 sense_buf[10] = 0x80;
6315
6316                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6317
6318                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6319                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6320                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6321                 sense_buf[15] = failing_lba & 0x000000ff;
6322
6323                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6324
6325                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6326                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6327                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6328                 sense_buf[19] = failing_lba & 0x000000ff;
6329         } else {
6330                 sense_buf[0] = 0x70;
6331                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6332                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6333                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6334
6335                 /* Illegal request */
6336                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6337                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6338                         sense_buf[7] = 10;      /* additional length */
6339
6340                         /* IOARCB was in error */
6341                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6342                                 sense_buf[15] = 0xC0;
6343                         else    /* Parameter data was invalid */
6344                                 sense_buf[15] = 0x80;
6345
6346                         sense_buf[16] =
6347                             ((IPR_FIELD_POINTER_MASK &
6348                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6349                         sense_buf[17] =
6350                             (IPR_FIELD_POINTER_MASK &
6351                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6352                 } else {
6353                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6354                                 if (ipr_is_vset_device(res))
6355                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6356                                 else
6357                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6358
6359                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6360                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6361                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6362                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6363                                 sense_buf[6] = failing_lba & 0x000000ff;
6364                         }
6365
6366                         sense_buf[7] = 6;       /* additional length */
6367                 }
6368         }
6369 }
6370
6371 /**
6372  * ipr_get_autosense - Copy autosense data to sense buffer
6373  * @ipr_cmd:    ipr command struct
6374  *
6375  * This function copies the autosense buffer to the buffer
6376  * in the scsi_cmd, if there is autosense available.
6377  *
6378  * Return value:
6379  *      1 if autosense was available / 0 if not
6380  **/
6381 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6382 {
6383         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6384         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6385
6386         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6387                 return 0;
6388
6389         if (ipr_cmd->ioa_cfg->sis64)
6390                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6391                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6392                            SCSI_SENSE_BUFFERSIZE));
6393         else
6394                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6395                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6396                            SCSI_SENSE_BUFFERSIZE));
6397         return 1;
6398 }
6399
6400 /**
6401  * ipr_erp_start - Process an error response for a SCSI op
6402  * @ioa_cfg:    ioa config struct
6403  * @ipr_cmd:    ipr command struct
6404  *
6405  * This function determines whether or not to initiate ERP
6406  * on the affected device.
6407  *
6408  * Return value:
6409  *      nothing
6410  **/
6411 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6412                               struct ipr_cmnd *ipr_cmd)
6413 {
6414         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6415         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6416         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6417         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6418
6419         if (!res) {
6420                 __ipr_scsi_eh_done(ipr_cmd);
6421                 return;
6422         }
6423
6424         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6425                 ipr_gen_sense(ipr_cmd);
6426
6427         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6428
6429         switch (masked_ioasc) {
6430         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6431                 if (ipr_is_naca_model(res))
6432                         scsi_cmd->result |= (DID_ABORT << 16);
6433                 else
6434                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6435                 break;
6436         case IPR_IOASC_IR_RESOURCE_HANDLE:
6437         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6438                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6439                 break;
6440         case IPR_IOASC_HW_SEL_TIMEOUT:
6441                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6442                 if (!ipr_is_naca_model(res))
6443                         res->needs_sync_complete = 1;
6444                 break;
6445         case IPR_IOASC_SYNC_REQUIRED:
6446                 if (!res->in_erp)
6447                         res->needs_sync_complete = 1;
6448                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6449                 break;
6450         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6451         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6452                 /*
6453                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6454                  * so SCSI mid-layer and upper layers handle it accordingly.
6455                  */
6456                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6457                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6458                 break;
6459         case IPR_IOASC_BUS_WAS_RESET:
6460         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6461                 /*
6462                  * Report the bus reset and ask for a retry. The device
6463                  * will give CC/UA the next command.
6464                  */
6465                 if (!res->resetting_device)
6466                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6467                 scsi_cmd->result |= (DID_ERROR << 16);
6468                 if (!ipr_is_naca_model(res))
6469                         res->needs_sync_complete = 1;
6470                 break;
6471         case IPR_IOASC_HW_DEV_BUS_STATUS:
6472                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6473                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6474                         if (!ipr_get_autosense(ipr_cmd)) {
6475                                 if (!ipr_is_naca_model(res)) {
6476                                         ipr_erp_cancel_all(ipr_cmd);
6477                                         return;
6478                                 }
6479                         }
6480                 }
6481                 if (!ipr_is_naca_model(res))
6482                         res->needs_sync_complete = 1;
6483                 break;
6484         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6485                 break;
6486         case IPR_IOASC_IR_NON_OPTIMIZED:
6487                 if (res->raw_mode) {
6488                         res->raw_mode = 0;
6489                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6490                 } else
6491                         scsi_cmd->result |= (DID_ERROR << 16);
6492                 break;
6493         default:
6494                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6495                         scsi_cmd->result |= (DID_ERROR << 16);
6496                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6497                         res->needs_sync_complete = 1;
6498                 break;
6499         }
6500
6501         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6502         scsi_cmd->scsi_done(scsi_cmd);
6503         if (ipr_cmd->eh_comp)
6504                 complete(ipr_cmd->eh_comp);
6505         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6506 }
6507
6508 /**
6509  * ipr_scsi_done - mid-layer done function
6510  * @ipr_cmd:    ipr command struct
6511  *
6512  * This function is invoked by the interrupt handler for
6513  * ops generated by the SCSI mid-layer
6514  *
6515  * Return value:
6516  *      none
6517  **/
6518 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6519 {
6520         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6521         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6522         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6523         unsigned long lock_flags;
6524
6525         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6526
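        /*
         * Fast path: successful ops are completed under the per-queue lock
         * alone; error responses fall through to ERP under the host lock.
         */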
6527         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6528                 scsi_dma_unmap(scsi_cmd);
6529
6530                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6531                 scsi_cmd->scsi_done(scsi_cmd);
6532                 if (ipr_cmd->eh_comp)
6533                         complete(ipr_cmd->eh_comp);
6534                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6535                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6536         } else {
6537                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6538                 spin_lock(&ipr_cmd->hrrq->_lock);
6539                 ipr_erp_start(ioa_cfg, ipr_cmd);
6540                 spin_unlock(&ipr_cmd->hrrq->_lock);
6541                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6542         }
6543 }
6544
6545 /**
6546  * ipr_queuecommand - Queue a mid-layer request
6547  * @shost:              scsi host struct
6548  * @scsi_cmd:   scsi command struct
6549  *
6550  * This function queues a request generated by the mid-layer.
6551  *
6552  * Return value:
6553  *      0 on success
6554  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6555  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6556  **/
6557 static int ipr_queuecommand(struct Scsi_Host *shost,
6558                             struct scsi_cmnd *scsi_cmd)
6559 {
6560         struct ipr_ioa_cfg *ioa_cfg;
6561         struct ipr_resource_entry *res;
6562         struct ipr_ioarcb *ioarcb;
6563         struct ipr_cmnd *ipr_cmd;
6564         unsigned long hrrq_flags, lock_flags;
6565         int rc;
6566         struct ipr_hrr_queue *hrrq;
6567         int hrrq_id;
6568
6569         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6570
6571         scsi_cmd->result = (DID_OK << 16);
6572         res = scsi_cmd->device->hostdata;
6573
6574         if (ipr_is_gata(res) && res->sata_port) {
6575                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6576                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6577                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6578                 return rc;
6579         }
6580
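        /* Pick an HRRQ so commands are spread across the response queues */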
6581         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6582         hrrq = &ioa_cfg->hrrq[hrrq_id];
6583
6584         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6585         /*
6586          * We are currently blocking all devices due to a host reset.
6587          * We have told the host to stop giving us new requests, but
6588          * ERP ops don't count. FIXME
6589          */
6590         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6591                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6592                 return SCSI_MLQUEUE_HOST_BUSY;
6593         }
6594
6595         /*
6596          * FIXME - Create scsi_set_host_offline interface
6597          *  and the ioa_is_dead check can be removed
6598          */
6599         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6600                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6601                 goto err_nodev;
6602         }
6603
6604         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6605         if (ipr_cmd == NULL) {
6606                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6607                 return SCSI_MLQUEUE_HOST_BUSY;
6608         }
6609         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6610
6611         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6612         ioarcb = &ipr_cmd->ioarcb;
6613
6614         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6615         ipr_cmd->scsi_cmd = scsi_cmd;
6616         ipr_cmd->done = ipr_scsi_eh_done;
6617
6618         if (ipr_is_gscsi(res)) {
6619                 if (scsi_cmd->underflow == 0)
6620                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6621
6622                 if (res->reset_occurred) {
6623                         res->reset_occurred = 0;
6624                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6625                 }
6626         }
6627
6628         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6629                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6630
6631                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6632                 if (scsi_cmd->flags & SCMD_TAGGED)
6633                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6634                 else
6635                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6636         }
6637
6638         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6639             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6640                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6641         }
6642         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6643                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6644
6645                 if (scsi_cmd->underflow == 0)
6646                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6647         }
6648
6649         if (ioa_cfg->sis64)
6650                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6651         else
6652                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6653
6654         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6655         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6656                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6657                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6658                 if (!rc)
6659                         scsi_dma_unmap(scsi_cmd);
6660                 return SCSI_MLQUEUE_HOST_BUSY;
6661         }
6662
6663         if (unlikely(hrrq->ioa_is_dead)) {
6664                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6665                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6666                 scsi_dma_unmap(scsi_cmd);
6667                 goto err_nodev;
6668         }
6669
6670         ioarcb->res_handle = res->res_handle;
6671         if (res->needs_sync_complete) {
6672                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6673                 res->needs_sync_complete = 0;
6674         }
6675         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6676         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6677         ipr_send_command(ipr_cmd);
6678         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6679         return 0;
6680
6681 err_nodev:
6682         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6683         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6684         scsi_cmd->result = (DID_NO_CONNECT << 16);
6685         scsi_cmd->scsi_done(scsi_cmd);
6686         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6687         return 0;
6688 }
6689
6690 /**
6691  * ipr_ioctl - IOCTL handler
6692  * @sdev:       scsi device struct
6693  * @cmd:        IOCTL cmd
6694  * @arg:        IOCTL arg
6695  *
6696  * Return value:
6697  *      0 on success / other on failure
6698  **/
6699 static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6700                      void __user *arg)
6701 {
6702         struct ipr_resource_entry *res;
6703
6704         res = (struct ipr_resource_entry *)sdev->hostdata;
6705         if (res && ipr_is_gata(res)) {
6706                 if (cmd == HDIO_GET_IDENTITY)
6707                         return -ENOTTY;
6708                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6709         }
6710
6711         return -EINVAL;
6712 }
6713
6714 /**
6715  * ipr_ioa_info - Get information about the card/driver
6716  * @host:       scsi host struct
6717  *
6718  * Return value:
6719  *      pointer to buffer with description string
6720  **/
6721 static const char *ipr_ioa_info(struct Scsi_Host *host)
6722 {
6723         static char buffer[512];
6724         struct ipr_ioa_cfg *ioa_cfg;
6725         unsigned long lock_flags = 0;
6726
6727         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6728
6729         spin_lock_irqsave(host->host_lock, lock_flags);
6730         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6731         spin_unlock_irqrestore(host->host_lock, lock_flags);
6732
6733         return buffer;
6734 }
6735
6736 static struct scsi_host_template driver_template = {
6737         .module = THIS_MODULE,
6738         .name = "IPR",
6739         .info = ipr_ioa_info,
6740         .ioctl = ipr_ioctl,
6741         .queuecommand = ipr_queuecommand,
6742         .eh_abort_handler = ipr_eh_abort,
6743         .eh_device_reset_handler = ipr_eh_dev_reset,
6744         .eh_host_reset_handler = ipr_eh_host_reset,
6745         .slave_alloc = ipr_slave_alloc,
6746         .slave_configure = ipr_slave_configure,
6747         .slave_destroy = ipr_slave_destroy,
6748         .scan_finished = ipr_scan_finished,
6749         .target_alloc = ipr_target_alloc,
6750         .target_destroy = ipr_target_destroy,
6751         .change_queue_depth = ipr_change_queue_depth,
6752         .bios_param = ipr_biosparam,
6753         .can_queue = IPR_MAX_COMMANDS,
6754         .this_id = -1,
6755         .sg_tablesize = IPR_MAX_SGLIST,
6756         .max_sectors = IPR_IOA_MAX_SECTORS,
6757         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6758         .shost_attrs = ipr_ioa_attrs,
6759         .sdev_attrs = ipr_dev_attrs,
6760         .proc_name = IPR_NAME,
6761 };
6762
6763 /**
6764  * ipr_ata_phy_reset - libata phy_reset handler
6765  * @ap:         ata port to reset
6766  *
6767  **/
6768 static void ipr_ata_phy_reset(struct ata_port *ap)
6769 {
6770         unsigned long flags;
6771         struct ipr_sata_port *sata_port = ap->private_data;
6772         struct ipr_resource_entry *res = sata_port->res;
6773         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6774         int rc;
6775
6776         ENTER;
6777         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6778         while (ioa_cfg->in_reset_reload) {
6779                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6780                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6781                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6782         }
6783
6784         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6785                 goto out_unlock;
6786
6787         rc = ipr_device_reset(ioa_cfg, res);
6788
6789         if (rc) {
6790                 ap->link.device[0].class = ATA_DEV_NONE;
6791                 goto out_unlock;
6792         }
6793
6794         ap->link.device[0].class = res->ata_class;
6795         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6796                 ap->link.device[0].class = ATA_DEV_NONE;
6797
6798 out_unlock:
6799         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6800         LEAVE;
6801 }
6802
6803 /**
6804  * ipr_ata_post_internal - Cleanup after an internal command
6805  * @qc: ATA queued command
6806  *
6807  * Return value:
6808  *      none
6809  **/
6810 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6811 {
6812         struct ipr_sata_port *sata_port = qc->ap->private_data;
6813         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6814         struct ipr_cmnd *ipr_cmd;
6815         struct ipr_hrr_queue *hrrq;
6816         unsigned long flags;
6817
6818         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6819         while (ioa_cfg->in_reset_reload) {
6820                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6821                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6822                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6823         }
6824
6825         for_each_hrrq(hrrq, ioa_cfg) {
6826                 spin_lock(&hrrq->_lock);
6827                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6828                         if (ipr_cmd->qc == qc) {
6829                                 ipr_device_reset(ioa_cfg, sata_port->res);
6830                                 break;
6831                         }
6832                 }
6833                 spin_unlock(&hrrq->_lock);
6834         }
6835         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6836 }
6837
6838 /**
6839  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6840  * @regs:       destination
6841  * @tf: source ATA taskfile
6842  *
6843  * Return value:
6844  *      none
6845  **/
6846 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6847                              struct ata_taskfile *tf)
6848 {
6849         regs->feature = tf->feature;
6850         regs->nsect = tf->nsect;
6851         regs->lbal = tf->lbal;
6852         regs->lbam = tf->lbam;
6853         regs->lbah = tf->lbah;
6854         regs->device = tf->device;
6855         regs->command = tf->command;
6856         regs->hob_feature = tf->hob_feature;
6857         regs->hob_nsect = tf->hob_nsect;
6858         regs->hob_lbal = tf->hob_lbal;
6859         regs->hob_lbam = tf->hob_lbam;
6860         regs->hob_lbah = tf->hob_lbah;
6861         regs->ctl = tf->ctl;
6862 }
6863
6864 /**
6865  * ipr_sata_done - done function for SATA commands
6866  * @ipr_cmd:    ipr command struct
6867  *
6868  * This function is invoked by the interrupt handler for
6869  * ops generated by the SCSI mid-layer to SATA devices
6870  *
6871  * Return value:
6872  *      none
6873  **/
6874 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6875 {
6876         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6877         struct ata_queued_cmd *qc = ipr_cmd->qc;
6878         struct ipr_sata_port *sata_port = qc->ap->private_data;
6879         struct ipr_resource_entry *res = sata_port->res;
6880         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6881
6882         spin_lock(&ipr_cmd->hrrq->_lock);
6883         if (ipr_cmd->ioa_cfg->sis64)
6884                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6885                        sizeof(struct ipr_ioasa_gata));
6886         else
6887                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6888                        sizeof(struct ipr_ioasa_gata));
6889         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6890
6891         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6892                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6893
6894         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6895                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6896         else
6897                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6898         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6899         spin_unlock(&ipr_cmd->hrrq->_lock);
6900         ata_qc_complete(qc);
6901 }
6902
6903 /**
6904  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6905  * @ipr_cmd:    ipr command struct
6906  * @qc:         ATA queued command
6907  *
6908  **/
6909 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6910                                   struct ata_queued_cmd *qc)
6911 {
6912         u32 ioadl_flags = 0;
6913         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6914         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6915         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6916         int len = qc->nbytes;
6917         struct scatterlist *sg;
6918         unsigned int si;
6919         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6920
6921         if (len == 0)
6922                 return;
6923
6924         if (qc->dma_dir == DMA_TO_DEVICE) {
6925                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6926                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6927         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6928                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6929
6930         ioarcb->data_transfer_length = cpu_to_be32(len);
6931         ioarcb->ioadl_len =
6932                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6933         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6934                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6935
6936         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6937                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6938                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6939                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6940
6941                 last_ioadl64 = ioadl64;
6942                 ioadl64++;
6943         }
6944
6945         if (likely(last_ioadl64))
6946                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6947 }
6948
6949 /**
6950  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6951  * @ipr_cmd:    ipr command struct
6952  * @qc:         ATA queued command
6953  *
6954  **/
6955 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6956                                 struct ata_queued_cmd *qc)
6957 {
6958         u32 ioadl_flags = 0;
6959         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6960         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6961         struct ipr_ioadl_desc *last_ioadl = NULL;
6962         int len = qc->nbytes;
6963         struct scatterlist *sg;
6964         unsigned int si;
6965
6966         if (len == 0)
6967                 return;
6968
6969         if (qc->dma_dir == DMA_TO_DEVICE) {
6970                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6971                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6972                 ioarcb->data_transfer_length = cpu_to_be32(len);
6973                 ioarcb->ioadl_len =
6974                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6975         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6976                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6977                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6978                 ioarcb->read_ioadl_len =
6979                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6980         }
6981
6982         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6983                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6984                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6985
6986                 last_ioadl = ioadl;
6987                 ioadl++;
6988         }
6989
6990         if (likely(last_ioadl))
6991                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6992 }
6993
6994 /**
6995  * ipr_qc_defer - Get a free ipr_cmd
6996  * @qc: queued command
6997  *
6998  * Return value:
6999  *      0 on success / ATA_DEFER_LINK if the command must be deferred
7000  **/
7001 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7002 {
7003         struct ata_port *ap = qc->ap;
7004         struct ipr_sata_port *sata_port = ap->private_data;
7005         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7006         struct ipr_cmnd *ipr_cmd;
7007         struct ipr_hrr_queue *hrrq;
7008         int hrrq_id;
7009
7010         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7011         hrrq = &ioa_cfg->hrrq[hrrq_id];
7012
7013         qc->lldd_task = NULL;
7014         spin_lock(&hrrq->_lock);
7015         if (unlikely(hrrq->ioa_is_dead)) {
7016                 spin_unlock(&hrrq->_lock);
7017                 return 0;
7018         }
7019
7020         if (unlikely(!hrrq->allow_cmds)) {
7021                 spin_unlock(&hrrq->_lock);
7022                 return ATA_DEFER_LINK;
7023         }
7024
7025         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7026         if (ipr_cmd == NULL) {
7027                 spin_unlock(&hrrq->_lock);
7028                 return ATA_DEFER_LINK;
7029         }
7030
7031         qc->lldd_task = ipr_cmd;
7032         spin_unlock(&hrrq->_lock);
7033         return 0;
7034 }
7035
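/*
 * Sketch of the libata contract assumed above (illustrative, not driver
 * code): the core consults ->qc_defer() before ->qc_issue(), so the
 * command block reserved here rides along in qc->lldd_task:
 *
 *	if (ap->ops->qc_defer && ap->ops->qc_defer(qc))
 *		return;			(core retries the qc later)
 *	ap->ops->qc_issue(qc);		(consumes qc->lldd_task)
 */
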
7036 /**
7037  * ipr_qc_issue - Issue a SATA qc to a device
7038  * @qc: queued command
7039  *
7040  * Return value:
7041  *      0 on success / AC_ERR_* mask on failure
7042  **/
7043 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7044 {
7045         struct ata_port *ap = qc->ap;
7046         struct ipr_sata_port *sata_port = ap->private_data;
7047         struct ipr_resource_entry *res = sata_port->res;
7048         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7049         struct ipr_cmnd *ipr_cmd;
7050         struct ipr_ioarcb *ioarcb;
7051         struct ipr_ioarcb_ata_regs *regs;
7052
7053         if (qc->lldd_task == NULL)
7054                 ipr_qc_defer(qc);
7055
7056         ipr_cmd = qc->lldd_task;
7057         if (ipr_cmd == NULL)
7058                 return AC_ERR_SYSTEM;
7059
7060         qc->lldd_task = NULL;
7061         spin_lock(&ipr_cmd->hrrq->_lock);
7062         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7063                         ipr_cmd->hrrq->ioa_is_dead)) {
7064                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7065                 spin_unlock(&ipr_cmd->hrrq->_lock);
7066                 return AC_ERR_SYSTEM;
7067         }
7068
7069         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7070         ioarcb = &ipr_cmd->ioarcb;
7071
7072         if (ioa_cfg->sis64) {
7073                 regs = &ipr_cmd->i.ata_ioadl.regs;
7074                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7075         } else
7076                 regs = &ioarcb->u.add_data.u.regs;
7077
7078         memset(regs, 0, sizeof(*regs));
7079         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7080
7081         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7082         ipr_cmd->qc = qc;
7083         ipr_cmd->done = ipr_sata_done;
7084         ipr_cmd->ioarcb.res_handle = res->res_handle;
7085         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7086         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7087         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7088         ipr_cmd->dma_use_sg = qc->n_elem;
7089
7090         if (ioa_cfg->sis64)
7091                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7092         else
7093                 ipr_build_ata_ioadl(ipr_cmd, qc);
7094
7095         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7096         ipr_copy_sata_tf(regs, &qc->tf);
7097         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7098         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7099
7100         switch (qc->tf.protocol) {
7101         case ATA_PROT_NODATA:
7102         case ATA_PROT_PIO:
7103                 break;
7104
7105         case ATA_PROT_DMA:
7106                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7107                 break;
7108
7109         case ATAPI_PROT_PIO:
7110         case ATAPI_PROT_NODATA:
7111                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7112                 break;
7113
7114         case ATAPI_PROT_DMA:
7115                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7116                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7117                 break;
7118
7119         default:
7120                 WARN_ON(1);
7121                 spin_unlock(&ipr_cmd->hrrq->_lock);
7122                 return AC_ERR_INVALID;
7123         }
7124
7125         ipr_send_command(ipr_cmd);
7126         spin_unlock(&ipr_cmd->hrrq->_lock);
7127
7128         return 0;
7129 }
7130
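/*
 * The protocol switch above reduces to two independent flag bits; a
 * minimal equivalent using the generic libata protocol helpers
 * (illustrative only, not what the driver compiles):
 *
 *	u8 flags = 0;
 *	if (ata_is_atapi(qc->tf.protocol))
 *		flags |= IPR_ATA_FLAG_PACKET_CMD;
 *	if (ata_is_dma(qc->tf.protocol))
 *		flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
 */
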
7131 /**
7132  * ipr_qc_fill_rtf - Read result TF
7133  * @qc: ATA queued command
7134  *
7135  * Return value:
7136  *      true
7137  **/
7138 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7139 {
7140         struct ipr_sata_port *sata_port = qc->ap->private_data;
7141         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7142         struct ata_taskfile *tf = &qc->result_tf;
7143
7144         tf->feature = g->error;
7145         tf->nsect = g->nsect;
7146         tf->lbal = g->lbal;
7147         tf->lbam = g->lbam;
7148         tf->lbah = g->lbah;
7149         tf->device = g->device;
7150         tf->command = g->status;
7151         tf->hob_nsect = g->hob_nsect;
7152         tf->hob_lbal = g->hob_lbal;
7153         tf->hob_lbam = g->hob_lbam;
7154         tf->hob_lbah = g->hob_lbah;
7155
7156         return true;
7157 }
7158
7159 static struct ata_port_operations ipr_sata_ops = {
7160         .phy_reset = ipr_ata_phy_reset,
7161         .hardreset = ipr_sata_reset,
7162         .post_internal_cmd = ipr_ata_post_internal,
7163         .qc_prep = ata_noop_qc_prep,
7164         .qc_defer = ipr_qc_defer,
7165         .qc_issue = ipr_qc_issue,
7166         .qc_fill_rtf = ipr_qc_fill_rtf,
7167         .port_start = ata_sas_port_start,
7168         .port_stop = ata_sas_port_stop
7169 };
7170
7171 static struct ata_port_info sata_port_info = {
7172         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7173                           ATA_FLAG_SAS_HOST,
7174         .pio_mask       = ATA_PIO4_ONLY,
7175         .mwdma_mask     = ATA_MWDMA2,
7176         .udma_mask      = ATA_UDMA6,
7177         .port_ops       = &ipr_sata_ops
7178 };
7179
7180 #ifdef CONFIG_PPC_PSERIES
7181 static const u16 ipr_blocked_processors[] = {
7182         PVR_NORTHSTAR,
7183         PVR_PULSAR,
7184         PVR_POWER4,
7185         PVR_ICESTAR,
7186         PVR_SSTAR,
7187         PVR_POWER4p,
7188         PVR_630,
7189         PVR_630p
7190 };
7191
7192 /**
7193  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7194  * @ioa_cfg:    ioa cfg struct
7195  *
7196  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7197  * certain pSeries hardware. This function determines if the given
7198  * adapter is in one of these configurations or not.
7199  *
7200  * Return value:
7201  *      1 if adapter is not supported / 0 if adapter is supported
7202  **/
7203 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7204 {
7205         int i;
7206
7207         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7208                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7209                         if (pvr_version_is(ipr_blocked_processors[i]))
7210                                 return 1;
7211                 }
7212         }
7213         return 0;
7214 }
7215 #else
7216 #define ipr_invalid_adapter(ioa_cfg) 0
7217 #endif
7218
7219 /**
7220  * ipr_ioa_bringdown_done - IOA bring down completion.
7221  * @ipr_cmd:    ipr command struct
7222  *
7223  * This function processes the completion of an adapter bring down.
7224  * It wakes any reset sleepers.
7225  *
7226  * Return value:
7227  *      IPR_RC_JOB_RETURN
7228  **/
7229 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7230 {
7231         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7232         int i;
7233
7234         ENTER;
7235         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7236                 ipr_trace;
7237                 ioa_cfg->scsi_unblock = 1;
7238                 schedule_work(&ioa_cfg->work_q);
7239         }
7240
7241         ioa_cfg->in_reset_reload = 0;
7242         ioa_cfg->reset_retries = 0;
7243         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7244                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7245                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7246                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7247         }
7248         wmb();
7249
7250         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7251         wake_up_all(&ioa_cfg->reset_wait_q);
7252         LEAVE;
7253
7254         return IPR_RC_JOB_RETURN;
7255 }
7256
7257 /**
7258  * ipr_ioa_reset_done - IOA reset completion.
7259  * @ipr_cmd:    ipr command struct
7260  *
7261  * This function processes the completion of an adapter reset.
7262  * It schedules any necessary mid-layer add/removes and
7263  * wakes any reset sleepers.
7264  *
7265  * Return value:
7266  *      IPR_RC_JOB_RETURN
7267  **/
7268 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7269 {
7270         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7271         struct ipr_resource_entry *res;
7272         int j;
7273
7274         ENTER;
7275         ioa_cfg->in_reset_reload = 0;
7276         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7277                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7278                 ioa_cfg->hrrq[j].allow_cmds = 1;
7279                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7280         }
7281         wmb();
7282         ioa_cfg->reset_cmd = NULL;
7283         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7284
7285         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7286                 if (res->add_to_ml || res->del_from_ml) {
7287                         ipr_trace;
7288                         break;
7289                 }
7290         }
7291         schedule_work(&ioa_cfg->work_q);
7292
7293         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7294                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7295                 if (j < IPR_NUM_LOG_HCAMS)
7296                         ipr_send_hcam(ioa_cfg,
7297                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7298                                 ioa_cfg->hostrcb[j]);
7299                 else
7300                         ipr_send_hcam(ioa_cfg,
7301                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7302                                 ioa_cfg->hostrcb[j]);
7303         }
7304
7305         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7306         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7307
7308         ioa_cfg->reset_retries = 0;
7309         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7310         wake_up_all(&ioa_cfg->reset_wait_q);
7311
7312         ioa_cfg->scsi_unblock = 1;
7313         schedule_work(&ioa_cfg->work_q);
7314         LEAVE;
7315         return IPR_RC_JOB_RETURN;
7316 }
7317
7318 /**
7319  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7320  * @supported_dev:      supported device struct
7321  * @vpids:              vendor product id struct
7322  *
7323  * Return value:
7324  *      none
7325  **/
7326 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7327                                  struct ipr_std_inq_vpids *vpids)
7328 {
7329         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7330         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7331         supported_dev->num_records = 1;
7332         supported_dev->data_length =
7333                 cpu_to_be16(sizeof(struct ipr_supported_device));
7334         supported_dev->reserved = 0;
7335 }
7336
7337 /**
7338  * ipr_set_supported_devs - Send Set Supported Devices for a device
7339  * @ipr_cmd:    ipr command struct
7340  *
7341  * This function sends a Set Supported Devices to the adapter
7342  *
7343  * Return value:
7344  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7345  **/
7346 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7347 {
7348         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7349         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7350         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7351         struct ipr_resource_entry *res = ipr_cmd->u.res;
7352
7353         ipr_cmd->job_step = ipr_ioa_reset_done;
7354
7355         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7356                 if (!ipr_is_scsi_disk(res))
7357                         continue;
7358
7359                 ipr_cmd->u.res = res;
7360                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7361
7362                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7363                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7364                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7365
7366                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7367                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7368                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7369                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7370
7371                 ipr_init_ioadl(ipr_cmd,
7372                                ioa_cfg->vpd_cbs_dma +
7373                                  offsetof(struct ipr_misc_cbs, supp_dev),
7374                                sizeof(struct ipr_supported_device),
7375                                IPR_IOADL_FLAGS_WRITE_LAST);
7376
7377                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7378                            IPR_SET_SUP_DEVICE_TIMEOUT);
7379
7380                 if (!ioa_cfg->sis64)
7381                         ipr_cmd->job_step = ipr_set_supported_devs;
7382                 LEAVE;
7383                 return IPR_RC_JOB_RETURN;
7384         }
7385
7386         LEAVE;
7387         return IPR_RC_JOB_CONTINUE;
7388 }
7389
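/*
 * Reset-job plumbing, for orientation: each job_step either queues an
 * adapter command and returns IPR_RC_JOB_RETURN (its done routine
 * re-enters the job later) or returns IPR_RC_JOB_CONTINUE so the next
 * step runs inline. The driver loop is roughly:
 *
 *	do {
 *		rc = ipr_cmd->job_step(ipr_cmd);
 *	} while (rc == IPR_RC_JOB_CONTINUE);
 */
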
7390 /**
7391  * ipr_get_mode_page - Locate specified mode page
7392  * @mode_pages: mode page buffer
7393  * @page_code:  page code to find
7394  * @len:                minimum required length for mode page
7395  *
7396  * Return value:
7397  *      pointer to mode page / NULL on failure
7398  **/
7399 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7400                                u32 page_code, u32 len)
7401 {
7402         struct ipr_mode_page_hdr *mode_hdr;
7403         u32 page_length;
7404         u32 length;
7405
7406         if (!mode_pages || (mode_pages->hdr.length == 0))
7407                 return NULL;
7408
7409         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7410         mode_hdr = (struct ipr_mode_page_hdr *)
7411                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7412
7413         while (length) {
7414                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7415                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7416                                 return mode_hdr;
7417                         break;
7418                 } else {
7419                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7420                                        mode_hdr->page_length);
7421                         length -= page_length;
7422                         mode_hdr = (struct ipr_mode_page_hdr *)
7423                                 ((unsigned long)mode_hdr + page_length);
7424                 }
7425         }
7426         return NULL;
7427 }
7428
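/*
 * Layout assumed by the walk above (SPC MODE SENSE(6) data): a 4-byte
 * parameter header, hdr.block_desc_len bytes of block descriptors, then
 * back-to-back pages, each led by a 2-byte page header whose page_length
 * counts only the bytes following it. Stepping to the next page is:
 *
 *	next = (struct ipr_mode_page_hdr *)((u8 *)hdr +
 *		sizeof(struct ipr_mode_page_hdr) + hdr->page_length);
 */
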
7429 /**
7430  * ipr_check_term_power - Check for term power errors
7431  * @ioa_cfg:    ioa config struct
7432  * @mode_pages: IOAFP mode pages buffer
7433  *
7434  * Check the IOAFP's mode page 28 for term power errors
7435  *
7436  * Return value:
7437  *      nothing
7438  **/
7439 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7440                                  struct ipr_mode_pages *mode_pages)
7441 {
7442         int i;
7443         int entry_length;
7444         struct ipr_dev_bus_entry *bus;
7445         struct ipr_mode_page28 *mode_page;
7446
7447         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7448                                       sizeof(struct ipr_mode_page28));
7449
7450         entry_length = mode_page->entry_length;
7451
7452         bus = mode_page->bus;
7453
7454         for (i = 0; i < mode_page->num_entries; i++) {
7455                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7456                         dev_err(&ioa_cfg->pdev->dev,
7457                                 "Term power is absent on scsi bus %d\n",
7458                                 bus->res_addr.bus);
7459                 }
7460
7461                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7462         }
7463 }
7464
7465 /**
7466  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7467  * @ioa_cfg:    ioa config struct
7468  *
7469  * Looks through the config table checking for SES devices. If
7470  * the SES device is in the SES table indicating a maximum SCSI
7471  * bus speed, the speed is limited for the bus.
7472  *
7473  * Return value:
7474  *      none
7475  **/
7476 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7477 {
7478         u32 max_xfer_rate;
7479         int i;
7480
7481         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7482                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7483                                                        ioa_cfg->bus_attr[i].bus_width);
7484
7485                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7486                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7487         }
7488 }
7489
7490 /**
7491  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7492  * @ioa_cfg:    ioa config struct
7493  * @mode_pages: mode page 28 buffer
7494  *
7495  * Updates mode page 28 based on driver configuration
7496  *
7497  * Return value:
7498  *      none
7499  **/
7500 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7501                                           struct ipr_mode_pages *mode_pages)
7502 {
7503         int i, entry_length;
7504         struct ipr_dev_bus_entry *bus;
7505         struct ipr_bus_attributes *bus_attr;
7506         struct ipr_mode_page28 *mode_page;
7507
7508         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7509                                       sizeof(struct ipr_mode_page28));
7510
7511         entry_length = mode_page->entry_length;
7512
7513         /* Loop for each device bus entry */
7514         for (i = 0, bus = mode_page->bus;
7515              i < mode_page->num_entries;
7516              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7517                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7518                         dev_err(&ioa_cfg->pdev->dev,
7519                                 "Invalid resource address reported: 0x%08X\n",
7520                                 IPR_GET_PHYS_LOC(bus->res_addr));
7521                         continue;
7522                 }
7523
7524                 bus_attr = &ioa_cfg->bus_attr[i];
7525                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7526                 bus->bus_width = bus_attr->bus_width;
7527                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7528                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7529                 if (bus_attr->qas_enabled)
7530                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7531                 else
7532                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7533         }
7534 }
7535
7536 /**
7537  * ipr_build_mode_select - Build a mode select command
7538  * @ipr_cmd:    ipr command struct
7539  * @res_handle: resource handle to send command to
7540  * @parm:               Byte 1 of the Mode Select CDB
7541  * @dma_addr:   DMA buffer address
7542  * @xfer_len:   data transfer length
7543  *
7544  * Return value:
7545  *      none
7546  **/
7547 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7548                                   __be32 res_handle, u8 parm,
7549                                   dma_addr_t dma_addr, u8 xfer_len)
7550 {
7551         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7552
7553         ioarcb->res_handle = res_handle;
7554         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7555         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7556         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7557         ioarcb->cmd_pkt.cdb[1] = parm;
7558         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7559
7560         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7561 }
7562
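/*
 * Resulting MODE SELECT(6) CDB, byte by byte (illustrative; the callers
 * in this file pass parm = 0x11, i.e. PF | SP):
 *
 *	cdb[0] = 0x15 (MODE_SELECT)
 *	cdb[1] = parm
 *	cdb[4] = xfer_len (parameter list length)
 */
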
7563 /**
7564  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7565  * @ipr_cmd:    ipr command struct
7566  *
7567  * This function sets up the SCSI bus attributes and sends
7568  * a Mode Select for Page 28 to activate them.
7569  *
7570  * Return value:
7571  *      IPR_RC_JOB_RETURN
7572  **/
7573 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7574 {
7575         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7576         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7577         int length;
7578
7579         ENTER;
7580         ipr_scsi_bus_speed_limit(ioa_cfg);
7581         ipr_check_term_power(ioa_cfg, mode_pages);
7582         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7583         length = mode_pages->hdr.length + 1;
7584         mode_pages->hdr.length = 0;
7585
7586         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7587                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7588                               length);
7589
7590         ipr_cmd->job_step = ipr_set_supported_devs;
7591         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7592                                     struct ipr_resource_entry, queue);
7593         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7594
7595         LEAVE;
7596         return IPR_RC_JOB_RETURN;
7597 }
7598
7599 /**
7600  * ipr_build_mode_sense - Builds a mode sense command
7601  * @ipr_cmd:    ipr command struct
7602  * @res_handle:         resource handle to send command to
7603  * @parm:               Byte 2 of mode sense command
7604  * @dma_addr:   DMA address of mode sense buffer
7605  * @xfer_len:   Size of DMA buffer
7606  *
7607  * Return value:
7608  *      none
7609  **/
7610 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7611                                  __be32 res_handle,
7612                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7613 {
7614         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7615
7616         ioarcb->res_handle = res_handle;
7617         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7618         ioarcb->cmd_pkt.cdb[2] = parm;
7619         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7620         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7621
7622         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7623 }
7624
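/*
 * Companion MODE SENSE(6) CDB (illustrative): here parm lands in byte 2,
 * the page-code field, which is why callers pass 0x24/0x28 directly:
 *
 *	cdb[0] = 0x1a (MODE_SENSE)
 *	cdb[2] = parm (page code)
 *	cdb[4] = xfer_len (allocation length)
 */
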
7625 /**
7626  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7627  * @ipr_cmd:    ipr command struct
7628  *
7629  * This function handles the failure of an IOA bringup command.
7630  *
7631  * Return value:
7632  *      IPR_RC_JOB_RETURN
7633  **/
7634 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7635 {
7636         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7637         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7638
7639         dev_err(&ioa_cfg->pdev->dev,
7640                 "0x%02X failed with IOASC: 0x%08X\n",
7641                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7642
7643         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7644         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7645         return IPR_RC_JOB_RETURN;
7646 }
7647
7648 /**
7649  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7650  * @ipr_cmd:    ipr command struct
7651  *
7652  * This function handles the failure of a Mode Sense to the IOAFP.
7653  * Some adapters do not handle all mode pages.
7654  *
7655  * Return value:
7656  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7657  **/
7658 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7659 {
7660         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7661         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7662
7663         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7664                 ipr_cmd->job_step = ipr_set_supported_devs;
7665                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7666                                             struct ipr_resource_entry, queue);
7667                 return IPR_RC_JOB_CONTINUE;
7668         }
7669
7670         return ipr_reset_cmd_failed(ipr_cmd);
7671 }
7672
7673 /**
7674  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7675  * @ipr_cmd:    ipr command struct
7676  *
7677  * This function sends a Page 28 mode sense to the IOA to
7678  * retrieve SCSI bus attributes.
7679  *
7680  * Return value:
7681  *      IPR_RC_JOB_RETURN
7682  **/
7683 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7684 {
7685         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7686
7687         ENTER;
7688         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7689                              0x28, ioa_cfg->vpd_cbs_dma +
7690                              offsetof(struct ipr_misc_cbs, mode_pages),
7691                              sizeof(struct ipr_mode_pages));
7692
7693         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7694         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7695
7696         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7697
7698         LEAVE;
7699         return IPR_RC_JOB_RETURN;
7700 }
7701
7702 /**
7703  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7704  * @ipr_cmd:    ipr command struct
7705  *
7706  * This function enables dual IOA RAID support if possible.
7707  *
7708  * Return value:
7709  *      IPR_RC_JOB_RETURN
7710  **/
7711 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7712 {
7713         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7714         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7715         struct ipr_mode_page24 *mode_page;
7716         int length;
7717
7718         ENTER;
7719         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7720                                       sizeof(struct ipr_mode_page24));
7721
7722         if (mode_page)
7723                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7724
7725         length = mode_pages->hdr.length + 1;
7726         mode_pages->hdr.length = 0;
7727
7728         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7729                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7730                               length);
7731
7732         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7733         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7734
7735         LEAVE;
7736         return IPR_RC_JOB_RETURN;
7737 }
7738
7739 /**
7740  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7741  * @ipr_cmd:    ipr command struct
7742  *
7743  * This function handles the failure of a Mode Sense to the IOAFP.
7744  * Some adapters do not handle all mode pages.
7745  *
7746  * Return value:
7747  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7748  **/
7749 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7750 {
7751         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7752
7753         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7754                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7755                 return IPR_RC_JOB_CONTINUE;
7756         }
7757
7758         return ipr_reset_cmd_failed(ipr_cmd);
7759 }
7760
7761 /**
7762  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7763  * @ipr_cmd:    ipr command struct
7764  *
7765  * This function sends a mode sense to the IOA to retrieve
7766  * the IOA Advanced Function Control mode page.
7767  *
7768  * Return value:
7769  *      IPR_RC_JOB_RETURN
7770  **/
7771 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7772 {
7773         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7774
7775         ENTER;
7776         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7777                              0x24, ioa_cfg->vpd_cbs_dma +
7778                              offsetof(struct ipr_misc_cbs, mode_pages),
7779                              sizeof(struct ipr_mode_pages));
7780
7781         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7782         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7783
7784         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7785
7786         LEAVE;
7787         return IPR_RC_JOB_RETURN;
7788 }
7789
7790 /**
7791  * ipr_init_res_table - Initialize the resource table
7792  * @ipr_cmd:    ipr command struct
7793  *
7794  * This function looks through the existing resource table, comparing
7795  * it with the config table. It takes care of old/new devices and
7796  * schedules adding/removing them from the mid-layer
7797  * as appropriate.
7798  *
7799  * Return value:
7800  *      IPR_RC_JOB_CONTINUE
7801  **/
7802 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7803 {
7804         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7805         struct ipr_resource_entry *res, *temp;
7806         struct ipr_config_table_entry_wrapper cfgtew;
7807         int entries, found, flag, i;
7808         LIST_HEAD(old_res);
7809
7810         ENTER;
7811         if (ioa_cfg->sis64)
7812                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7813         else
7814                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7815
7816         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7817                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7818
7819         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7820                 list_move_tail(&res->queue, &old_res);
7821
7822         if (ioa_cfg->sis64)
7823                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7824         else
7825                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7826
7827         for (i = 0; i < entries; i++) {
7828                 if (ioa_cfg->sis64)
7829                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7830                 else
7831                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7832                 found = 0;
7833
7834                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7835                         if (ipr_is_same_device(res, &cfgtew)) {
7836                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7837                                 found = 1;
7838                                 break;
7839                         }
7840                 }
7841
7842                 if (!found) {
7843                         if (list_empty(&ioa_cfg->free_res_q)) {
7844                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7845                                 break;
7846                         }
7847
7848                         found = 1;
7849                         res = list_entry(ioa_cfg->free_res_q.next,
7850                                          struct ipr_resource_entry, queue);
7851                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7852                         ipr_init_res_entry(res, &cfgtew);
7853                         res->add_to_ml = 1;
7854                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7855                         res->sdev->allow_restart = 1;
7856
7857                 if (found)
7858                         ipr_update_res_entry(res, &cfgtew);
7859         }
7860
7861         list_for_each_entry_safe(res, temp, &old_res, queue) {
7862                 if (res->sdev) {
7863                         res->del_from_ml = 1;
7864                         res->res_handle = IPR_INVALID_RES_HANDLE;
7865                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7866                 }
7867         }
7868
7869         list_for_each_entry_safe(res, temp, &old_res, queue) {
7870                 ipr_clear_res_target(res);
7871                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7872         }
7873
7874         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7875                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7876         else
7877                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7878
7879         LEAVE;
7880         return IPR_RC_JOB_CONTINUE;
7881 }
7882
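/*
 * The reconciliation above is a classic two-list sweep (pseudocode, not
 * compilable):
 *
 *	move all of used_res_q onto old_res;
 *	for each entry in the new config table:
 *		if it matches an entry on old_res -> move back to used_res_q
 *		else -> take a free entry, init it, mark add_to_ml
 *	for each entry left on old_res:
 *		with an sdev -> mark del_from_ml (mid-layer removal)
 *		otherwise    -> clear the target, return to free_res_q
 */
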
7883 /**
7884  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7885  * @ipr_cmd:    ipr command struct
7886  *
7887  * This function sends a Query IOA Configuration command
7888  * to the adapter to retrieve the IOA configuration table.
7889  *
7890  * Return value:
7891  *      IPR_RC_JOB_RETURN
7892  **/
7893 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7894 {
7895         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7896         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7897         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7898         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7899
7900         ENTER;
7901         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7902                 ioa_cfg->dual_raid = 1;
7903         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7904                  ucode_vpd->major_release, ucode_vpd->card_type,
7905                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7906         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7907         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7908
7909         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7910         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7911         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7912         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7913
7914         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7915                        IPR_IOADL_FLAGS_READ_LAST);
7916
7917         ipr_cmd->job_step = ipr_init_res_table;
7918
7919         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7920
7921         LEAVE;
7922         return IPR_RC_JOB_RETURN;
7923 }
7924
7925 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7926 {
7927         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7928
7929         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7930                 return IPR_RC_JOB_CONTINUE;
7931
7932         return ipr_reset_cmd_failed(ipr_cmd);
7933 }
7934
7935 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7936                                          __be32 res_handle, u8 sa_code)
7937 {
7938         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7939
7940         ioarcb->res_handle = res_handle;
7941         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7942         ioarcb->cmd_pkt.cdb[1] = sa_code;
7943         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7944 }
7945
7946 /**
7947  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7948  * action
7949  * @ipr_cmd:    ipr command struct
      *
7950  * Return value:
7951  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7952  **/
7953 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7954 {
7955         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7956         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7957         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7958
7959         ENTER;
7960
7961         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7962
7963         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7964                 ipr_build_ioa_service_action(ipr_cmd,
7965                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7966                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7967
7968                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7969
7970                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7971                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7972                            IPR_SET_SUP_DEVICE_TIMEOUT);
7973
7974                 LEAVE;
7975                 return IPR_RC_JOB_RETURN;
7976         }
7977
7978         LEAVE;
7979         return IPR_RC_JOB_CONTINUE;
7980 }
7981
7982 /**
7983  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7984  * @ipr_cmd:    ipr command struct
      * @flags:      inquiry flags (CDB byte 1)
      * @page:       page code to inquire
      * @dma_addr:   DMA address of the response buffer
      * @xfer_len:   transfer length
7985  *
7986  * This utility function sends an inquiry to the adapter.
7987  *
7988  * Return value:
7989  *      none
7990  **/
7991 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7992                               dma_addr_t dma_addr, u8 xfer_len)
7993 {
7994         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7995
7996         ENTER;
7997         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7998         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7999
8000         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8001         ioarcb->cmd_pkt.cdb[1] = flags;
8002         ioarcb->cmd_pkt.cdb[2] = page;
8003         ioarcb->cmd_pkt.cdb[4] = xfer_len;
8004
8005         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8006
8007         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8008         LEAVE;
8009 }
8010
8011 /**
8012  * ipr_inquiry_page_supported - Is the given inquiry page supported
8013  * @page0:              inquiry page 0 buffer
8014  * @page:               page code.
8015  *
8016  * This function determines if the specified inquiry page is supported.
8017  *
8018  * Return value:
8019  *      1 if page is supported / 0 if not
8020  **/
8021 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8022 {
8023         int i;
8024
8025         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8026                 if (page0->page[i] == page)
8027                         return 1;
8028
8029         return 0;
8030 }
8031
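/*
 * Worked example with hypothetical response data: an adapter advertising
 * EVPD pages 0x00, 0x03, 0xC4 and 0xD0 would return
 *
 *	page0->len = 4, page0->page[] = { 0x00, 0x03, 0xC4, 0xD0 }
 *
 * so ipr_inquiry_page_supported(page0, 0xC4) == 1 and the pageC4/cap
 * inquiries below are only issued when actually supported.
 */
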
8032 /**
8033  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8034  * @ipr_cmd:    ipr command struct
8035  *
8036  * This function sends a Page 0xC4 inquiry to the adapter
8037  * to retrieve software VPD information.
8038  *
8039  * Return value:
8040  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8041  **/
8042 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8043 {
8044         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8045         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8046         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8047
8048         ENTER;
8049         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8050         memset(pageC4, 0, sizeof(*pageC4));
8051
8052         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8053                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8054                                   (ioa_cfg->vpd_cbs_dma
8055                                    + offsetof(struct ipr_misc_cbs,
8056                                               pageC4_data)),
8057                                   sizeof(struct ipr_inquiry_pageC4));
8058                 return IPR_RC_JOB_RETURN;
8059         }
8060
8061         LEAVE;
8062         return IPR_RC_JOB_CONTINUE;
8063 }
8064
8065 /**
8066  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8067  * @ipr_cmd:    ipr command struct
8068  *
8069  * This function sends a Page 0xD0 inquiry to the adapter
8070  * to retrieve adapter capabilities.
8071  *
8072  * Return value:
8073  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8074  **/
8075 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8076 {
8077         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8078         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8079         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8080
8081         ENTER;
8082         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8083         memset(cap, 0, sizeof(*cap));
8084
8085         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8086                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8087                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8088                                   sizeof(struct ipr_inquiry_cap));
8089                 return IPR_RC_JOB_RETURN;
8090         }
8091
8092         LEAVE;
8093         return IPR_RC_JOB_CONTINUE;
8094 }
8095
8096 /**
8097  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8098  * @ipr_cmd:    ipr command struct
8099  *
8100  * This function sends a Page 3 inquiry to the adapter
8101  * to retrieve software VPD information.
8102  *
8103  * Return value:
8104  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8105  **/
8106 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8107 {
8108         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8109
8110         ENTER;
8111
8112         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8113
8114         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8115                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8116                           sizeof(struct ipr_inquiry_page3));
8117
8118         LEAVE;
8119         return IPR_RC_JOB_RETURN;
8120 }
8121
8122 /**
8123  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8124  * @ipr_cmd:    ipr command struct
8125  *
8126  * This function sends a Page 0 inquiry to the adapter
8127  * to retrieve supported inquiry pages.
8128  *
8129  * Return value:
8130  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8131  **/
8132 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8133 {
8134         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8135         char type[5];
8136
8137         ENTER;
8138
8139         /* Grab the type out of the VPD and store it away */
8140         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8141         type[4] = '\0';
8142         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8143
8144         if (ipr_invalid_adapter(ioa_cfg)) {
8145                 dev_err(&ioa_cfg->pdev->dev,
8146                         "Adapter not supported in this hardware configuration.\n");
8147
8148                 if (!ipr_testmode) {
8149                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8150                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8151                         list_add_tail(&ipr_cmd->queue,
8152                                         &ioa_cfg->hrrq->hrrq_free_q);
8153                         return IPR_RC_JOB_RETURN;
8154                 }
8155         }
8156
8157         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8158
8159         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8160                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8161                           sizeof(struct ipr_inquiry_page0));
8162
8163         LEAVE;
8164         return IPR_RC_JOB_RETURN;
8165 }
8166
8167 /**
8168  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8169  * @ipr_cmd:    ipr command struct
8170  *
8171  * This function sends a standard inquiry to the adapter.
8172  *
8173  * Return value:
8174  *      IPR_RC_JOB_RETURN
8175  **/
8176 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8177 {
8178         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8179
8180         ENTER;
8181         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8182
8183         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8184                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8185                           sizeof(struct ipr_ioa_vpd));
8186
8187         LEAVE;
8188         return IPR_RC_JOB_RETURN;
8189 }
8190
8191 /**
8192  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8193  * @ipr_cmd:    ipr command struct
8194  *
8195  * This function sends an Identify Host Request Response Queue
8196  * command to establish the HRRQ with the adapter.
8197  *
8198  * Return value:
8199  *      IPR_RC_JOB_RETURN
8200  **/
8201 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8202 {
8203         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8204         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8205         struct ipr_hrr_queue *hrrq;
8206
8207         ENTER;
8208         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8209         if (ioa_cfg->identify_hrrq_index == 0)
8210                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8211
8212         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8213                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8214
8215                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8216                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8217
8218                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8219                 if (ioa_cfg->sis64)
8220                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8221
8222                 if (ioa_cfg->nvectors == 1)
8223                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8224                 else
8225                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8226
8227                 ioarcb->cmd_pkt.cdb[2] =
8228                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8229                 ioarcb->cmd_pkt.cdb[3] =
8230                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8231                 ioarcb->cmd_pkt.cdb[4] =
8232                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8233                 ioarcb->cmd_pkt.cdb[5] =
8234                         ((u64) hrrq->host_rrq_dma) & 0xff;
8235                 ioarcb->cmd_pkt.cdb[7] =
8236                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8237                 ioarcb->cmd_pkt.cdb[8] =
8238                         (sizeof(u32) * hrrq->size) & 0xff;
8239
8240                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8241                         ioarcb->cmd_pkt.cdb[9] =
8242                                         ioa_cfg->identify_hrrq_index;
8243
8244                 if (ioa_cfg->sis64) {
8245                         ioarcb->cmd_pkt.cdb[10] =
8246                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8247                         ioarcb->cmd_pkt.cdb[11] =
8248                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8249                         ioarcb->cmd_pkt.cdb[12] =
8250                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8251                         ioarcb->cmd_pkt.cdb[13] =
8252                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8253                 }
8254
8255                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8256                         ioarcb->cmd_pkt.cdb[14] =
8257                                         ioa_cfg->identify_hrrq_index;
8258
8259                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8260                            IPR_INTERNAL_TIMEOUT);
8261
8262                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8263                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8264
8265                 LEAVE;
8266                 return IPR_RC_JOB_RETURN;
8267         }
8268
8269         LEAVE;
8270         return IPR_RC_JOB_CONTINUE;
8271 }
8272
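/*
 * The CDB packing above spreads the 64-bit HRRQ DMA address big-endian
 * over bytes 2..5 (low word) and, on SIS-64, bytes 10..13 (high word).
 * A hypothetical helper doing the same (illustrative sketch only):
 *
 *	static void cdb_put_be32(u8 *p, u32 v)
 *	{
 *		p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
 *	}
 *	cdb_put_be32(&cdb[2], lower_32_bits(hrrq->host_rrq_dma));
 *	cdb_put_be32(&cdb[10], upper_32_bits(hrrq->host_rrq_dma));
 */
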
8273 /**
8274  * ipr_reset_timer_done - Adapter reset timer function
8275  * @t:          timer context used to fetch the ipr command struct
8276  *
8277  * Description: This function is used in adapter reset processing
8278  * for timing events. If the reset_cmd pointer in the IOA
8279  * config struct is not this adapter's, we are doing nested
8280  * resets and fail_all_ops will take care of freeing the
8281  * command block.
8282  *
8283  * Return value:
8284  *      none
8285  **/
8286 static void ipr_reset_timer_done(struct timer_list *t)
8287 {
8288         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8289         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8290         unsigned long lock_flags = 0;
8291
8292         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8293
8294         if (ioa_cfg->reset_cmd == ipr_cmd) {
8295                 list_del(&ipr_cmd->queue);
8296                 ipr_cmd->done(ipr_cmd);
8297         }
8298
8299         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8300 }
8301
8302 /**
8303  * ipr_reset_start_timer - Start a timer for adapter reset job
8304  * @ipr_cmd:    ipr command struct
8305  * @timeout:    timeout value
8306  *
8307  * Description: This function is used in adapter reset processing
8308  * for timing events. If the reset_cmd pointer in the IOA
8309  * config struct is not this adapter's, we are doing nested
8310  * resets and fail_all_ops will take care of freeing the
8311  * command block.
8312  *
8313  * Return value:
8314  *      none
8315  **/
8316 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8317                                   unsigned long timeout)
8318 {
8319
8320         ENTER;
8321         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8322         ipr_cmd->done = ipr_reset_ioa_job;
8323
8324         ipr_cmd->timer.expires = jiffies + timeout;
8325         ipr_cmd->timer.function = ipr_reset_timer_done;
8326         add_timer(&ipr_cmd->timer);
8327 }
8328
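/*
 * Standard timer_list pattern (illustrative): from_timer() in the
 * callback requires the timer to have been initialized with
 * timer_setup() against the containing ipr_cmnd, schematically:
 *
 *	timer_setup(&ipr_cmd->timer, ipr_reset_timer_done, 0);
 *	ipr_cmd->timer.expires = jiffies + timeout;
 *	add_timer(&ipr_cmd->timer);
 */
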
8329 /**
8330  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8331  * @ioa_cfg:    ioa cfg struct
8332  *
8333  * Return value:
8334  *      nothing
8335  **/
8336 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8337 {
8338         struct ipr_hrr_queue *hrrq;
8339
8340         for_each_hrrq(hrrq, ioa_cfg) {
8341                 spin_lock(&hrrq->_lock);
8342                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8343
8344                 /* Initialize Host RRQ pointers */
8345                 hrrq->hrrq_start = hrrq->host_rrq;
8346                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8347                 hrrq->hrrq_curr = hrrq->hrrq_start;
8348                 hrrq->toggle_bit = 1;
8349                 spin_unlock(&hrrq->_lock);
8350         }
8351         wmb();
8352
8353         ioa_cfg->identify_hrrq_index = 0;
8354         if (ioa_cfg->hrrq_num == 1)
8355                 atomic_set(&ioa_cfg->hrrq_index, 0);
8356         else
8357                 atomic_set(&ioa_cfg->hrrq_index, 1);
8358
8359         /* Zero out config table */
8360         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8361 }
8362
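/*
 * Why toggle_bit starts at 1: the host RRQ is a ring of __be32 entries
 * and the adapter flips the toggle bit on each wrap, so the consumer
 * (see ipr_process_hrrq elsewhere in this file) spots fresh entries
 * without an index register, roughly:
 *
 *	while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 *	       hrrq->toggle_bit) {
 *		(consume entry)
 *		if (++hrrq->hrrq_curr > hrrq->hrrq_end) {
 *			hrrq->hrrq_curr = hrrq->hrrq_start;
 *			hrrq->toggle_bit ^= 1u;
 *		}
 *	}
 */
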
8363 /**
8364  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8365  * @ipr_cmd:    ipr command struct
8366  *
8367  * Return value:
8368  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8369  **/
8370 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8371 {
8372         unsigned long stage, stage_time;
8373         u32 feedback;
8374         volatile u32 int_reg;
8375         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8376         u64 maskval = 0;
8377
8378         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8379         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8380         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8381
8382         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8383
8384         /* sanity check the stage_time value */
8385         if (stage_time == 0)
8386                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8387         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8388                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8389         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8390                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8391
8392         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8393                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8394                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8395                 stage_time = ioa_cfg->transop_timeout;
8396                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8397         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8398                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8399                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8400                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8401                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8402                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8403                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8404                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8405                         return IPR_RC_JOB_CONTINUE;
8406                 }
8407         }
8408
8409         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8410         ipr_cmd->timer.function = ipr_oper_timeout;
8411         ipr_cmd->done = ipr_reset_ioa_job;
8412         add_timer(&ipr_cmd->timer);
8413
8414         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8415
8416         return IPR_RC_JOB_RETURN;
8417 }
8418
8419 /**
8420  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8421  * @ipr_cmd:    ipr command struct
8422  *
8423  * This function reinitializes some control blocks and
8424  * enables destructive diagnostics on the adapter.
8425  *
8426  * Return value:
8427  *      IPR_RC_JOB_RETURN
8428  **/
8429 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8430 {
8431         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8432         volatile u32 int_reg;
8433         volatile u64 maskval;
8434         int i;
8435
8436         ENTER;
8437         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8438         ipr_init_ioa_mem(ioa_cfg);
8439
8440         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8441                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8442                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8443                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8444         }
8445         if (ioa_cfg->sis64) {
8446                 /* Set the adapter to the correct endian mode. */
8447                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8448                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8449         }
8450
8451         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8452
8453         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8454                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8455                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8456                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8457                 return IPR_RC_JOB_CONTINUE;
8458         }
8459
8460         /* Enable destructive diagnostics on IOA */
8461         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8462
8463         if (ioa_cfg->sis64) {
8464                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8465                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8466                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8467         } else
8468                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8469
8470         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8471
8472         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8473
8474         if (ioa_cfg->sis64) {
8475                 ipr_cmd->job_step = ipr_reset_next_stage;
8476                 return IPR_RC_JOB_CONTINUE;
8477         }
8478
8479         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8480         ipr_cmd->timer.function = ipr_oper_timeout;
8481         ipr_cmd->done = ipr_reset_ioa_job;
8482         add_timer(&ipr_cmd->timer);
8483         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8484
8485         LEAVE;
8486         return IPR_RC_JOB_RETURN;
8487 }
8488
8489 /**
8490  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8491  * @ipr_cmd:    ipr command struct
8492  *
8493  * This function is invoked when an adapter dump has run out
8494  * of processing time.
8495  *
8496  * Return value:
8497  *      IPR_RC_JOB_CONTINUE
8498  **/
8499 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8500 {
8501         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8502
8503         if (ioa_cfg->sdt_state == GET_DUMP)
8504                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8505         else if (ioa_cfg->sdt_state == READ_DUMP)
8506                 ioa_cfg->sdt_state = ABORT_DUMP;
8507
8508         ioa_cfg->dump_timeout = 1;
8509         ipr_cmd->job_step = ipr_reset_alert;
8510
8511         return IPR_RC_JOB_CONTINUE;
8512 }
8513
8514 /**
8515  * ipr_unit_check_no_data - Log a unit check/no data error
8516  * @ioa_cfg:            ioa config struct
8517  *
8518  * Logs an error indicating the adapter unit checked, but for some
8519  * reason, we were unable to fetch the unit check buffer.
8520  *
8521  * Return value:
8522  *      nothing
8523  **/
8524 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8525 {
8526         ioa_cfg->errors_logged++;
8527         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8528 }
8529
8530 /**
8531  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8532  * @ioa_cfg:            ioa config struct
8533  *
8534  * Fetches the unit check buffer from the adapter by clocking the data
8535  * through the mailbox register.
8536  *
8537  * Return value:
8538  *      nothing
8539  **/
8540 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8541 {
8542         unsigned long mailbox;
8543         struct ipr_hostrcb *hostrcb;
8544         struct ipr_uc_sdt sdt;
8545         int rc, length;
8546         u32 ioasc;
8547
8548         mailbox = readl(ioa_cfg->ioa_mailbox);
8549
8550         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8551                 ipr_unit_check_no_data(ioa_cfg);
8552                 return;
8553         }
8554
8555         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8556         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8557                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8558
8559         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8560             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8561             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8562                 ipr_unit_check_no_data(ioa_cfg);
8563                 return;
8564         }
8565
8566         /* Find length of the first sdt entry (UC buffer) */
8567         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8568                 length = be32_to_cpu(sdt.entry[0].end_token);
8569         else
8570                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8571                           be32_to_cpu(sdt.entry[0].start_token)) &
8572                           IPR_FMT2_MBX_ADDR_MASK;
8573
8574         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8575                              struct ipr_hostrcb, queue);
8576         list_del_init(&hostrcb->queue);
8577         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8578
8579         rc = ipr_get_ldump_data_section(ioa_cfg,
8580                                         be32_to_cpu(sdt.entry[0].start_token),
8581                                         (__be32 *)&hostrcb->hcam,
8582                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8583
8584         if (!rc) {
8585                 ipr_handle_log_data(ioa_cfg, hostrcb);
8586                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8587                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8588                     ioa_cfg->sdt_state == GET_DUMP)
8589                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8590         } else
8591                 ipr_unit_check_no_data(ioa_cfg);
8592
8593         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8594 }
8595
8596 /**
8597  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8598  * @ipr_cmd:    ipr command struct
8599  *
8600  * Description: This function fetches the unit check buffer from the adapter.
8601  *
8602  * Return value:
8603  *      IPR_RC_JOB_RETURN
8604  **/
8605 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8606 {
8607         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8608
8609         ENTER;
8610         ioa_cfg->ioa_unit_checked = 0;
8611         ipr_get_unit_check_buffer(ioa_cfg);
8612         ipr_cmd->job_step = ipr_reset_alert;
8613         ipr_reset_start_timer(ipr_cmd, 0);
8614
8615         LEAVE;
8616         return IPR_RC_JOB_RETURN;
8617 }
8618
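     /**
      * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
      * @ipr_cmd:    ipr command struct
      *
      * Description: On SIS64 adapters, polls until the mailbox register is
      * stable or the wait times out, then marks the dump ready to be read
      * and schedules the worker thread to collect it.
      *
      * Return value:
      *      IPR_RC_JOB_RETURN
      **/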
8619 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8620 {
8621         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8622
8623         ENTER;
8624
8625         if (ioa_cfg->sdt_state != GET_DUMP)
8626                 return IPR_RC_JOB_RETURN;
8627
8628         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8629             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8630              IPR_PCII_MAILBOX_STABLE)) {
8631
8632                 if (!ipr_cmd->u.time_left)
8633                         dev_err(&ioa_cfg->pdev->dev,
8634                                 "Timed out waiting for Mailbox register.\n");
8635
8636                 ioa_cfg->sdt_state = READ_DUMP;
8637                 ioa_cfg->dump_timeout = 0;
8638                 if (ioa_cfg->sis64)
8639                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8640                 else
8641                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8642                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8643                 schedule_work(&ioa_cfg->work_q);
8644
8645         } else {
8646                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8647                 ipr_reset_start_timer(ipr_cmd,
8648                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8649         }
8650
8651         LEAVE;
8652         return IPR_RC_JOB_RETURN;
8653 }
8654
8655 /**
8656  * ipr_reset_restore_cfg_space - Restore PCI config space.
8657  * @ipr_cmd:    ipr command struct
8658  *
8659  * Description: This function restores the saved PCI config space of
8660  * the adapter, fails all outstanding ops back to the callers, and
8661  * fetches the dump/unit check if applicable to this reset.
8662  *
8663  * Return value:
8664  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8665  **/
8666 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8667 {
8668         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8669         u32 int_reg;
8670
8671         ENTER;
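             /* pci_restore_state() is a no-op unless state_saved is set */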
8672         ioa_cfg->pdev->state_saved = true;
8673         pci_restore_state(ioa_cfg->pdev);
8674
8675         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8676                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8677                 return IPR_RC_JOB_CONTINUE;
8678         }
8679
8680         ipr_fail_all_ops(ioa_cfg);
8681
8682         if (ioa_cfg->sis64) {
8683                 /* Set the adapter to the correct endian mode. */
8684                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8685                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8686         }
8687
8688         if (ioa_cfg->ioa_unit_checked) {
8689                 if (ioa_cfg->sis64) {
8690                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8691                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8692                         return IPR_RC_JOB_RETURN;
8693                 } else {
8694                         ioa_cfg->ioa_unit_checked = 0;
8695                         ipr_get_unit_check_buffer(ioa_cfg);
8696                         ipr_cmd->job_step = ipr_reset_alert;
8697                         ipr_reset_start_timer(ipr_cmd, 0);
8698                         return IPR_RC_JOB_RETURN;
8699                 }
8700         }
8701
8702         if (ioa_cfg->in_ioa_bringdown) {
8703                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8704         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8705                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8706                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8707         } else {
8708                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8709         }
8710
8711         LEAVE;
8712         return IPR_RC_JOB_CONTINUE;
8713 }
8714
8715 /**
8716  * ipr_reset_bist_done - BIST has completed on the adapter.
8717  * @ipr_cmd:    ipr command struct
8718  *
8719  * Description: Unblock config space and resume the reset process.
8720  *
8721  * Return value:
8722  *      IPR_RC_JOB_CONTINUE
8723  **/
8724 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8725 {
8726         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8727
8728         ENTER;
8729         if (ioa_cfg->cfg_locked)
8730                 pci_cfg_access_unlock(ioa_cfg->pdev);
8731         ioa_cfg->cfg_locked = 0;
8732         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8733         LEAVE;
8734         return IPR_RC_JOB_CONTINUE;
8735 }
8736
8737 /**
8738  * ipr_reset_start_bist - Run BIST on the adapter.
8739  * @ipr_cmd:    ipr command struct
8740  *
8741  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8742  *
8743  * Return value:
8744  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8745  **/
8746 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8747 {
8748         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8749         int rc = PCIBIOS_SUCCESSFUL;
8750
8751         ENTER;
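             /* SIS64 chips start BIST through MMIO; others use PCI config space */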
8752         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8753                 writel(IPR_UPROCI_SIS64_START_BIST,
8754                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8755         else
8756                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8757
8758         if (rc == PCIBIOS_SUCCESSFUL) {
8759                 ipr_cmd->job_step = ipr_reset_bist_done;
8760                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8761                 rc = IPR_RC_JOB_RETURN;
8762         } else {
8763                 if (ioa_cfg->cfg_locked)
8764                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8765                 ioa_cfg->cfg_locked = 0;
8766                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8767                 rc = IPR_RC_JOB_CONTINUE;
8768         }
8769
8770         LEAVE;
8771         return rc;
8772 }
8773
8774 /**
8775  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8776  * @ipr_cmd:    ipr command struct
8777  *
8778  * Description: This clears PCI reset to the adapter and delays two seconds.
8779  *
8780  * Return value:
8781  *      IPR_RC_JOB_RETURN
8782  **/
8783 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8784 {
8785         ENTER;
8786         ipr_cmd->job_step = ipr_reset_bist_done;
8787         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8788         LEAVE;
8789         return IPR_RC_JOB_RETURN;
8790 }
8791
8792 /**
8793  * ipr_reset_reset_work - Pulse a PCIe warm reset
8794  * @work:       work struct
8795  *
8796  * Description: This pulses a warm reset to the slot.
8797  *
8798  **/
8799 static void ipr_reset_reset_work(struct work_struct *work)
8800 {
8801         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8802         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8803         struct pci_dev *pdev = ioa_cfg->pdev;
8804         unsigned long lock_flags = 0;
8805
8806         ENTER;
8807         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8808         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8809         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8810
8811         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8812         if (ioa_cfg->reset_cmd == ipr_cmd)
8813                 ipr_reset_ioa_job(ipr_cmd);
8814         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8815         LEAVE;
8816 }
8817
8818 /**
8819  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8820  * @ipr_cmd:    ipr command struct
8821  *
8822  * Description: This asserts PCI reset to the adapter.
8823  *
8824  * Return value:
8825  *      IPR_RC_JOB_RETURN
8826  **/
8827 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8828 {
8829         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8830
8831         ENTER;
8832         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8833         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8834         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8835         LEAVE;
8836         return IPR_RC_JOB_RETURN;
8837 }
8838
8839 /**
8840  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8841  * @ipr_cmd:    ipr command struct
8842  *
8843  * Description: This attempts to block config access to the IOA.
8844  *
8845  * Return value:
8846  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8847  **/
8848 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8849 {
8850         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8851         int rc = IPR_RC_JOB_CONTINUE;
8852
8853         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8854                 ioa_cfg->cfg_locked = 1;
8855                 ipr_cmd->job_step = ioa_cfg->reset;
8856         } else {
8857                 if (ipr_cmd->u.time_left) {
8858                         rc = IPR_RC_JOB_RETURN;
8859                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8860                         ipr_reset_start_timer(ipr_cmd,
8861                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8862                 } else {
8863                         ipr_cmd->job_step = ioa_cfg->reset;
8864                         dev_err(&ioa_cfg->pdev->dev,
8865                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8866                 }
8867         }
8868
8869         return rc;
8870 }
8871
8872 /**
8873  * ipr_reset_block_config_access - Block config access to the IOA
8874  * @ipr_cmd:    ipr command struct
8875  *
8876  * Description: This attempts to block config access to the IOA.
8877  *
8878  * Return value:
8879  *      IPR_RC_JOB_CONTINUE
8880  **/
8881 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8882 {
8883         ipr_cmd->ioa_cfg->cfg_locked = 0;
8884         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8885         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8886         return IPR_RC_JOB_CONTINUE;
8887 }
8888
8889 /**
8890  * ipr_reset_allowed - Query whether or not IOA can be reset
8891  * @ioa_cfg:    ioa config struct
8892  *
8893  * Return value:
8894  *      0 if reset not allowed / non-zero if reset is allowed
8895  **/
8896 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8897 {
8898         volatile u32 temp_reg;
8899
8900         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8901         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8902 }
8903
8904 /**
8905  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8906  * @ipr_cmd:    ipr command struct
8907  *
8908  * Description: This function waits for adapter permission to run BIST,
8909  * then runs BIST. If the adapter does not give permission after a
8910  * reasonable time, we will reset it anyway. Resetting the adapter
8911  * without warning it carries the risk of losing the persistent
8912  * error log. If the adapter is reset while it is writing to its
8913  * flash, the affected flash segment will have bad ECC and be
8914  * zeroed.
8915  *
8916  * Return value:
8917  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8918  **/
8919 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8920 {
8921         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8922         int rc = IPR_RC_JOB_RETURN;
8923
8924         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8925                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8926                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8927         } else {
8928                 ipr_cmd->job_step = ipr_reset_block_config_access;
8929                 rc = IPR_RC_JOB_CONTINUE;
8930         }
8931
8932         return rc;
8933 }
8934
8935 /**
8936  * ipr_reset_alert - Alert the adapter of a pending reset
8937  * @ipr_cmd:    ipr command struct
8938  *
8939  * Description: This function alerts the adapter that it will be reset.
8940  * If memory space is not currently enabled, proceed directly
8941  * to running BIST on the adapter. The timer must always be started
8942  * so we guarantee we do not run BIST from ipr_isr.
8943  *
8944  * Return value:
8945  *      IPR_RC_JOB_RETURN
8946  **/
8947 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8948 {
8949         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8950         u16 cmd_reg;
8951         int rc;
8952
8953         ENTER;
8954         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8955
8956         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8957                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8958                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8959                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8960         } else {
8961                 ipr_cmd->job_step = ipr_reset_block_config_access;
8962         }
8963
8964         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8965         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8966
8967         LEAVE;
8968         return IPR_RC_JOB_RETURN;
8969 }
8970
8971 /**
8972  * ipr_reset_quiesce_done - Complete IOA disconnect
8973  * @ipr_cmd:    ipr command struct
8974  *
8975  * Description: Freeze the adapter to complete quiesce processing
8976  *
8977  * Return value:
8978  *      IPR_RC_JOB_CONTINUE
8979  **/
8980 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8981 {
8982         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8983
8984         ENTER;
8985         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8986         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8987         LEAVE;
8988         return IPR_RC_JOB_CONTINUE;
8989 }
8990
8991 /**
8992  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8993  * @ipr_cmd:    ipr command struct
8994  *
8995  * Description: Ensure nothing is outstanding to the IOA and
8996  * proceed with IOA disconnect. Otherwise reset the IOA.
8997  *
8998  * Return value:
8999  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9000  **/
9001 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9002 {
9003         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9004         struct ipr_cmnd *loop_cmd;
9005         struct ipr_hrr_queue *hrrq;
9006         int rc = IPR_RC_JOB_CONTINUE;
9007         int count = 0;
9008
9009         ENTER;
9010         ipr_cmd->job_step = ipr_reset_quiesce_done;
9011
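             /*
              * If anything is still outstanding to the IOA, a clean quiesce
              * is not possible; initiate a full adapter reset instead.
              */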
9012         for_each_hrrq(hrrq, ioa_cfg) {
9013                 spin_lock(&hrrq->_lock);
9014                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9015                         count++;
9016                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9017                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9018                         rc = IPR_RC_JOB_RETURN;
9019                         break;
9020                 }
9021                 spin_unlock(&hrrq->_lock);
9022
9023                 if (count)
9024                         break;
9025         }
9026
9027         LEAVE;
9028         return rc;
9029 }
9030
9031 /**
9032  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9033  * @ipr_cmd:    ipr command struct
9034  *
9035  * Description: Cancel any outstanding HCAMs to the IOA.
9036  *
9037  * Return value:
9038  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9039  **/
9040 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9041 {
9042         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9043         int rc = IPR_RC_JOB_CONTINUE;
9044         struct ipr_cmd_pkt *cmd_pkt;
9045         struct ipr_cmnd *hcam_cmd;
9046         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9047
9048         ENTER;
9049         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9050
9051         if (!hrrq->ioa_is_dead) {
9052                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9053                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9054                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9055                                         continue;
9056
9057                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9059                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9060                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9061                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9062                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
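                                     /*
                                      * Clock the 64-bit IOARCB address of the
                                      * HCAM into the CDB big-endian: bytes
                                      * 10-13 carry the upper 32 bits, bytes
                                      * 2-5 the lower 32 bits.
                                      */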
9063                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9064                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9065                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9066                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9067                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9068                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9069                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9070                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9071
9072                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9073                                            IPR_CANCEL_TIMEOUT);
9074
9075                                 rc = IPR_RC_JOB_RETURN;
9076                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9077                                 break;
9078                         }
9079                 }
9080         } else
9081                 ipr_cmd->job_step = ipr_reset_alert;
9082
9083         LEAVE;
9084         return rc;
9085 }
9086
9087 /**
9088  * ipr_reset_ucode_download_done - Microcode download completion
9089  * @ipr_cmd:    ipr command struct
9090  *
9091  * Description: This function unmaps the microcode download buffer.
9092  *
9093  * Return value:
9094  *      IPR_RC_JOB_CONTINUE
9095  **/
9096 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9097 {
9098         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9099         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9100
9101         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9102                      sglist->num_sg, DMA_TO_DEVICE);
9103
9104         ipr_cmd->job_step = ipr_reset_alert;
9105         return IPR_RC_JOB_CONTINUE;
9106 }
9107
9108 /**
9109  * ipr_reset_ucode_download - Download microcode to the adapter
9110  * @ipr_cmd:    ipr command struct
9111  *
9112  * Description: This function checks to see if there is microcode
9113  * to download to the adapter. If there is, a download is performed.
9114  *
9115  * Return value:
9116  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9117  **/
9118 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9119 {
9120         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9121         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9122
9123         ENTER;
9124         ipr_cmd->job_step = ipr_reset_alert;
9125
9126         if (!sglist)
9127                 return IPR_RC_JOB_CONTINUE;
9128
9129         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9130         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9131         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9132         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9133         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9134         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9135         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9136
9137         if (ioa_cfg->sis64)
9138                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9139         else
9140                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9141         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9142
9143         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9144                    IPR_WRITE_BUFFER_TIMEOUT);
9145
9146         LEAVE;
9147         return IPR_RC_JOB_RETURN;
9148 }
9149
9150 /**
9151  * ipr_reset_shutdown_ioa - Shutdown the adapter
9152  * @ipr_cmd:    ipr command struct
9153  *
9154  * Description: This function issues an adapter shutdown of the
9155  * specified type to the specified adapter as part of the
9156  * adapter reset job.
9157  *
9158  * Return value:
9159  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9160  **/
9161 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9162 {
9163         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9164         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9165         unsigned long timeout;
9166         int rc = IPR_RC_JOB_CONTINUE;
9167
9168         ENTER;
9169         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9170                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9171         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9172                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9173                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9174                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9175                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9176                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9177
9178                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9179                         timeout = IPR_SHUTDOWN_TIMEOUT;
9180                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9181                         timeout = IPR_INTERNAL_TIMEOUT;
9182                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9183                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9184                 else
9185                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9186
9187                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9188
9189                 rc = IPR_RC_JOB_RETURN;
9190                 ipr_cmd->job_step = ipr_reset_ucode_download;
9191         } else
9192                 ipr_cmd->job_step = ipr_reset_alert;
9193
9194         LEAVE;
9195         return rc;
9196 }
9197
9198 /**
9199  * ipr_reset_ioa_job - Adapter reset job
9200  * @ipr_cmd:    ipr command struct
9201  *
9202  * Description: This function is the job router for the adapter reset job.
9203  *
9204  * Return value:
9205  *      none
9206  **/
9207 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9208 {
9209         u32 rc, ioasc;
9210         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9211
9212         do {
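             /*
              * Run job steps inline while they return IPR_RC_JOB_CONTINUE;
              * IPR_RC_JOB_RETURN means the step deferred to a timer,
              * interrupt, or worker that will re-enter this routine later.
              */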
9213                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9214
9215                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9216                         /*
9217                          * We are doing nested adapter resets and this is
9218                          * not the current reset job.
9219                          */
9220                         list_add_tail(&ipr_cmd->queue,
9221                                         &ipr_cmd->hrrq->hrrq_free_q);
9222                         return;
9223                 }
9224
9225                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9226                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9227                         if (rc == IPR_RC_JOB_RETURN)
9228                                 return;
9229                 }
9230
9231                 ipr_reinit_ipr_cmnd(ipr_cmd);
9232                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9233                 rc = ipr_cmd->job_step(ipr_cmd);
9234         } while (rc == IPR_RC_JOB_CONTINUE);
9235 }
9236
9237 /**
9238  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9239  * @ioa_cfg:            ioa config struct
9240  * @job_step:           first job step of reset job
9241  * @shutdown_type:      shutdown type
9242  *
9243  * Description: This function will initiate the reset of the given adapter
9244  * starting at the selected job step.
9245  * If the caller needs to wait on the completion of the reset,
9246  * the caller must sleep on the reset_wait_q.
9247  *
9248  * Return value:
9249  *      none
9250  **/
9251 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9252                                     int (*job_step) (struct ipr_cmnd *),
9253                                     enum ipr_shutdown_type shutdown_type)
9254 {
9255         struct ipr_cmnd *ipr_cmd;
9256         int i;
9257
9258         ioa_cfg->in_reset_reload = 1;
9259         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9260                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9261                 ioa_cfg->hrrq[i].allow_cmds = 0;
9262                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9263         }
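             /* Ensure allow_cmds is cleared before the reset proceeds */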
9264         wmb();
9265         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9266                 ioa_cfg->scsi_unblock = 0;
9267                 ioa_cfg->scsi_blocked = 1;
9268                 scsi_block_requests(ioa_cfg->host);
9269         }
9270
9271         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9272         ioa_cfg->reset_cmd = ipr_cmd;
9273         ipr_cmd->job_step = job_step;
9274         ipr_cmd->u.shutdown_type = shutdown_type;
9275
9276         ipr_reset_ioa_job(ipr_cmd);
9277 }
9278
9279 /**
9280  * ipr_initiate_ioa_reset - Initiate an adapter reset
9281  * @ioa_cfg:            ioa config struct
9282  * @shutdown_type:      shutdown type
9283  *
9284  * Description: This function will initiate the reset of the given adapter.
9285  * If the caller needs to wait on the completion of the reset,
9286  * the caller must sleep on the reset_wait_q.
9287  *
9288  * Return value:
9289  *      none
9290  **/
9291 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9292                                    enum ipr_shutdown_type shutdown_type)
9293 {
9294         int i;
9295
9296         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9297                 return;
9298
9299         if (ioa_cfg->in_reset_reload) {
9300                 if (ioa_cfg->sdt_state == GET_DUMP)
9301                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9302                 else if (ioa_cfg->sdt_state == READ_DUMP)
9303                         ioa_cfg->sdt_state = ABORT_DUMP;
9304         }
9305
9306         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9307                 dev_err(&ioa_cfg->pdev->dev,
9308                         "IOA taken offline - error recovery failed\n");
9309
9310                 ioa_cfg->reset_retries = 0;
9311                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9312                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9313                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9314                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9315                 }
9316                 wmb();
9317
9318                 if (ioa_cfg->in_ioa_bringdown) {
9319                         ioa_cfg->reset_cmd = NULL;
9320                         ioa_cfg->in_reset_reload = 0;
9321                         ipr_fail_all_ops(ioa_cfg);
9322                         wake_up_all(&ioa_cfg->reset_wait_q);
9323
9324                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9325                                 ioa_cfg->scsi_unblock = 1;
9326                                 schedule_work(&ioa_cfg->work_q);
9327                         }
9328                         return;
9329                 } else {
9330                         ioa_cfg->in_ioa_bringdown = 1;
9331                         shutdown_type = IPR_SHUTDOWN_NONE;
9332                 }
9333         }
9334
9335         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9336                                 shutdown_type);
9337 }
9338
9339 /**
9340  * ipr_reset_freeze - Hold off all I/O activity
9341  * @ipr_cmd:    ipr command struct
9342  *
9343  * Description: If the PCI slot is frozen, hold off all I/O
9344  * activity; then, as soon as the slot is available again,
9345  * initiate an adapter reset.
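      *
      * Return value:
      *      IPR_RC_JOB_RETURN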
9346  */
9347 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9348 {
9349         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9350         int i;
9351
9352         /* Disallow new interrupts, avoid loop */
9353         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9354                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9355                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9356                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9357         }
9358         wmb();
9359         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9360         ipr_cmd->done = ipr_reset_ioa_job;
9361         return IPR_RC_JOB_RETURN;
9362 }
9363
9364 /**
9365  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9366  * @pdev:       PCI device struct
9367  *
9368  * Description: This routine is called to tell us that the MMIO
9369  * access to the IOA has been restored.
9370  */
9371 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9372 {
9373         unsigned long flags = 0;
9374         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9375
9376         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9377         if (!ioa_cfg->probe_done)
9378                 pci_save_state(pdev);
9379         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9380         return PCI_ERS_RESULT_NEED_RESET;
9381 }
9382
9383 /**
9384  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9385  * @pdev:       PCI device struct
9386  *
9387  * Description: This routine is called to tell us that the PCI bus
9388  * is down. Can't do anything here, except put the device driver
9389  * into a holding pattern, waiting for the PCI bus to come back.
9390  */
9391 static void ipr_pci_frozen(struct pci_dev *pdev)
9392 {
9393         unsigned long flags = 0;
9394         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9395
9396         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9397         if (ioa_cfg->probe_done)
9398                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9399         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9400 }
9401
9402 /**
9403  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9404  * @pdev:       PCI device struct
9405  *
9406  * Description: This routine is called by the pci error recovery
9407  * code after the PCI slot has been reset, just before we
9408  * should resume normal operations.
9409  */
9410 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9411 {
9412         unsigned long flags = 0;
9413         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9414
9415         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9416         if (ioa_cfg->probe_done) {
9417                 if (ioa_cfg->needs_warm_reset)
9418                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9419                 else
9420                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9421                                                 IPR_SHUTDOWN_NONE);
9422         } else
9423                 wake_up_all(&ioa_cfg->eeh_wait_q);
9424         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9425         return PCI_ERS_RESULT_RECOVERED;
9426 }
9427
9428 /**
9429  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9430  * @pdev:       PCI device struct
9431  *
9432  * Description: This routine is called when the PCI bus has
9433  * permanently failed.
9434  */
9435 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9436 {
9437         unsigned long flags = 0;
9438         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9439         int i;
9440
9441         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9442         if (ioa_cfg->probe_done) {
9443                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9444                         ioa_cfg->sdt_state = ABORT_DUMP;
9445                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9446                 ioa_cfg->in_ioa_bringdown = 1;
9447                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9448                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9449                         ioa_cfg->hrrq[i].allow_cmds = 0;
9450                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9451                 }
9452                 wmb();
9453                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9454         } else
9455                 wake_up_all(&ioa_cfg->eeh_wait_q);
9456         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9457 }
9458
9459 /**
9460  * ipr_pci_error_detected - Called when a PCI error is detected.
9461  * @pdev:       PCI device struct
9462  * @state:      PCI channel state
9463  *
9464  * Description: Called when a PCI error is detected.
9465  *
9466  * Return value:
9467  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9468  */
9469 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9470                                                pci_channel_state_t state)
9471 {
9472         switch (state) {
9473         case pci_channel_io_frozen:
9474                 ipr_pci_frozen(pdev);
9475                 return PCI_ERS_RESULT_CAN_RECOVER;
9476         case pci_channel_io_perm_failure:
9477                 ipr_pci_perm_failure(pdev);
9478                 return PCI_ERS_RESULT_DISCONNECT;
9480         default:
9481                 break;
9482         }
9483         return PCI_ERS_RESULT_NEED_RESET;
9484 }
9485
9486 /**
9487  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9488  * @ioa_cfg:    ioa cfg struct
9489  *
9490  * Description: This is the second phase of adapter initialization.
9491  * This function takes care of initializing the adapter to the point
9492  * where it can accept new commands.
9493  *
9494  * Return value:
9495  *      0 on success / -EIO on failure
9496  **/
9497 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9498 {
9499         int rc = 0;
9500         unsigned long host_lock_flags = 0;
9501
9502         ENTER;
9503         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9504         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
9505         ioa_cfg->probe_done = 1;
9506         if (ioa_cfg->needs_hard_reset) {
9507                 ioa_cfg->needs_hard_reset = 0;
9508                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9509         } else
9510                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9511                                         IPR_SHUTDOWN_NONE);
9512         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9513
9514         LEAVE;
9515         return rc;
9516 }
9517
9518 /**
9519  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9520  * @ioa_cfg:    ioa config struct
9521  *
9522  * Return value:
9523  *      none
9524  **/
9525 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9526 {
9527         int i;
9528
9529         if (ioa_cfg->ipr_cmnd_list) {
9530                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9531                         if (ioa_cfg->ipr_cmnd_list[i])
9532                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9533                                               ioa_cfg->ipr_cmnd_list[i],
9534                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9535
9536                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9537                 }
9538         }
9539
9540         if (ioa_cfg->ipr_cmd_pool)
9541                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9542
9543         kfree(ioa_cfg->ipr_cmnd_list);
9544         kfree(ioa_cfg->ipr_cmnd_list_dma);
9545         ioa_cfg->ipr_cmnd_list = NULL;
9546         ioa_cfg->ipr_cmnd_list_dma = NULL;
9547         ioa_cfg->ipr_cmd_pool = NULL;
9548 }
9549
9550 /**
9551  * ipr_free_mem - Frees memory allocated for an adapter
9552  * @ioa_cfg:    ioa cfg struct
9553  *
9554  * Return value:
9555  *      nothing
9556  **/
9557 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9558 {
9559         int i;
9560
9561         kfree(ioa_cfg->res_entries);
9562         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9563                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9564         ipr_free_cmd_blks(ioa_cfg);
9565
9566         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9567                 dma_free_coherent(&ioa_cfg->pdev->dev,
9568                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9569                                   ioa_cfg->hrrq[i].host_rrq,
9570                                   ioa_cfg->hrrq[i].host_rrq_dma);
9571
9572         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9573                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9574
9575         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9576                 dma_free_coherent(&ioa_cfg->pdev->dev,
9577                                   sizeof(struct ipr_hostrcb),
9578                                   ioa_cfg->hostrcb[i],
9579                                   ioa_cfg->hostrcb_dma[i]);
9580         }
9581
9582         ipr_free_dump(ioa_cfg);
9583         kfree(ioa_cfg->trace);
9584 }
9585
9586 /**
9587  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9588  * @ioa_cfg:    ioa config struct
9589  *
9590  * This function frees all allocated IRQs for the
9591  * specified adapter.
9592  *
9593  * Return value:
9594  *      none
9595  **/
9596 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9597 {
9598         struct pci_dev *pdev = ioa_cfg->pdev;
9599         int i;
9600
9601         for (i = 0; i < ioa_cfg->nvectors; i++)
9602                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9603         pci_free_irq_vectors(pdev);
9604 }
9605
9606 /**
9607  * ipr_free_all_resources - Free all allocated resources for an adapter.
9608  * @ioa_cfg:    ioa config struct
9609  *
9610  * This function frees all allocated resources for the
9611  * specified adapter.
9612  *
9613  * Return value:
9614  *      none
9615  **/
9616 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9617 {
9618         struct pci_dev *pdev = ioa_cfg->pdev;
9619
9620         ENTER;
9621         ipr_free_irqs(ioa_cfg);
9622         if (ioa_cfg->reset_work_q)
9623                 destroy_workqueue(ioa_cfg->reset_work_q);
9624         iounmap(ioa_cfg->hdw_dma_regs);
9625         pci_release_regions(pdev);
9626         ipr_free_mem(ioa_cfg);
9627         scsi_host_put(ioa_cfg->host);
9628         pci_disable_device(pdev);
9629         LEAVE;
9630 }
9631
9632 /**
9633  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9634  * @ioa_cfg:    ioa config struct
9635  *
9636  * Return value:
9637  *      0 on success / -ENOMEM on allocation failure
9638  **/
9639 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9640 {
9641         struct ipr_cmnd *ipr_cmd;
9642         struct ipr_ioarcb *ioarcb;
9643         dma_addr_t dma_addr;
9644         int i, entries_each_hrrq, hrrq_id = 0;
9645
9646         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9647                                                 sizeof(struct ipr_cmnd), 512, 0);
9648
9649         if (!ioa_cfg->ipr_cmd_pool)
9650                 return -ENOMEM;
9651
9652         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9653         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9654
9655         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9656                 ipr_free_cmd_blks(ioa_cfg);
9657                 return -ENOMEM;
9658         }
9659
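             /*
              * With multiple HRRQs, queue 0 is reserved for internal commands
              * and the remaining command blocks are divided evenly among the
              * other queues; any remainder is folded into the last queue below.
              */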
9660         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9661                 if (ioa_cfg->hrrq_num > 1) {
9662                         if (i == 0) {
9663                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9664                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9665                                 ioa_cfg->hrrq[i].max_cmd_id =
9666                                         (entries_each_hrrq - 1);
9667                         } else {
9668                                 entries_each_hrrq =
9669                                         IPR_NUM_BASE_CMD_BLKS/
9670                                         (ioa_cfg->hrrq_num - 1);
9671                                 ioa_cfg->hrrq[i].min_cmd_id =
9672                                         IPR_NUM_INTERNAL_CMD_BLKS +
9673                                         (i - 1) * entries_each_hrrq;
9674                                 ioa_cfg->hrrq[i].max_cmd_id =
9675                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9676                                         i * entries_each_hrrq - 1);
9677                         }
9678                 } else {
9679                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9680                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9681                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9682                 }
9683                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9684         }
9685
9686         BUG_ON(ioa_cfg->hrrq_num == 0);
9687
9688         i = IPR_NUM_CMD_BLKS -
9689                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9690         if (i > 0) {
9691                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9692                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9693         }
9694
9695         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9696                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9697                                 GFP_KERNEL, &dma_addr);
9698
9699                 if (!ipr_cmd) {
9700                         ipr_free_cmd_blks(ioa_cfg);
9701                         return -ENOMEM;
9702                 }
9703
9704                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9705                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9706
9707                 ioarcb = &ipr_cmd->ioarcb;
9708                 ipr_cmd->dma_addr = dma_addr;
9709                 if (ioa_cfg->sis64)
9710                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9711                 else
9712                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9713
9714                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9715                 if (ioa_cfg->sis64) {
9716                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9717                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9718                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9719                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9720                 } else {
9721                         ioarcb->write_ioadl_addr =
9722                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9723                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9724                         ioarcb->ioasa_host_pci_addr =
9725                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9726                 }
9727                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9728                 ipr_cmd->cmd_index = i;
9729                 ipr_cmd->ioa_cfg = ioa_cfg;
9730                 ipr_cmd->sense_buffer_dma = dma_addr +
9731                         offsetof(struct ipr_cmnd, sense_buffer);
9732
9733                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9734                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9735                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9736                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9737                         hrrq_id++;
9738         }
9739
9740         return 0;
9741 }
9742
9743 /**
9744  * ipr_alloc_mem - Allocate memory for an adapter
9745  * @ioa_cfg:    ioa config struct
9746  *
9747  * Return value:
9748  *      0 on success / non-zero for error
9749  **/
9750 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9751 {
9752         struct pci_dev *pdev = ioa_cfg->pdev;
9753         int i, rc = -ENOMEM;
9754
9755         ENTER;
9756         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9757                                        sizeof(struct ipr_resource_entry),
9758                                        GFP_KERNEL);
9759
9760         if (!ioa_cfg->res_entries)
9761                 goto out;
9762
9763         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9764                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9765                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9766         }
9767
9768         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9769                                               sizeof(struct ipr_misc_cbs),
9770                                               &ioa_cfg->vpd_cbs_dma,
9771                                               GFP_KERNEL);
9772
9773         if (!ioa_cfg->vpd_cbs)
9774                 goto out_free_res_entries;
9775
9776         if (ipr_alloc_cmd_blks(ioa_cfg))
9777                 goto out_free_vpd_cbs;
9778
9779         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9780                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9781                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9782                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9783                                         GFP_KERNEL);
9784
9785                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9786                         while (--i >= 0)
9787                                 dma_free_coherent(&pdev->dev,
9788                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9789                                         ioa_cfg->hrrq[i].host_rrq,
9790                                         ioa_cfg->hrrq[i].host_rrq_dma);
9791                         goto out_ipr_free_cmd_blocks;
9792                 }
9793                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9794         }
9795
9796         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9797                                                   ioa_cfg->cfg_table_size,
9798                                                   &ioa_cfg->cfg_table_dma,
9799                                                   GFP_KERNEL);
9800
9801         if (!ioa_cfg->u.cfg_table)
9802                 goto out_free_host_rrq;
9803
9804         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9805                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9806                                                          sizeof(struct ipr_hostrcb),
9807                                                          &ioa_cfg->hostrcb_dma[i],
9808                                                          GFP_KERNEL);
9809
9810                 if (!ioa_cfg->hostrcb[i])
9811                         goto out_free_hostrcb_dma;
9812
9813                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9814                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9815                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9816                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9817         }
9818
9819         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9820                                  sizeof(struct ipr_trace_entry),
9821                                  GFP_KERNEL);
9822
9823         if (!ioa_cfg->trace)
9824                 goto out_free_hostrcb_dma;
9825
9826         rc = 0;
9827 out:
9828         LEAVE;
9829         return rc;
9830
9831 out_free_hostrcb_dma:
9832         while (i-- > 0) {
9833                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9834                                   ioa_cfg->hostrcb[i],
9835                                   ioa_cfg->hostrcb_dma[i]);
9836         }
9837         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9838                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9839 out_free_host_rrq:
9840         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9841                 dma_free_coherent(&pdev->dev,
9842                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9843                                   ioa_cfg->hrrq[i].host_rrq,
9844                                   ioa_cfg->hrrq[i].host_rrq_dma);
9845         }
9846 out_ipr_free_cmd_blocks:
9847         ipr_free_cmd_blks(ioa_cfg);
9848 out_free_vpd_cbs:
9849         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9850                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9851 out_free_res_entries:
9852         kfree(ioa_cfg->res_entries);
9853         goto out;
9854 }
9855
9856 /**
9857  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9858  * @ioa_cfg:    ioa config struct
9859  *
9860  * Return value:
9861  *      none
9862  **/
9863 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9864 {
9865         int i;
9866
9867         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9868                 ioa_cfg->bus_attr[i].bus = i;
9869                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9870                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9871                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9872                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9873                 else
9874                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9875         }
9876 }
9877
9878 /**
9879  * ipr_init_regs - Initialize IOA registers
9880  * @ioa_cfg:    ioa config struct
9881  *
9882  * Return value:
9883  *      none
9884  **/
9885 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9886 {
9887         const struct ipr_interrupt_offsets *p;
9888         struct ipr_interrupts *t;
9889         void __iomem *base;
9890
9891         p = &ioa_cfg->chip_cfg->regs;
9892         t = &ioa_cfg->regs;
9893         base = ioa_cfg->hdw_dma_regs;
9894
9895         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9896         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9897         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9898         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9899         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9900         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9901         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9902         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9903         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9904         t->ioarrin_reg = base + p->ioarrin_reg;
9905         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9906         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9907         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9908         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9909         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9910         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9911
9912         if (ioa_cfg->sis64) {
9913                 t->init_feedback_reg = base + p->init_feedback_reg;
9914                 t->dump_addr_reg = base + p->dump_addr_reg;
9915                 t->dump_data_reg = base + p->dump_data_reg;
9916                 t->endian_swap_reg = base + p->endian_swap_reg;
9917         }
9918 }
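/*
 * A minimal sketch of the offset-table mapping performed by
 * ipr_init_regs() above: chip-specific offsets are added to the
 * ioremapped BAR base once, so later MMIO accesses use precomputed
 * pointers. demo_offsets and demo_regs are illustrative names only.
 */
struct demo_offsets {
	unsigned long status;
	unsigned long doorbell;
};

struct demo_regs {
	void __iomem *status;
	void __iomem *doorbell;
};

static void demo_map_regs(struct demo_regs *t, const struct demo_offsets *p,
			  void __iomem *base)
{
	t->status = base + p->status;		/* one addition per register */
	t->doorbell = base + p->doorbell;	/* done once at init time */
}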
9919
9920 /**
9921  * ipr_init_ioa_cfg - Initialize IOA config struct
9922  * @ioa_cfg:    ioa config struct
9923  * @host:               scsi host struct
9924  * @pdev:               PCI dev struct
9925  *
9926  * Return value:
9927  *      none
9928  **/
9929 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9930                              struct Scsi_Host *host, struct pci_dev *pdev)
9931 {
9932         int i;
9933
9934         ioa_cfg->host = host;
9935         ioa_cfg->pdev = pdev;
9936         ioa_cfg->log_level = ipr_log_level;
9937         ioa_cfg->doorbell = IPR_DOORBELL;
9938         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9939         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9940         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9941         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9942         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9943         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9944
9945         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9946         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9947         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9948         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9949         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9950         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9951         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9952         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9953         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9954         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9955         ioa_cfg->sdt_state = INACTIVE;
9956
9957         ipr_initialize_bus_attr(ioa_cfg);
9958         ioa_cfg->max_devs_supported = ipr_max_devs;
9959
9960         if (ioa_cfg->sis64) {
9961                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9962                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9963                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9964                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9965                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9966                                            + ((sizeof(struct ipr_config_table_entry64)
9967                                                * ioa_cfg->max_devs_supported)));
9968         } else {
9969                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9970                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9971                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9972                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9973                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9974                                            + ((sizeof(struct ipr_config_table_entry)
9975                                                * ioa_cfg->max_devs_supported)));
9976         }
9977
9978         host->max_channel = IPR_VSET_BUS;
9979         host->unique_id = host->host_no;
9980         host->max_cmd_len = IPR_MAX_CDB_LEN;
9981         host->can_queue = ioa_cfg->max_cmds;
9982         pci_set_drvdata(pdev, ioa_cfg);
9983
9984         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9985                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9986                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9987                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
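		/* HRRQ 0 shares the SCSI host lock; later queues get private locks */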
9988                 if (i == 0)
9989                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9990                 else
9991                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9992         }
9993 }
9994
9995 /**
9996  * ipr_get_chip_info - Find adapter chip information
9997  * @dev_id:             PCI device id struct
9998  *
9999  * Return value:
10000  *      ptr to chip information on success / NULL on failure
10001  **/
10002 static const struct ipr_chip_t *
10003 ipr_get_chip_info(const struct pci_device_id *dev_id)
10004 {
10005         int i;
10006
10007         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10008                 if (ipr_chip[i].vendor == dev_id->vendor &&
10009                     ipr_chip[i].device == dev_id->device)
10010                         return &ipr_chip[i];
10011         return NULL;
10012 }
10013
10014 /**
10015  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10016  *                                              during probe time
10017  * @ioa_cfg:    ioa config struct
10018  *
10019  * Return value:
10020  *      None
10021  **/
10022 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10023 {
10024         struct pci_dev *pdev = ioa_cfg->pdev;
10025
10026         if (pci_channel_offline(pdev)) {
10027                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10028                                    !pci_channel_offline(pdev),
10029                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10030                 pci_restore_state(pdev);
10031         }
10032 }
10033
10034 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10035 {
10036         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10037
10038         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10039                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10040                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
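		/* snprintf() NUL-terminates; the store below is only defensive */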
10041                 ioa_cfg->vectors_info[vec_idx].
10042                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10043         }
10044 }
10045
10046 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10047                 struct pci_dev *pdev)
10048 {
10049         int i, rc;
10050
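	/* vector 0 is requested by the probe path itself, so start at 1 */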
10051         for (i = 1; i < ioa_cfg->nvectors; i++) {
10052                 rc = request_irq(pci_irq_vector(pdev, i),
10053                         ipr_isr_mhrrq,
10054                         0,
10055                         ioa_cfg->vectors_info[i].desc,
10056                         &ioa_cfg->hrrq[i]);
10057                 if (rc) {
10058                         while (--i >= 0)
10059                                 free_irq(pci_irq_vector(pdev, i),
10060                                         &ioa_cfg->hrrq[i]);
10061                         return rc;
10062                 }
10063         }
10064         return 0;
10065 }
10066
10067 /**
10068  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10069  * @irq:                interrupt number
 * @devp:               pointer to ioa config struct
10070  *
10071  * Description: Simply set the msi_received flag to 1 indicating that
10072  * Message Signaled Interrupts are supported.
10073  *
10074  * Return value:
10075  *      IRQ_HANDLED
10076  **/
10077 static irqreturn_t ipr_test_intr(int irq, void *devp)
10078 {
10079         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10080         unsigned long lock_flags = 0;
10081         irqreturn_t rc = IRQ_HANDLED;
10082
10083         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10084         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10085
10086         ioa_cfg->msi_received = 1;
10087         wake_up(&ioa_cfg->msi_wait_q);
10088
10089         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10090         return rc;
10091 }
10092
10093 /**
10094  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10095  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
10096  *
10097  * Description: This routine sets up and initiates a test interrupt to determine
10098  * if the interrupt is received via the ipr_test_intr() service routine.
10099  * If the tests fails, the driver will fall back to LSI.
10100  *
10101  * Return value:
10102  *      0 on success / non-zero on failure
10103  **/
10104 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10105 {
10106         int rc;
10107         volatile u32 int_reg;
10108         unsigned long lock_flags = 0;
10109         int irq = pci_irq_vector(pdev, 0);
10110
10111         ENTER;
10112
10113         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10114         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10115         ioa_cfg->msi_received = 0;
10116         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10117         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10118         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10119         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10120
10121         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10122         if (rc) {
10123                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10124                 return rc;
10125         } else if (ipr_debug)
10126                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10127
10128         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10129         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10130         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10131         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10132         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10133
10134         if (!ioa_cfg->msi_received) {
10135                 /* MSI test failed */
10136                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10137                 rc = -EOPNOTSUPP;
10138         } else if (ipr_debug)
10139                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10140
10141         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10142
10143         free_irq(irq, ioa_cfg);
10144
10145         LEAVE;
10146
10147         return rc;
10148 }
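/*
 * A minimal sketch of the flag-and-waitqueue handshake that
 * ipr_test_msi() builds on: the caller arms a flag, triggers the test
 * interrupt, and sleeps with a timeout while the ISR sets the flag and
 * wakes it. The demo_* names are illustrative; the driver itself
 * additionally serializes the flag under host_lock.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wait_q);
static int demo_received;

static irqreturn_t demo_isr(int irq, void *devp)
{
	demo_received = 1;		/* record that the test IRQ fired */
	wake_up(&demo_wait_q);		/* release the sleeping caller */
	return IRQ_HANDLED;
}

static int demo_wait_for_test_irq(void)
{
	/* wait_event_timeout() returns 0 if the flag is still clear at HZ */
	if (!wait_event_timeout(demo_wait_q, demo_received, HZ))
		return -EOPNOTSUPP;
	return 0;
}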
10149
10150 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10151  * @pdev:               PCI device struct
10152  * @dev_id:             PCI device id struct
10153  *
10154  * Return value:
10155  *      0 on success / non-zero on failure
10156  **/
10157 static int ipr_probe_ioa(struct pci_dev *pdev,
10158                          const struct pci_device_id *dev_id)
10159 {
10160         struct ipr_ioa_cfg *ioa_cfg;
10161         struct Scsi_Host *host;
10162         unsigned long ipr_regs_pci;
10163         void __iomem *ipr_regs;
10164         int rc = PCIBIOS_SUCCESSFUL;
10165         volatile u32 mask, uproc, interrupts;
10166         unsigned long lock_flags, driver_lock_flags;
10167         unsigned int irq_flag;
10168
10169         ENTER;
10170
10171         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10172         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10173
10174         if (!host) {
10175                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10176                 rc = -ENOMEM;
10177                 goto out;
10178         }
10179
10180         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10181         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10182         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10183
10184         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10185
10186         if (!ioa_cfg->ipr_chip) {
10187                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10188                         dev_id->vendor, dev_id->device);
10189                 rc = -ENODEV;
                goto out_scsi_host_put;
10190         }
10191
10192         /* set SIS 32 or SIS 64 */
10193         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10194         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10195         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10196         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10197
10198         if (ipr_transop_timeout)
10199                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10200         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10201                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10202         else
10203                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10204
10205         ioa_cfg->revid = pdev->revision;
10206
10207         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10208
10209         ipr_regs_pci = pci_resource_start(pdev, 0);
10210
10211         rc = pci_request_regions(pdev, IPR_NAME);
10212         if (rc < 0) {
10213                 dev_err(&pdev->dev,
10214                         "Couldn't register memory range of registers\n");
10215                 goto out_scsi_host_put;
10216         }
10217
10218         rc = pci_enable_device(pdev);
10219
10220         if (rc || pci_channel_offline(pdev)) {
10221                 if (pci_channel_offline(pdev)) {
10222                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10223                         rc = pci_enable_device(pdev);
10224                 }
10225
10226                 if (rc) {
10227                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10228                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10229                         goto out_release_regions;
10230                 }
10231         }
10232
10233         ipr_regs = pci_ioremap_bar(pdev, 0);
10234
10235         if (!ipr_regs) {
10236                 dev_err(&pdev->dev,
10237                         "Couldn't map memory range of registers\n");
10238                 rc = -ENOMEM;
10239                 goto out_disable;
10240         }
10241
10242         ioa_cfg->hdw_dma_regs = ipr_regs;
10243         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10244         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10245
10246         ipr_init_regs(ioa_cfg);
10247
10248         if (ioa_cfg->sis64) {
10249                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10250                 if (rc < 0) {
10251                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10252                         rc = dma_set_mask_and_coherent(&pdev->dev,
10253                                                        DMA_BIT_MASK(32));
10254                 }
10255         } else
10256                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10257
10258         if (rc < 0) {
10259                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10260                 goto cleanup_nomem;
10261         }
10262
10263         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10264                                    ioa_cfg->chip_cfg->cache_line_size);
10265
10266         if (rc != PCIBIOS_SUCCESSFUL) {
10267                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10268                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10269                 rc = -EIO;
10270                 goto cleanup_nomem;
10271         }
10272
10273         /* Issue MMIO read to ensure card is not in EEH */
10274         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10275         ipr_wait_for_pci_err_recovery(ioa_cfg);
10276
10277         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10278                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10279                         IPR_MAX_MSIX_VECTORS);
10280                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10281         }
10282
10283         irq_flag = PCI_IRQ_LEGACY;
10284         if (ioa_cfg->ipr_chip->has_msi)
10285                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10286         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10287         if (rc < 0) {
10288                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10289                 goto cleanup_nomem;
10290         }
10291         ioa_cfg->nvectors = rc;
10292
10293         if (!pdev->msi_enabled && !pdev->msix_enabled)
10294                 ioa_cfg->clear_isr = 1;
10295
10296         pci_set_master(pdev);
10297
10298         if (pci_channel_offline(pdev)) {
10299                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10300                 pci_set_master(pdev);
10301                 if (pci_channel_offline(pdev)) {
10302                         rc = -EIO;
10303                         goto out_msi_disable;
10304                 }
10305         }
10306
10307         if (pdev->msi_enabled || pdev->msix_enabled) {
10308                 rc = ipr_test_msi(ioa_cfg, pdev);
10309                 switch (rc) {
10310                 case 0:
10311                         dev_info(&pdev->dev,
10312                                 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10313                                 pdev->msix_enabled ? "-X" : "");
10314                         break;
10315                 case -EOPNOTSUPP:
10316                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10317                         pci_free_irq_vectors(pdev);
10318
10319                         ioa_cfg->nvectors = 1;
10320                         ioa_cfg->clear_isr = 1;
10321                         break;
10322                 default:
10323                         goto out_msi_disable;
10324                 }
10325         }
10326
10327         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10328                                 (unsigned int)num_online_cpus(),
10329                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10330
10331         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10332                 goto out_msi_disable;
10333
10334         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10335                 goto out_msi_disable;
10336
10337         rc = ipr_alloc_mem(ioa_cfg);
10338         if (rc < 0) {
10339                 dev_err(&pdev->dev,
10340                         "Couldn't allocate enough memory for device driver!\n");
10341                 goto out_msi_disable;
10342         }
10343
10344         /* Save away PCI config space for use following IOA reset */
10345         rc = pci_save_state(pdev);
10346
10347         if (rc != PCIBIOS_SUCCESSFUL) {
10348                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10349                 rc = -EIO;
10350                 goto cleanup_nolog;
10351         }
10352
10353         /*
10354          * If HRRQ updated interrupt is not masked, or reset alert is set,
10355          * the card is in an unknown state and needs a hard reset
10356          */
10357         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10358         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10359         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10360         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10361                 ioa_cfg->needs_hard_reset = 1;
10362         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10363                 ioa_cfg->needs_hard_reset = 1;
10364         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10365                 ioa_cfg->ioa_unit_checked = 1;
10366
10367         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10368         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10369         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10370
10371         if (pdev->msi_enabled || pdev->msix_enabled) {
10372                 name_msi_vectors(ioa_cfg);
10373                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10374                         ioa_cfg->vectors_info[0].desc,
10375                         &ioa_cfg->hrrq[0]);
10376                 if (!rc)
10377                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10378         } else {
10379                 rc = request_irq(pdev->irq, ipr_isr,
10380                          IRQF_SHARED,
10381                          IPR_NAME, &ioa_cfg->hrrq[0]);
10382         }
10383         if (rc) {
10384                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10385                         pdev->irq, rc);
10386                 goto cleanup_nolog;
10387         }
10388
10389         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10390             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10391                 ioa_cfg->needs_warm_reset = 1;
10392                 ioa_cfg->reset = ipr_reset_slot_reset;
10393
10394                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10395                                                                 WQ_MEM_RECLAIM, host->host_no);
10396
10397                 if (!ioa_cfg->reset_work_q) {
10398                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10399                         rc = -ENOMEM;
10400                         goto out_free_irq;
10401                 }
10402         } else
10403                 ioa_cfg->reset = ipr_reset_start_bist;
10404
10405         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10406         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10407         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10408
10409         LEAVE;
10410 out:
10411         return rc;
10412
10413 out_free_irq:
10414         ipr_free_irqs(ioa_cfg);
10415 cleanup_nolog:
10416         ipr_free_mem(ioa_cfg);
10417 out_msi_disable:
10418         ipr_wait_for_pci_err_recovery(ioa_cfg);
10419         pci_free_irq_vectors(pdev);
10420 cleanup_nomem:
10421         iounmap(ipr_regs);
10422 out_disable:
10423         pci_disable_device(pdev);
10424 out_release_regions:
10425         pci_release_regions(pdev);
10426 out_scsi_host_put:
10427         scsi_host_put(host);
10428         goto out;
10429 }
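/*
 * A minimal sketch of the interrupt-vector policy used in
 * ipr_probe_ioa() above: prefer MSI-X/MSI when the chip supports it,
 * but always allow fallback to a single legacy INTx vector.
 * demo_alloc_vectors() and its parameters are illustrative only.
 */
static int demo_alloc_vectors(struct pci_dev *pdev, unsigned int max_vecs,
			      bool has_msi)
{
	unsigned int flags = PCI_IRQ_LEGACY;

	if (has_msi)
		flags |= PCI_IRQ_MSI | PCI_IRQ_MSIX;

	/* returns the number of vectors allocated or a negative errno */
	return pci_alloc_irq_vectors(pdev, 1, max_vecs, flags);
}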
10430
10431 /**
10432  * ipr_initiate_ioa_bringdown - Bring down an adapter
10433  * @ioa_cfg:            ioa config struct
10434  * @shutdown_type:      shutdown type
10435  *
10436  * Description: This function will initiate bringing down the adapter.
10437  * This consists of issuing an IOA shutdown to the adapter
10438  * to flush the cache, and running BIST.
10439  * If the caller needs to wait on the completion of the reset,
10440  * the caller must sleep on the reset_wait_q.
10441  *
10442  * Return value:
10443  *      none
10444  **/
10445 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10446                                        enum ipr_shutdown_type shutdown_type)
10447 {
10448         ENTER;
10449         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10450                 ioa_cfg->sdt_state = ABORT_DUMP;
10451         ioa_cfg->reset_retries = 0;
10452         ioa_cfg->in_ioa_bringdown = 1;
10453         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10454         LEAVE;
10455 }
10456
10457 /**
10458  * __ipr_remove - Remove a single adapter
10459  * @pdev:       pci device struct
10460  *
10461  * Adapter hot plug remove entry point.
10462  *
10463  * Return value:
10464  *      none
10465  **/
10466 static void __ipr_remove(struct pci_dev *pdev)
10467 {
10468         unsigned long host_lock_flags = 0;
10469         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10470         int i;
10471         unsigned long driver_lock_flags;
10472         ENTER;
10473
10474         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10475         while (ioa_cfg->in_reset_reload) {
10476                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10477                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10478                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10479         }
10480
10481         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10482                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10483                 ioa_cfg->hrrq[i].removing_ioa = 1;
10484                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10485         }
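	/* ensure the removing_ioa stores are visible before bringdown begins */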
10486         wmb();
10487         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10488
10489         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10490         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10491         flush_work(&ioa_cfg->work_q);
10492         if (ioa_cfg->reset_work_q)
10493                 flush_workqueue(ioa_cfg->reset_work_q);
10494         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10495         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10496
10497         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10498         list_del(&ioa_cfg->queue);
10499         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10500
10501         if (ioa_cfg->sdt_state == ABORT_DUMP)
10502                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10503         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10504
10505         ipr_free_all_resources(ioa_cfg);
10506
10507         LEAVE;
10508 }
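/*
 * A minimal sketch of the drop-lock-and-wait loop used by
 * __ipr_remove() above (and again in ipr_shutdown()): wait_event()
 * may sleep, so the spinlock is released around the wait and the
 * condition is re-checked once the lock is reacquired. All demo_*
 * names are illustrative only.
 */
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_reset_wait_q);
static int demo_in_reset;

static void demo_wait_for_quiesce(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	while (demo_in_reset) {
		spin_unlock_irqrestore(&demo_lock, flags);
		wait_event(demo_reset_wait_q, !demo_in_reset);
		spin_lock_irqsave(&demo_lock, flags);
	}
	spin_unlock_irqrestore(&demo_lock, flags);
}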
10509
10510 /**
10511  * ipr_remove - IOA hot plug remove entry point
10512  * @pdev:       pci device struct
10513  *
10514  * Adapter hot plug remove entry point.
10515  *
10516  * Return value:
10517  *      none
10518  **/
10519 static void ipr_remove(struct pci_dev *pdev)
10520 {
10521         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10522
10523         ENTER;
10524
10525         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10526                               &ipr_trace_attr);
10527         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10528                              &ipr_dump_attr);
10529         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10530                         &ipr_ioa_async_err_log);
10531         scsi_remove_host(ioa_cfg->host);
10532
10533         __ipr_remove(pdev);
10534
10535         LEAVE;
10536 }
10537
10538 /**
10539  * ipr_probe - Adapter hot plug add entry point
10540  * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
10541  * Return value:
10542  *      0 on success / non-zero on failure
10543  **/
10544 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10545 {
10546         struct ipr_ioa_cfg *ioa_cfg;
10547         unsigned long flags;
10548         int rc, i;
10549
10550         rc = ipr_probe_ioa(pdev, dev_id);
10551
10552         if (rc)
10553                 return rc;
10554
10555         ioa_cfg = pci_get_drvdata(pdev);
10556         rc = ipr_probe_ioa_part2(ioa_cfg);
10557
10558         if (rc) {
10559                 __ipr_remove(pdev);
10560                 return rc;
10561         }
10562
10563         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10564
10565         if (rc) {
10566                 __ipr_remove(pdev);
10567                 return rc;
10568         }
10569
10570         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10571                                    &ipr_trace_attr);
10572
10573         if (rc) {
10574                 scsi_remove_host(ioa_cfg->host);
10575                 __ipr_remove(pdev);
10576                 return rc;
10577         }
10578
10579         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10580                         &ipr_ioa_async_err_log);
10581
10582         if (rc) {
10583                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10584                                 &ipr_dump_attr);
10585                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10586                                 &ipr_trace_attr);
10587                 scsi_remove_host(ioa_cfg->host);
10588                 __ipr_remove(pdev);
10589                 return rc;
10590         }
10591
10592         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10593                                    &ipr_dump_attr);
10594
10595         if (rc) {
10596                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10597                                       &ipr_ioa_async_err_log);
10598                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10599                                       &ipr_trace_attr);
10600                 scsi_remove_host(ioa_cfg->host);
10601                 __ipr_remove(pdev);
10602                 return rc;
10603         }
10604         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10605         ioa_cfg->scan_enabled = 1;
10606         schedule_work(&ioa_cfg->work_q);
10607         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10608
10609         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10610
10611         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10612                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10613                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10614                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10615                 }
10616         }
10617
10618         scsi_scan_host(ioa_cfg->host);
10619
10620         return 0;
10621 }
10622
10623 /**
10624  * ipr_shutdown - Shutdown handler.
10625  * @pdev:       pci device struct
10626  *
10627  * This function is invoked upon system shutdown/reboot. It will issue
10628  * an adapter shutdown to the adapter to flush the write cache.
10629  *
10630  * Return value:
10631  *      none
10632  **/
10633 static void ipr_shutdown(struct pci_dev *pdev)
10634 {
10635         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10636         unsigned long lock_flags = 0;
10637         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10638         int i;
10639
10640         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10641         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10642                 ioa_cfg->iopoll_weight = 0;
10643                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10644                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10645         }
10646
10647         while (ioa_cfg->in_reset_reload) {
10648                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10649                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10650                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10651         }
10652
10653         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10654                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10655
10656         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10657         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10658         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10659         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10660                 ipr_free_irqs(ioa_cfg);
10661                 pci_disable_device(ioa_cfg->pdev);
10662         }
10663 }
10664
10665 static struct pci_device_id ipr_pci_table[] = {
10666         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10667                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10668         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10670         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10671                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10672         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10673                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10674         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10675                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10676         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10677                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10679                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10681                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10682                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10683         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10684               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10685         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10686               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10687               IPR_USE_LONG_TRANSOP_TIMEOUT },
10688         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10689               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10690               IPR_USE_LONG_TRANSOP_TIMEOUT },
10691         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10692               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10693         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10694               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10695               IPR_USE_LONG_TRANSOP_TIMEOUT},
10696         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10697               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10698               IPR_USE_LONG_TRANSOP_TIMEOUT },
10699         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10700               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10701               IPR_USE_LONG_TRANSOP_TIMEOUT },
10702         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10703               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10704         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10705               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10707               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10708               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10709         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10710                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10711         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10712                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10713         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10714                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10715                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10716         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10718                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10719         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10721         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10722                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10723         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10724                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10725         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10726                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10727         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10728                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10729         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10730                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10731         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10733         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10734                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10735         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10736                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10737         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10739         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10741         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10743         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10745         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10747         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10749         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10751         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10753         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10755         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10757         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10759         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10761         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10762                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10763         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10764                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10765         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10767         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10769         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10771         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10772                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10773         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10774                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10775         { }
10776 };
10777 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10778
10779 static const struct pci_error_handlers ipr_err_handler = {
10780         .error_detected = ipr_pci_error_detected,
10781         .mmio_enabled = ipr_pci_mmio_enabled,
10782         .slot_reset = ipr_pci_slot_reset,
10783 };
10784
10785 static struct pci_driver ipr_driver = {
10786         .name = IPR_NAME,
10787         .id_table = ipr_pci_table,
10788         .probe = ipr_probe,
10789         .remove = ipr_remove,
10790         .shutdown = ipr_shutdown,
10791         .err_handler = &ipr_err_handler,
10792 };
10793
10794 /**
10795  * ipr_halt_done - Shutdown prepare completion
10796  * @ipr_cmd:    ipr command struct
 *
10797  * Return value:
10798  *      none
10799  **/
10800 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10801 {
10802         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10803 }
10804
10805 /**
10806  * ipr_halt - Issue shutdown prepare to all adapters
10807  * @nb:         notifier block
 * @event:      system event (SYS_RESTART, SYS_HALT or SYS_POWER_OFF)
 * @buf:        unused
 *
10808  * Return value:
10809  *      NOTIFY_OK on success / NOTIFY_DONE on failure
10810  **/
10811 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10812 {
10813         struct ipr_cmnd *ipr_cmd;
10814         struct ipr_ioa_cfg *ioa_cfg;
10815         unsigned long flags = 0, driver_lock_flags;
10816
10817         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10818                 return NOTIFY_DONE;
10819
10820         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10821
10822         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10823                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10824                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10825                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10826                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10827                         continue;
10828                 }
10829
10830                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10831                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10832                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10833                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10834                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10835
10836                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10837                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10838         }
10839         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10840
10841         return NOTIFY_OK;
10842 }
10843
10844 static struct notifier_block ipr_notifier = {
10845         .notifier_call = ipr_halt,
10846 };
10847
10848 /**
10849  * ipr_init - Module entry point
10850  *
10851  * Return value:
10852  *      0 on success / negative value on failure
10853  **/
10854 static int __init ipr_init(void)
10855 {
10856         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10857                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10858
10859         register_reboot_notifier(&ipr_notifier);
10860         return pci_register_driver(&ipr_driver);
10861 }
10862
10863 /**
10864  * ipr_exit - Module unload
10865  *
10866  * Module unload entry point.
10867  *
10868  * Return value:
10869  *      none
10870  **/
10871 static void __exit ipr_exit(void)
10872 {
10873         unregister_reboot_notifier(&ipr_notifier);
10874         pci_unregister_driver(&ipr_driver);
10875 }
10876
10877 module_init(ipr_init);
10878 module_exit(ipr_exit);