// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
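
/*
 * Illustrative sketch (not part of the driver): a probe routine might
 * map a PCI vendor/device pair onto its ipr_chip[] entry roughly like
 * this; the helper name and the field names are assumptions:
 *
 *	static const struct ipr_chip_t *find_chip(const struct pci_dev *pdev)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *			if (ipr_chip[i].vendor == pdev->vendor &&
 *			    ipr_chip[i].device == pdev->device)
 *				return &ipr_chip[i];
 *		return NULL;
 *	}
 */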

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to become operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSI-X interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
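
/*
 * Example (illustrative only): loading the driver with some of the
 * parameters above from the shell:
 *
 *	modprobe ipr max_speed=2 log_level=2 fast_reboot=1
 */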

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};
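
/*
 * Illustrative sketch (not the driver's actual lookup routine) of how
 * an IOASC could be resolved against the table above; the mask macro
 * and field layout here are assumptions:
 *
 *	static const struct ipr_error_table_t *ipr_find_error(u32 ioasc)
 *	{
 *		int i;
 *
 *		for (i = 1; i < ARRAY_SIZE(ipr_error_table); i++)
 *			if (ipr_error_table[i].ioasc ==
 *			    (ioasc & IPR_IOASC_IOASC_MASK))
 *				return &ipr_error_table[i];
 *		return &ipr_error_table[0];	// "unknown error" entry
 *	}
 */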

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
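
/*
 * Illustrative sketch (assumed semantics, hypothetical helper): in the
 * compare string above, an 'X' marks a product ID byte that must match,
 * while any other character (such as '*') acts as a wildcard:
 *
 *	static bool ipr_ses_id_matches(const struct ipr_ses_table_entry *ste,
 *				       const char *product_id)
 *	{
 *		int j;
 *
 *		for (j = 0; j < IPR_PROD_ID_LEN; j++)
 *			if (ste->compare_product_id_byte[j] == 'X' &&
 *			    product_id[j] != ste->product_id[j])
 *				return false;
 *		return true;
 *	}
 */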

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
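
/*
 * Example (taken from uses later in this file): callers bracket a
 * command's life cycle with trace entries, e.g.:
 *
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 *	...
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
 */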

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	readl(ioa_cfg->regs.sense_interrupt_reg);
}
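
/*
 * Example (illustrative): reset paths typically mask everything and
 * clear all pending interrupts in a single call:
 *
 *	ipr_mask_and_clear_interrupts(ioa_cfg, ~0u);
 */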

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, it calculates the IOARCB size required, then ORs the
 * appropriate size bits into the request address.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
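
/*
 * Example (taken from ipr_send_hcam() below): mapping a single hostrcb
 * buffer that the adapter will write into:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */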

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
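
/*
 * Note the locking contract above: the caller must hold
 * host->host_lock, which is dropped across the wait and reacquired
 * before returning. An illustrative call site (the timeout handler and
 * timeout constant below are hypothetical names):
 *
 *	spin_lock_irq(ioa_cfg->host->host_lock);
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout_fn, IPR_CMD_TIMEOUT);
 *	spin_unlock_irq(ioa_cfg->host->host_lock);
 */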

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
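
/*
 * Note: hrrq 0 (IPR_INIT_HRRQ) is reserved for internal commands, so
 * the round-robin above only distributes I/O across queues
 * 1 .. hrrq_num - 1. With hrrq_num == 4, for example, successive calls
 * cycle through queues 1, 2 and 3, never 0.
 */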

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
1258
1259 /**
1260  * ipr_is_same_device - Determine if two devices are the same.
1261  * @res:        resource entry struct
1262  * @cfgtew:     config table entry wrapper struct
1263  *
1264  * Return value:
1265  *      1 if the devices are the same / 0 otherwise
1266  **/
1267 static int ipr_is_same_device(struct ipr_resource_entry *res,
1268                               struct ipr_config_table_entry_wrapper *cfgtew)
1269 {
1270         if (res->ioa_cfg->sis64) {
1271                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1272                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1273                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1274                                         sizeof(cfgtew->u.cfgte64->lun))) {
1275                         return 1;
1276                 }
1277         } else {
1278                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1279                     res->target == cfgtew->u.cfgte->res_addr.target &&
1280                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1281                         return 1;
1282         }
1283
1284         return 0;
1285 }
1286
1287 /**
1288  * __ipr_format_res_path - Format the resource path for printing.
1289  * @res_path:   resource path
1290  * @buffer:     buffer
1291  * @len:        length of buffer provided
1292  *
1293  * Return value:
1294  *      pointer to buffer
1295  **/
1296 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1297 {
1298         int i;
1299         char *p = buffer;
1300
1301         *p = '\0';
1302         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
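	/* The remaining path bytes are 0xff-terminated; print each hop as "-XX". */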
	for (i = 1; i < IPR_RES_PATH_BYTES && res_path[i] != 0xff; i++)
1304                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1305
1306         return buffer;
1307 }
1308
1309 /**
1310  * ipr_format_res_path - Format the resource path for printing.
1311  * @ioa_cfg:    ioa config struct
1312  * @res_path:   resource path
1313  * @buffer:     buffer
1314  * @len:        length of buffer provided
1315  *
1316  * Return value:
1317  *      pointer to buffer
1318  **/
1319 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1320                                  u8 *res_path, char *buffer, int len)
1321 {
1322         char *p = buffer;
1323
1324         *p = '\0';
1325         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1326         __ipr_format_res_path(res_path, p, len - (p - buffer));
1327         return buffer;
1328 }
1329
1330 /**
1331  * ipr_update_res_entry - Update the resource entry.
1332  * @res:        resource entry struct
1333  * @cfgtew:     config table entry wrapper struct
1334  *
1335  * Return value:
1336  *      none
1337  **/
1338 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1339                                  struct ipr_config_table_entry_wrapper *cfgtew)
1340 {
1341         char buffer[IPR_MAX_RES_PATH_LENGTH];
1342         unsigned int proto;
1343         int new_path = 0;
1344
1345         if (res->ioa_cfg->sis64) {
1346                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1347                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1348                 res->type = cfgtew->u.cfgte64->res_type;
1349
1350                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1351                         sizeof(struct ipr_std_inq_data));
1352
1353                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1354                 proto = cfgtew->u.cfgte64->proto;
1355                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1356                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1357
1358                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1359                         sizeof(res->dev_lun.scsi_lun));
1360
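		/* A changed res_path means the device moved; remember that so
		 * the new route can be logged against the attached sdev. */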
1361                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1362                                         sizeof(res->res_path))) {
1363                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1364                                 sizeof(res->res_path));
1365                         new_path = 1;
1366                 }
1367
1368                 if (res->sdev && new_path)
1369                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1370                                     ipr_format_res_path(res->ioa_cfg,
1371                                         res->res_path, buffer, sizeof(buffer)));
1372         } else {
1373                 res->flags = cfgtew->u.cfgte->flags;
1374                 if (res->flags & IPR_IS_IOA_RESOURCE)
1375                         res->type = IPR_RES_TYPE_IOAFP;
1376                 else
1377                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1378
1379                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1380                         sizeof(struct ipr_std_inq_data));
1381
1382                 res->qmodel = IPR_QUEUEING_MODEL(res);
1383                 proto = cfgtew->u.cfgte->proto;
1384                 res->res_handle = cfgtew->u.cfgte->res_handle;
1385         }
1386
1387         ipr_update_ata_class(res, proto);
1388 }
1389
1390 /**
1391  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1392  *                        for the resource.
1393  * @res:        resource entry struct
1394  *
1395  * Return value:
1396  *      none
1397  **/
1398 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1399 {
1400         struct ipr_resource_entry *gscsi_res = NULL;
1401         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1402
1403         if (!ioa_cfg->sis64)
1404                 return;
1405
1406         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1407                 clear_bit(res->target, ioa_cfg->array_ids);
1408         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1409                 clear_bit(res->target, ioa_cfg->vset_ids);
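	/* Generic SCSI devices share one target id per dev_id; leave the
	 * bit set while any other resource still references that dev_id. */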
1410         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1411                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1412                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1413                                 return;
1414                 clear_bit(res->target, ioa_cfg->target_ids);
1416         } else if (res->bus == 0)
1417                 clear_bit(res->target, ioa_cfg->target_ids);
1418 }
1419
1420 /**
1421  * ipr_handle_config_change - Handle a config change from the adapter
1422  * @ioa_cfg:    ioa config struct
1423  * @hostrcb:    hostrcb
1424  *
1425  * Return value:
1426  *      none
1427  **/
1428 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1429                                      struct ipr_hostrcb *hostrcb)
1430 {
1431         struct ipr_resource_entry *res = NULL;
1432         struct ipr_config_table_entry_wrapper cfgtew;
1433         __be32 cc_res_handle;
1435         u32 is_ndn = 1;
1436
1437         if (ioa_cfg->sis64) {
1438                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1439                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1440         } else {
1441                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1442                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1443         }
1444
1445         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1446                 if (res->res_handle == cc_res_handle) {
1447                         is_ndn = 0;
1448                         break;
1449                 }
1450         }
1451
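	/* No resource matches this handle, so treat it as a newly reported
	 * device and claim a free entry (or hand the HCAM straight back if
	 * none are left). */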
1452         if (is_ndn) {
1453                 if (list_empty(&ioa_cfg->free_res_q)) {
1454                         ipr_send_hcam(ioa_cfg,
1455                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1456                                       hostrcb);
1457                         return;
1458                 }
1459
1460                 res = list_entry(ioa_cfg->free_res_q.next,
1461                                  struct ipr_resource_entry, queue);
1462
1463                 list_del(&res->queue);
1464                 ipr_init_res_entry(res, &cfgtew);
1465                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1466         }
1467
1468         ipr_update_res_entry(res, &cfgtew);
1469
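	/* Removals with an attached sdev are deferred to the worker thread;
	 * otherwise the entry goes straight back on the free queue. */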
1470         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1471                 if (res->sdev) {
1472                         res->del_from_ml = 1;
1473                         res->res_handle = IPR_INVALID_RES_HANDLE;
1474                         schedule_work(&ioa_cfg->work_q);
1475                 } else {
1476                         ipr_clear_res_target(res);
1477                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1478                 }
1479         } else if (!res->sdev || res->del_from_ml) {
1480                 res->add_to_ml = 1;
1481                 schedule_work(&ioa_cfg->work_q);
1482         }
1483
1484         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1485 }
1486
1487 /**
1488  * ipr_process_ccn - Op done function for a CCN.
1489  * @ipr_cmd:    ipr command struct
1490  *
 * This function is the op done function for a configuration change
 * notification host controlled async message (HCAM) from the adapter.
1493  *
1494  * Return value:
1495  *      none
1496  **/
1497 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1498 {
1499         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1500         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1501         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1502
1503         list_del_init(&hostrcb->queue);
1504         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1505
1506         if (ioasc) {
1507                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1508                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1509                         dev_err(&ioa_cfg->pdev->dev,
1510                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1511
1512                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1513         } else {
1514                 ipr_handle_config_change(ioa_cfg, hostrcb);
1515         }
1516 }
1517
1518 /**
1519  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index of the last character of the string in @buf
1521  * @buf:                string to modify
1522  *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NUL-terminate the string.
1525  *
1526  * Return value:
1527  *      new length of string
1528  **/
1529 static int strip_and_pad_whitespace(int i, char *buf)
1530 {
1531         while (i && buf[i] == ' ')
1532                 i--;
1533         buf[i+1] = ' ';
1534         buf[i+2] = '\0';
1535         return i + 2;
1536 }
1537
1538 /**
1539  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1540  * @prefix:             string to print at start of printk
1541  * @hostrcb:    hostrcb pointer
1542  * @vpd:                vendor/product id/sn struct
1543  *
1544  * Return value:
1545  *      none
1546  **/
1547 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1548                                 struct ipr_vpd *vpd)
1549 {
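	/* Two separating spaces plus the terminating NUL account for the
	 * "+ 3" in the buffer size. */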
1550         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1551         int i = 0;
1552
1553         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1554         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1555
1556         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1557         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1558
1559         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1560         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1561
1562         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1563 }
1564
1565 /**
1566  * ipr_log_vpd - Log the passed VPD to the error log.
1567  * @vpd:                vendor/product id/sn struct
1568  *
1569  * Return value:
1570  *      none
1571  **/
1572 static void ipr_log_vpd(struct ipr_vpd *vpd)
1573 {
1574         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1575                     + IPR_SERIAL_NUM_LEN];
1576
1577         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1578         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1579                IPR_PROD_ID_LEN);
1580         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1581         ipr_err("Vendor/Product ID: %s\n", buffer);
1582
1583         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1584         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1585         ipr_err("    Serial Number: %s\n", buffer);
1586 }
1587
1588 /**
1589  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1590  * @prefix:             string to print at start of printk
1591  * @hostrcb:    hostrcb pointer
1592  * @vpd:                vendor/product id/sn/wwn struct
1593  *
1594  * Return value:
1595  *      none
1596  **/
1597 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1598                                     struct ipr_ext_vpd *vpd)
1599 {
1600         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1601         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1602                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1603 }
1604
1605 /**
1606  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1613 {
1614         ipr_log_vpd(&vpd->vpd);
1615         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1616                 be32_to_cpu(vpd->wwid[1]));
1617 }
1618
1619 /**
1620  * ipr_log_enhanced_cache_error - Log a cache error.
1621  * @ioa_cfg:    ioa config struct
1622  * @hostrcb:    hostrcb struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1628                                          struct ipr_hostrcb *hostrcb)
1629 {
1630         struct ipr_hostrcb_type_12_error *error;
1631
1632         if (ioa_cfg->sis64)
1633                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1634         else
1635                 error = &hostrcb->hcam.u.error.u.type_12_error;
1636
1637         ipr_err("-----Current Configuration-----\n");
1638         ipr_err("Cache Directory Card Information:\n");
1639         ipr_log_ext_vpd(&error->ioa_vpd);
1640         ipr_err("Adapter Card Information:\n");
1641         ipr_log_ext_vpd(&error->cfc_vpd);
1642
1643         ipr_err("-----Expected Configuration-----\n");
1644         ipr_err("Cache Directory Card Information:\n");
1645         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1646         ipr_err("Adapter Card Information:\n");
1647         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1648
1649         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1650                      be32_to_cpu(error->ioa_data[0]),
1651                      be32_to_cpu(error->ioa_data[1]),
1652                      be32_to_cpu(error->ioa_data[2]));
1653 }
1654
1655 /**
1656  * ipr_log_cache_error - Log a cache error.
1657  * @ioa_cfg:    ioa config struct
1658  * @hostrcb:    hostrcb struct
1659  *
1660  * Return value:
1661  *      none
1662  **/
1663 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1664                                 struct ipr_hostrcb *hostrcb)
1665 {
1666         struct ipr_hostrcb_type_02_error *error =
1667                 &hostrcb->hcam.u.error.u.type_02_error;
1668
1669         ipr_err("-----Current Configuration-----\n");
1670         ipr_err("Cache Directory Card Information:\n");
1671         ipr_log_vpd(&error->ioa_vpd);
1672         ipr_err("Adapter Card Information:\n");
1673         ipr_log_vpd(&error->cfc_vpd);
1674
1675         ipr_err("-----Expected Configuration-----\n");
1676         ipr_err("Cache Directory Card Information:\n");
1677         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1678         ipr_err("Adapter Card Information:\n");
1679         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1680
1681         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1682                      be32_to_cpu(error->ioa_data[0]),
1683                      be32_to_cpu(error->ioa_data[1]),
1684                      be32_to_cpu(error->ioa_data[2]));
1685 }
1686
1687 /**
1688  * ipr_log_enhanced_config_error - Log a configuration error.
1689  * @ioa_cfg:    ioa config struct
1690  * @hostrcb:    hostrcb struct
1691  *
1692  * Return value:
1693  *      none
1694  **/
1695 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1696                                           struct ipr_hostrcb *hostrcb)
1697 {
1698         int errors_logged, i;
1699         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1700         struct ipr_hostrcb_type_13_error *error;
1701
1702         error = &hostrcb->hcam.u.error.u.type_13_error;
1703         errors_logged = be32_to_cpu(error->errors_logged);
1704
1705         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1706                 be32_to_cpu(error->errors_detected), errors_logged);
1707
1708         dev_entry = error->dev;
1709
1710         for (i = 0; i < errors_logged; i++, dev_entry++) {
1711                 ipr_err_separator;
1712
1713                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1714                 ipr_log_ext_vpd(&dev_entry->vpd);
1715
1716                 ipr_err("-----New Device Information-----\n");
1717                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1718
1719                 ipr_err("Cache Directory Card Information:\n");
1720                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1721
1722                 ipr_err("Adapter Card Information:\n");
1723                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1724         }
1725 }
1726
1727 /**
 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1729  * @ioa_cfg:    ioa config struct
1730  * @hostrcb:    hostrcb struct
1731  *
1732  * Return value:
1733  *      none
1734  **/
1735 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1736                                        struct ipr_hostrcb *hostrcb)
1737 {
1738         int errors_logged, i;
1739         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1740         struct ipr_hostrcb_type_23_error *error;
1741         char buffer[IPR_MAX_RES_PATH_LENGTH];
1742
1743         error = &hostrcb->hcam.u.error64.u.type_23_error;
1744         errors_logged = be32_to_cpu(error->errors_logged);
1745
1746         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1747                 be32_to_cpu(error->errors_detected), errors_logged);
1748
1749         dev_entry = error->dev;
1750
1751         for (i = 0; i < errors_logged; i++, dev_entry++) {
1752                 ipr_err_separator;
1753
1754                 ipr_err("Device %d : %s", i + 1,
1755                         __ipr_format_res_path(dev_entry->res_path,
1756                                               buffer, sizeof(buffer)));
1757                 ipr_log_ext_vpd(&dev_entry->vpd);
1758
1759                 ipr_err("-----New Device Information-----\n");
1760                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1761
1762                 ipr_err("Cache Directory Card Information:\n");
1763                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1764
1765                 ipr_err("Adapter Card Information:\n");
1766                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1767         }
1768 }
1769
1770 /**
1771  * ipr_log_config_error - Log a configuration error.
1772  * @ioa_cfg:    ioa config struct
1773  * @hostrcb:    hostrcb struct
1774  *
1775  * Return value:
1776  *      none
1777  **/
1778 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1779                                  struct ipr_hostrcb *hostrcb)
1780 {
1781         int errors_logged, i;
1782         struct ipr_hostrcb_device_data_entry *dev_entry;
1783         struct ipr_hostrcb_type_03_error *error;
1784
1785         error = &hostrcb->hcam.u.error.u.type_03_error;
1786         errors_logged = be32_to_cpu(error->errors_logged);
1787
1788         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1789                 be32_to_cpu(error->errors_detected), errors_logged);
1790
1791         dev_entry = error->dev;
1792
1793         for (i = 0; i < errors_logged; i++, dev_entry++) {
1794                 ipr_err_separator;
1795
1796                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1797                 ipr_log_vpd(&dev_entry->vpd);
1798
1799                 ipr_err("-----New Device Information-----\n");
1800                 ipr_log_vpd(&dev_entry->new_vpd);
1801
1802                 ipr_err("Cache Directory Card Information:\n");
1803                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1804
1805                 ipr_err("Adapter Card Information:\n");
1806                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1807
1808                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1809                         be32_to_cpu(dev_entry->ioa_data[0]),
1810                         be32_to_cpu(dev_entry->ioa_data[1]),
1811                         be32_to_cpu(dev_entry->ioa_data[2]),
1812                         be32_to_cpu(dev_entry->ioa_data[3]),
1813                         be32_to_cpu(dev_entry->ioa_data[4]));
1814         }
1815 }
1816
1817 /**
1818  * ipr_log_enhanced_array_error - Log an array configuration error.
1819  * @ioa_cfg:    ioa config struct
1820  * @hostrcb:    hostrcb struct
1821  *
1822  * Return value:
1823  *      none
1824  **/
1825 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1826                                          struct ipr_hostrcb *hostrcb)
1827 {
1828         int i, num_entries;
1829         struct ipr_hostrcb_type_14_error *error;
1830         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1831         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1832
1833         error = &hostrcb->hcam.u.error.u.type_14_error;
1834
1835         ipr_err_separator;
1836
1837         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1838                 error->protection_level,
1839                 ioa_cfg->host->host_no,
1840                 error->last_func_vset_res_addr.bus,
1841                 error->last_func_vset_res_addr.target,
1842                 error->last_func_vset_res_addr.lun);
1843
1844         ipr_err_separator;
1845
1846         array_entry = error->array_member;
1847         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1848                             ARRAY_SIZE(error->array_member));
1849
1850         for (i = 0; i < num_entries; i++, array_entry++) {
1851                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1852                         continue;
1853
1854                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1855                         ipr_err("Exposed Array Member %d:\n", i);
1856                 else
1857                         ipr_err("Array Member %d:\n", i);
1858
1859                 ipr_log_ext_vpd(&array_entry->vpd);
1860                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1861                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1862                                  "Expected Location");
1863
1864                 ipr_err_separator;
1865         }
1866 }
1867
1868 /**
1869  * ipr_log_array_error - Log an array configuration error.
1870  * @ioa_cfg:    ioa config struct
1871  * @hostrcb:    hostrcb struct
1872  *
1873  * Return value:
1874  *      none
1875  **/
1876 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1877                                 struct ipr_hostrcb *hostrcb)
1878 {
1879         int i;
1880         struct ipr_hostrcb_type_04_error *error;
1881         struct ipr_hostrcb_array_data_entry *array_entry;
1882         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1883
1884         error = &hostrcb->hcam.u.error.u.type_04_error;
1885
1886         ipr_err_separator;
1887
1888         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1889                 error->protection_level,
1890                 ioa_cfg->host->host_no,
1891                 error->last_func_vset_res_addr.bus,
1892                 error->last_func_vset_res_addr.target,
1893                 error->last_func_vset_res_addr.lun);
1894
1895         ipr_err_separator;
1896
1897         array_entry = error->array_member;
1898
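	/* The legacy hostrcb splits the 18 possible array members across
	 * two arrays, so switch to the second one after the tenth entry. */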
1899         for (i = 0; i < 18; i++) {
1900                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1901                         continue;
1902
1903                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1904                         ipr_err("Exposed Array Member %d:\n", i);
1905                 else
1906                         ipr_err("Array Member %d:\n", i);
1907
1908                 ipr_log_vpd(&array_entry->vpd);
1909
1910                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1911                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1912                                  "Expected Location");
1913
1914                 ipr_err_separator;
1915
1916                 if (i == 9)
1917                         array_entry = error->array_member2;
1918                 else
1919                         array_entry++;
1920         }
1921 }
1922
1923 /**
1924  * ipr_log_hex_data - Log additional hex IOA error data.
1925  * @ioa_cfg:    ioa config struct
1926  * @data:               IOA error data
1927  * @len:                data length
1928  *
1929  * Return value:
1930  *      none
1931  **/
1932 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1933 {
1934         int i;
1935
1936         if (len == 0)
1937                 return;
1938
1939         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1940                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1941
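	/* Dump four big-endian 32-bit words per line, prefixed with the
	 * byte offset. */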
1942         for (i = 0; i < len / 4; i += 4) {
1943                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1944                         be32_to_cpu(data[i]),
1945                         be32_to_cpu(data[i+1]),
1946                         be32_to_cpu(data[i+2]),
1947                         be32_to_cpu(data[i+3]));
1948         }
1949 }
1950
1951 /**
1952  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1953  * @ioa_cfg:    ioa config struct
1954  * @hostrcb:    hostrcb struct
1955  *
1956  * Return value:
1957  *      none
1958  **/
1959 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1960                                             struct ipr_hostrcb *hostrcb)
1961 {
1962         struct ipr_hostrcb_type_17_error *error;
1963
1964         if (ioa_cfg->sis64)
1965                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1966         else
1967                 error = &hostrcb->hcam.u.error.u.type_17_error;
1968
1969         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1970         strim(error->failure_reason);
1971
1972         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1973                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1974         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1975         ipr_log_hex_data(ioa_cfg, error->data,
1976                          be32_to_cpu(hostrcb->hcam.length) -
1977                          (offsetof(struct ipr_hostrcb_error, u) +
1978                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1979 }
1980
1981 /**
1982  * ipr_log_dual_ioa_error - Log a dual adapter error.
1983  * @ioa_cfg:    ioa config struct
1984  * @hostrcb:    hostrcb struct
1985  *
1986  * Return value:
1987  *      none
1988  **/
1989 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1990                                    struct ipr_hostrcb *hostrcb)
1991 {
1992         struct ipr_hostrcb_type_07_error *error;
1993
1994         error = &hostrcb->hcam.u.error.u.type_07_error;
1995         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1996         strim(error->failure_reason);
1997
1998         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1999                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2000         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2001         ipr_log_hex_data(ioa_cfg, error->data,
2002                          be32_to_cpu(hostrcb->hcam.length) -
2003                          (offsetof(struct ipr_hostrcb_error, u) +
2004                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2005 }
2006
2007 static const struct {
2008         u8 active;
2009         char *desc;
2010 } path_active_desc[] = {
2011         { IPR_PATH_NO_INFO, "Path" },
2012         { IPR_PATH_ACTIVE, "Active path" },
2013         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2014 };
2015
2016 static const struct {
2017         u8 state;
2018         char *desc;
2019 } path_state_desc[] = {
2020         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2021         { IPR_PATH_HEALTHY, "is healthy" },
2022         { IPR_PATH_DEGRADED, "is degraded" },
2023         { IPR_PATH_FAILED, "is failed" }
2024 };
2025
2026 /**
2027  * ipr_log_fabric_path - Log a fabric path error
2028  * @hostrcb:    hostrcb struct
2029  * @fabric:             fabric descriptor
2030  *
2031  * Return value:
2032  *      none
2033  **/
2034 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2035                                 struct ipr_hostrcb_fabric_desc *fabric)
2036 {
2037         int i, j;
2038         u8 path_state = fabric->path_state;
2039         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2040         u8 state = path_state & IPR_PATH_STATE_MASK;
2041
2042         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2043                 if (path_active_desc[i].active != active)
2044                         continue;
2045
2046                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2047                         if (path_state_desc[j].state != state)
2048                                 continue;
2049
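			/* 0xff marks a field the adapter did not report, so
			 * print only the parts of the path that are valid. */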
2050                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2051                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2052                                              path_active_desc[i].desc, path_state_desc[j].desc,
2053                                              fabric->ioa_port);
2054                         } else if (fabric->cascaded_expander == 0xff) {
2055                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2056                                              path_active_desc[i].desc, path_state_desc[j].desc,
2057                                              fabric->ioa_port, fabric->phy);
2058                         } else if (fabric->phy == 0xff) {
2059                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2060                                              path_active_desc[i].desc, path_state_desc[j].desc,
2061                                              fabric->ioa_port, fabric->cascaded_expander);
2062                         } else {
2063                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2064                                              path_active_desc[i].desc, path_state_desc[j].desc,
2065                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2066                         }
2067                         return;
2068                 }
2069         }
2070
2071         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2072                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2073 }
2074
2075 /**
2076  * ipr_log64_fabric_path - Log a fabric path error
2077  * @hostrcb:    hostrcb struct
2078  * @fabric:             fabric descriptor
2079  *
2080  * Return value:
2081  *      none
2082  **/
2083 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2084                                   struct ipr_hostrcb64_fabric_desc *fabric)
2085 {
2086         int i, j;
2087         u8 path_state = fabric->path_state;
2088         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2089         u8 state = path_state & IPR_PATH_STATE_MASK;
2090         char buffer[IPR_MAX_RES_PATH_LENGTH];
2091
2092         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2093                 if (path_active_desc[i].active != active)
2094                         continue;
2095
2096                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2097                         if (path_state_desc[j].state != state)
2098                                 continue;
2099
2100                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2101                                      path_active_desc[i].desc, path_state_desc[j].desc,
2102                                      ipr_format_res_path(hostrcb->ioa_cfg,
2103                                                 fabric->res_path,
2104                                                 buffer, sizeof(buffer)));
2105                         return;
2106                 }
2107         }
2108
2109         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2110                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2111                                     buffer, sizeof(buffer)));
2112 }
2113
2114 static const struct {
2115         u8 type;
2116         char *desc;
2117 } path_type_desc[] = {
2118         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2119         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2120         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2121         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2122 };
2123
2124 static const struct {
2125         u8 status;
2126         char *desc;
2127 } path_status_desc[] = {
2128         { IPR_PATH_CFG_NO_PROB, "Functional" },
2129         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2130         { IPR_PATH_CFG_FAILED, "Failed" },
2131         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2132         { IPR_PATH_NOT_DETECTED, "Missing" },
2133         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2134 };
2135
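/* Printable SAS link rates, indexed by the masked link_rate value of a
 * fabric path element. */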
2136 static const char *link_rate[] = {
2137         "unknown",
2138         "disabled",
2139         "phy reset problem",
2140         "spinup hold",
2141         "port selector",
2142         "unknown",
2143         "unknown",
2144         "unknown",
2145         "1.5Gbps",
2146         "3.0Gbps",
2147         "unknown",
2148         "unknown",
2149         "unknown",
2150         "unknown",
2151         "unknown",
2152         "unknown"
2153 };
2154
2155 /**
2156  * ipr_log_path_elem - Log a fabric path element.
2157  * @hostrcb:    hostrcb struct
2158  * @cfg:                fabric path element struct
2159  *
2160  * Return value:
2161  *      none
2162  **/
2163 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2164                               struct ipr_hostrcb_config_element *cfg)
2165 {
2166         int i, j;
2167         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2168         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2169
2170         if (type == IPR_PATH_CFG_NOT_EXIST)
2171                 return;
2172
2173         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2174                 if (path_type_desc[i].type != type)
2175                         continue;
2176
2177                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2178                         if (path_status_desc[j].status != status)
2179                                 continue;
2180
2181                         if (type == IPR_PATH_CFG_IOA_PORT) {
2182                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2183                                              path_status_desc[j].desc, path_type_desc[i].desc,
2184                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2185                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2186                         } else {
2187                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2188                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2189                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2190                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2191                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2192                                 } else if (cfg->cascaded_expander == 0xff) {
2193                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2194                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2195                                                      path_type_desc[i].desc, cfg->phy,
2196                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2197                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2198                                 } else if (cfg->phy == 0xff) {
2199                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2200                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2201                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2202                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2203                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2204                                 } else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2206                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2207                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2208                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2209                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2210                                 }
2211                         }
2212                         return;
2213                 }
2214         }
2215
2216         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2217                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2218                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220 }
2221
2222 /**
2223  * ipr_log64_path_elem - Log a fabric path element.
2224  * @hostrcb:    hostrcb struct
2225  * @cfg:                fabric path element struct
2226  *
2227  * Return value:
2228  *      none
2229  **/
2230 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2231                                 struct ipr_hostrcb64_config_element *cfg)
2232 {
2233         int i, j;
2234         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2235         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2236         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2237         char buffer[IPR_MAX_RES_PATH_LENGTH];
2238
2239         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2240                 return;
2241
2242         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2243                 if (path_type_desc[i].type != type)
2244                         continue;
2245
2246                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2247                         if (path_status_desc[j].status != status)
2248                                 continue;
2249
2250                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2251                                      path_status_desc[j].desc, path_type_desc[i].desc,
2252                                      ipr_format_res_path(hostrcb->ioa_cfg,
2253                                         cfg->res_path, buffer, sizeof(buffer)),
2254                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2255                                         be32_to_cpu(cfg->wwid[0]),
2256                                         be32_to_cpu(cfg->wwid[1]));
2257                         return;
2258                 }
2259         }
2260         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2261                      "WWN=%08X%08X\n", cfg->type_status,
2262                      ipr_format_res_path(hostrcb->ioa_cfg,
2263                         cfg->res_path, buffer, sizeof(buffer)),
2264                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2265                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2266 }
2267
2268 /**
2269  * ipr_log_fabric_error - Log a fabric error.
2270  * @ioa_cfg:    ioa config struct
2271  * @hostrcb:    hostrcb struct
2272  *
2273  * Return value:
2274  *      none
2275  **/
2276 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2277                                  struct ipr_hostrcb *hostrcb)
2278 {
2279         struct ipr_hostrcb_type_20_error *error;
2280         struct ipr_hostrcb_fabric_desc *fabric;
2281         struct ipr_hostrcb_config_element *cfg;
2282         int i, add_len;
2283
2284         error = &hostrcb->hcam.u.error.u.type_20_error;
2285         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2286         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2287
2288         add_len = be32_to_cpu(hostrcb->hcam.length) -
2289                 (offsetof(struct ipr_hostrcb_error, u) +
2290                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2291
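	/* Walk the variable-length fabric descriptors, logging each path
	 * and its config elements; whatever trails the last descriptor is
	 * dumped as raw hex. */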
2292         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2293                 ipr_log_fabric_path(hostrcb, fabric);
2294                 for_each_fabric_cfg(fabric, cfg)
2295                         ipr_log_path_elem(hostrcb, cfg);
2296
2297                 add_len -= be16_to_cpu(fabric->length);
2298                 fabric = (struct ipr_hostrcb_fabric_desc *)
2299                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2300         }
2301
2302         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2303 }
2304
2305 /**
2306  * ipr_log_sis64_array_error - Log a sis64 array error.
2307  * @ioa_cfg:    ioa config struct
2308  * @hostrcb:    hostrcb struct
2309  *
2310  * Return value:
2311  *      none
2312  **/
2313 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2314                                       struct ipr_hostrcb *hostrcb)
2315 {
2316         int i, num_entries;
2317         struct ipr_hostrcb_type_24_error *error;
2318         struct ipr_hostrcb64_array_data_entry *array_entry;
2319         char buffer[IPR_MAX_RES_PATH_LENGTH];
2320         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2321
2322         error = &hostrcb->hcam.u.error64.u.type_24_error;
2323
2324         ipr_err_separator;
2325
2326         ipr_err("RAID %s Array Configuration: %s\n",
2327                 error->protection_level,
2328                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2329                         buffer, sizeof(buffer)));
2330
2331         ipr_err_separator;
2332
2333         array_entry = error->array_member;
2334         num_entries = min_t(u32, error->num_entries,
2335                             ARRAY_SIZE(error->array_member));
2336
2337         for (i = 0; i < num_entries; i++, array_entry++) {
2338
2339                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2340                         continue;
2341
2342                 if (error->exposed_mode_adn == i)
2343                         ipr_err("Exposed Array Member %d:\n", i);
2344                 else
2345                         ipr_err("Array Member %d:\n", i);
2346
2348                 ipr_log_ext_vpd(&array_entry->vpd);
2349                 ipr_err("Current Location: %s\n",
2350                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2351                                 buffer, sizeof(buffer)));
2352                 ipr_err("Expected Location: %s\n",
2353                          ipr_format_res_path(ioa_cfg,
2354                                 array_entry->expected_res_path,
2355                                 buffer, sizeof(buffer)));
2356
2357                 ipr_err_separator;
2358         }
2359 }
2360
2361 /**
2362  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2363  * @ioa_cfg:    ioa config struct
2364  * @hostrcb:    hostrcb struct
2365  *
2366  * Return value:
2367  *      none
2368  **/
2369 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2370                                        struct ipr_hostrcb *hostrcb)
2371 {
2372         struct ipr_hostrcb_type_30_error *error;
2373         struct ipr_hostrcb64_fabric_desc *fabric;
2374         struct ipr_hostrcb64_config_element *cfg;
2375         int i, add_len;
2376
2377         error = &hostrcb->hcam.u.error64.u.type_30_error;
2378
2379         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2380         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2381
2382         add_len = be32_to_cpu(hostrcb->hcam.length) -
2383                 (offsetof(struct ipr_hostrcb64_error, u) +
2384                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2385
2386         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2387                 ipr_log64_fabric_path(hostrcb, fabric);
2388                 for_each_fabric_cfg(fabric, cfg)
2389                         ipr_log64_path_elem(hostrcb, cfg);
2390
2391                 add_len -= be16_to_cpu(fabric->length);
2392                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2393                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2394         }
2395
2396         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2397 }
2398
2399 /**
2400  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2401  * @ioa_cfg:    ioa config struct
2402  * @hostrcb:    hostrcb struct
2403  *
2404  * Return value:
2405  *      none
2406  **/
2407 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2408                                        struct ipr_hostrcb *hostrcb)
2409 {
2410         struct ipr_hostrcb_type_41_error *error;
2411
2412         error = &hostrcb->hcam.u.error64.u.type_41_error;
2413
2414         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2415         ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2416         ipr_log_hex_data(ioa_cfg, error->data,
2417                          be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb64_error, u) +
2419                           offsetof(struct ipr_hostrcb_type_41_error, data)));
}

2421 /**
2422  * ipr_log_generic_error - Log an adapter error.
2423  * @ioa_cfg:    ioa config struct
2424  * @hostrcb:    hostrcb struct
2425  *
2426  * Return value:
2427  *      none
2428  **/
2429 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2430                                   struct ipr_hostrcb *hostrcb)
2431 {
2432         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2433                          be32_to_cpu(hostrcb->hcam.length));
2434 }
2435
2436 /**
 * ipr_log_sis64_device_error - Log a sis64 device error.
2438  * @ioa_cfg:    ioa config struct
2439  * @hostrcb:    hostrcb struct
2440  *
2441  * Return value:
2442  *      none
2443  **/
2444 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2445                                          struct ipr_hostrcb *hostrcb)
2446 {
2447         struct ipr_hostrcb_type_21_error *error;
2448         char buffer[IPR_MAX_RES_PATH_LENGTH];
2449
2450         error = &hostrcb->hcam.u.error64.u.type_21_error;
2451
2452         ipr_err("-----Failing Device Information-----\n");
2453         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2454                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2456         ipr_err("Device Resource Path: %s\n",
2457                 __ipr_format_res_path(error->res_path,
2458                                       buffer, sizeof(buffer)));
2459         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2460         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2461         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2463         ipr_err("SCSI Sense Data:\n");
2464         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
2466         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2467
2468         ipr_err("Additional IOA Data:\n");
2469         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2470 }
2471
2472 /**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2474  * @ioasc:      IOASC
2475  *
 * This function will return the index into the ipr_error_table
2477  * for the specified IOASC. If the IOASC is not in the table,
2478  * 0 will be returned, which points to the entry used for unknown errors.
2479  *
2480  * Return value:
2481  *      index into the ipr_error_table
2482  **/
2483 static u32 ipr_get_error(u32 ioasc)
2484 {
2485         int i;
2486
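	/* Match on the masked IOASC; entry 0 is the catch-all used for
	 * unknown errors. */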
2487         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2488                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2489                         return i;
2490
2491         return 0;
2492 }
2493
2494 /**
2495  * ipr_handle_log_data - Log an adapter error.
2496  * @ioa_cfg:    ioa config struct
2497  * @hostrcb:    hostrcb struct
2498  *
2499  * This function logs an adapter error to the system.
2500  *
2501  * Return value:
2502  *      none
2503  **/
2504 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2505                                 struct ipr_hostrcb *hostrcb)
2506 {
2507         u32 ioasc;
2508         int error_index;
2509         struct ipr_hostrcb_type_21_error *error;
2510
2511         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2512                 return;
2513
2514         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2515                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2516
2517         if (ioa_cfg->sis64)
2518                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2519         else
2520                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2521
2522         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2523             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2524                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2525                 scsi_report_bus_reset(ioa_cfg->host,
2526                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2527         }
2528
2529         error_index = ipr_get_error(ioasc);
2530
2531         if (!ipr_error_table[error_index].log_hcam)
2532                 return;
2533
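	/* Suppress type 21 hardware command failures whose sense key is
	 * ILLEGAL_REQUEST unless the log level has been raised. */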
2534         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2535             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2536                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2537
2538                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2539                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2540                                 return;
2541         }
2542
2543         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2544
2545         /* Set indication we have logged an error */
2546         ioa_cfg->errors_logged++;
2547
2548         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2549                 return;
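	/* Clamp the advertised length so the overlay loggers below never
	 * index past the end of the raw hostrcb buffer. */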
2550         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2551                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2552
2553         switch (hostrcb->hcam.overlay_id) {
2554         case IPR_HOST_RCB_OVERLAY_ID_2:
2555                 ipr_log_cache_error(ioa_cfg, hostrcb);
2556                 break;
2557         case IPR_HOST_RCB_OVERLAY_ID_3:
2558                 ipr_log_config_error(ioa_cfg, hostrcb);
2559                 break;
2560         case IPR_HOST_RCB_OVERLAY_ID_4:
2561         case IPR_HOST_RCB_OVERLAY_ID_6:
2562                 ipr_log_array_error(ioa_cfg, hostrcb);
2563                 break;
2564         case IPR_HOST_RCB_OVERLAY_ID_7:
2565                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2566                 break;
2567         case IPR_HOST_RCB_OVERLAY_ID_12:
2568                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2569                 break;
2570         case IPR_HOST_RCB_OVERLAY_ID_13:
2571                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2572                 break;
2573         case IPR_HOST_RCB_OVERLAY_ID_14:
2574         case IPR_HOST_RCB_OVERLAY_ID_16:
2575                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2576                 break;
2577         case IPR_HOST_RCB_OVERLAY_ID_17:
2578                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2579                 break;
2580         case IPR_HOST_RCB_OVERLAY_ID_20:
2581                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2582                 break;
2583         case IPR_HOST_RCB_OVERLAY_ID_21:
2584                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2585                 break;
2586         case IPR_HOST_RCB_OVERLAY_ID_23:
2587                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2588                 break;
2589         case IPR_HOST_RCB_OVERLAY_ID_24:
2590         case IPR_HOST_RCB_OVERLAY_ID_26:
2591                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2592                 break;
2593         case IPR_HOST_RCB_OVERLAY_ID_30:
2594                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2595                 break;
2596         case IPR_HOST_RCB_OVERLAY_ID_41:
2597                 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2598                 break;
2599         case IPR_HOST_RCB_OVERLAY_ID_1:
2600         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2601         default:
2602                 ipr_log_generic_error(ioa_cfg, hostrcb);
2603                 break;
2604         }
2605 }
2606
2607 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2608 {
2609         struct ipr_hostrcb *hostrcb;
2610
2611         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2612                                         struct ipr_hostrcb, queue);
2613
2614         if (unlikely(!hostrcb)) {
		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2616                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2617                                                 struct ipr_hostrcb, queue);
2618         }
2619
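	/* Assumes the report queue always holds at least one buffer to
	 * reclaim once the free queue has been exhausted. */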
2620         list_del_init(&hostrcb->queue);
2621         return hostrcb;
2622 }
2623
2624 /**
2625  * ipr_process_error - Op done function for an adapter error log.
2626  * @ipr_cmd:    ipr command struct
2627  *
2628  * This function is the op done function for an error log host
 * controlled async message from the adapter. It will log the error and
2630  * send the HCAM back to the adapter.
2631  *
2632  * Return value:
2633  *      none
2634  **/
2635 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2636 {
2637         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2638         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2639         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2640         u32 fd_ioasc;
2641
2642         if (ioa_cfg->sis64)
2643                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2644         else
2645                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2646
2647         list_del_init(&hostrcb->queue);
2648         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2649
2650         if (!ioasc) {
2651                 ipr_handle_log_data(ioa_cfg, hostrcb);
2652                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2653                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2654         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2655                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2656                 dev_err(&ioa_cfg->pdev->dev,
2657                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2658         }
2659
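	/* Park the processed buffer on the report queue for the worker
	 * thread, then re-arm the adapter with a fresh hostrcb. */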
2660         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2661         schedule_work(&ioa_cfg->work_q);
2662         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2663
2664         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2665 }
2666
2667 /**
2668  * ipr_timeout -  An internally generated op has timed out.
2669  * @t: Timer context used to fetch ipr command struct
2670  *
2671  * This function blocks host requests and initiates an
2672  * adapter reset.
2673  *
2674  * Return value:
2675  *      none
2676  **/
2677 static void ipr_timeout(struct timer_list *t)
2678 {
2679         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2680         unsigned long lock_flags = 0;
2681         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2682
2683         ENTER;
2684         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2685
2686         ioa_cfg->errors_logged++;
2687         dev_err(&ioa_cfg->pdev->dev,
2688                 "Adapter being reset due to command timeout.\n");
2689
2690         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2691                 ioa_cfg->sdt_state = GET_DUMP;
2692
2693         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2694                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2695
2696         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2697         LEAVE;
2698 }
2699
2700 /**
2701  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2702  * @t: Timer context used to fetch ipr command struct
2703  *
2704  * This function blocks host requests and initiates an
2705  * adapter reset.
2706  *
2707  * Return value:
2708  *      none
2709  **/
2710 static void ipr_oper_timeout(struct timer_list *t)
2711 {
2712         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2713         unsigned long lock_flags = 0;
2714         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2715
2716         ENTER;
2717         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2718
2719         ioa_cfg->errors_logged++;
2720         dev_err(&ioa_cfg->pdev->dev,
2721                 "Adapter timed out transitioning to operational.\n");
2722
2723         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2724                 ioa_cfg->sdt_state = GET_DUMP;
2725
2726         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2727                 if (ipr_fastfail)
2728                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2729                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2730         }
2731
2732         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2733         LEAVE;
2734 }
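/*
 * Illustrative sketch (not a quote of the arming sites): both timeout
 * handlers above are plain timer_list callbacks that recover their
 * ipr_cmnd via from_timer(), so a caller arms them roughly as below;
 * the constant name and value are assumptions for illustration only.
 *
 *	ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT;
 *	ipr_cmd->timer.function = ipr_oper_timeout;
 *	add_timer(&ipr_cmd->timer);
 */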
2735
2736 /**
2737  * ipr_find_ses_entry - Find matching SES in SES table
2738  * @res:        resource entry struct of SES
2739  *
2740  * Return value:
2741  *      pointer to SES table entry / NULL on failure
2742  **/
2743 static const struct ipr_ses_table_entry *
2744 ipr_find_ses_entry(struct ipr_resource_entry *res)
2745 {
2746         int i, j, matches;
2747         struct ipr_std_inq_vpids *vpids;
2748         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2749
2750         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2751                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2752                         if (ste->compare_product_id_byte[j] == 'X') {
2753                                 vpids = &res->std_inq_data.vpids;
2754                                 if (vpids->product_id[j] == ste->product_id[j])
2755                                         matches++;
2756                                 else
2757                                         break;
2758                         } else
2759                                 matches++;
2760                 }
2761
2762                 if (matches == IPR_PROD_ID_LEN)
2763                         return ste;
2764         }
2765
2766         return NULL;
2767 }
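/*
 * Matching semantics, with a made-up table entry for illustration: an
 * 'X' in compare_product_id_byte means "this byte of product_id must
 * equal the inquiry data"; any other byte is a don't-care that counts
 * as an automatic match. Assuming the table stores (product_id,
 * compare mask, speed limit) triples, an entry like
 *
 *	{ "EXAMPLE ENCL    ", "XXXXXXXX********", 160 }
 *
 * would require only the first eight product-id bytes to match.
 */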
2768
2769 /**
2770  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2771  * @ioa_cfg:    ioa config struct
2772  * @bus:                SCSI bus
2773  * @bus_width:  bus width
2774  *
2775  * Return value:
2776  *      SCSI bus speed in units of 100KHz (e.g. 1600 means 160 MHz).
2777  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2778  *      MB/sec is twice the clock rate in MHz (e.g. a wide enabled
2779  *      bus clocked at 160 MHz moves up to 320 MB/sec).
2780  **/
2781 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2782 {
2783         struct ipr_resource_entry *res;
2784         const struct ipr_ses_table_entry *ste;
2785         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2786
2787         /* Loop through each config table entry in the config table buffer */
2788         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2789                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2790                         continue;
2791
2792                 if (bus != res->bus)
2793                         continue;
2794
2795                 if (!(ste = ipr_find_ses_entry(res)))
2796                         continue;
2797
2798                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2799         }
2800
2801         return max_xfer_rate;
2802 }
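/*
 * Worked example for the formula above, reading max_bus_speed_limit as
 * MB/sec (inferred from the 160MHz/320MB/sec note in the header): a
 * limit of 320 on a 16-bit bus gives (320 * 10) / (16 / 8) = 1600,
 * i.e. 160 MHz in the 100KHz units this function returns; a limit of
 * 160 on the same bus gives 800, i.e. 80 MHz.
 */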
2803
2804 /**
2805  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2806  * @ioa_cfg:            ioa config struct
2807  * @max_delay:          max delay in micro-seconds to wait
2808  *
2809  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2810  *
2811  * Return value:
2812  *      0 on success / other on failure
2813  **/
2814 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2815 {
2816         volatile u32 pcii_reg;
2817         int delay = 1;
2818
2819         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2820         while (delay < max_delay) {
2821                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2822
2823                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2824                         return 0;
2825
2826                 /* udelay cannot be used if delay is more than a few milliseconds */
2827                 if ((delay / 1000) > MAX_UDELAY_MS)
2828                         mdelay(delay / 1000);
2829                 else
2830                         udelay(delay);
2831
2832                 delay += delay;
2833         }
2834         return -EIO;
2835 }
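/*
 * The loop above backs off exponentially: the delay doubles each pass
 * (1, 2, 4, ... microseconds), so for a max_delay of, say, 200000us the
 * interrupt register is read only about 18 times instead of being
 * hammered in a tight loop, and the mdelay() branch keeps long waits
 * legal where a single large udelay() would not be.
 */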
2836
2837 /**
2838  * ipr_get_sis64_dump_data_section - Dump IOA memory
2839  * @ioa_cfg:                    ioa config struct
2840  * @start_addr:                 adapter address to dump
2841  * @dest:                       destination kernel buffer
2842  * @length_in_words:            length to dump in 4 byte words
2843  *
2844  * Return value:
2845  *      0 on success
2846  **/
2847 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2848                                            u32 start_addr,
2849                                            __be32 *dest, u32 length_in_words)
2850 {
2851         int i;
2852
2853         for (i = 0; i < length_in_words; i++) {
2854                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2855                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2856                 dest++;
2857         }
2858
2859         return 0;
2860 }
2861
2862 /**
2863  * ipr_get_ldump_data_section - Dump IOA memory
2864  * @ioa_cfg:                    ioa config struct
2865  * @start_addr:                 adapter address to dump
2866  * @dest:                               destination kernel buffer
2867  * @length_in_words:    length to dump in 4 byte words
2868  *
2869  * Return value:
2870  *      0 on success / -EIO on failure
2871  **/
2872 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2873                                       u32 start_addr,
2874                                       __be32 *dest, u32 length_in_words)
2875 {
2876         volatile u32 temp_pcii_reg;
2877         int i, delay = 0;
2878
2879         if (ioa_cfg->sis64)
2880                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2881                                                        dest, length_in_words);
2882
2883         /* Write IOA interrupt reg starting LDUMP state  */
2884         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2885                ioa_cfg->regs.set_uproc_interrupt_reg32);
2886
2887         /* Wait for IO debug acknowledge */
2888         if (ipr_wait_iodbg_ack(ioa_cfg,
2889                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2890                 dev_err(&ioa_cfg->pdev->dev,
2891                         "IOA dump long data transfer timeout\n");
2892                 return -EIO;
2893         }
2894
2895         /* Signal LDUMP interlocked - clear IO debug ack */
2896         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2897                ioa_cfg->regs.clr_interrupt_reg);
2898
2899         /* Write Mailbox with starting address */
2900         writel(start_addr, ioa_cfg->ioa_mailbox);
2901
2902         /* Signal address valid - clear IOA Reset alert */
2903         writel(IPR_UPROCI_RESET_ALERT,
2904                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2905
2906         for (i = 0; i < length_in_words; i++) {
2907                 /* Wait for IO debug acknowledge */
2908                 if (ipr_wait_iodbg_ack(ioa_cfg,
2909                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2910                         dev_err(&ioa_cfg->pdev->dev,
2911                                 "IOA dump short data transfer timeout\n");
2912                         return -EIO;
2913                 }
2914
2915                 /* Read data from mailbox and increment destination pointer */
2916                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2917                 dest++;
2918
2919                 /* For all but the last word of data, signal data received */
2920                 if (i < (length_in_words - 1)) {
2921                         /* Signal dump data received - Clear IO debug Ack */
2922                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2923                                ioa_cfg->regs.clr_interrupt_reg);
2924                 }
2925         }
2926
2927         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2928         writel(IPR_UPROCI_RESET_ALERT,
2929                ioa_cfg->regs.set_uproc_interrupt_reg32);
2930
2931         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2932                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2933
2934         /* Signal dump data received - Clear IO debug Ack */
2935         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2936                ioa_cfg->regs.clr_interrupt_reg);
2937
2938         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2939         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2940                 temp_pcii_reg =
2941                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2942
2943                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2944                         return 0;
2945
2946                 udelay(10);
2947                 delay += 10;
2948         }
2949
2950         return 0;
2951 }
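/*
 * Handshake summary for the fmt2 path above (each step is already in
 * the code; this is just the protocol at a glance):
 *
 *   1. Raise RESET_ALERT + IO_DEBUG_ALERT to put the IOA in LDUMP state.
 *   2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it to interlock.
 *   3. Write the start address to the mailbox and drop RESET_ALERT.
 *   4. For each word: wait for ack, read the mailbox, ack all but the
 *      last word.
 *   5. Raise RESET_ALERT, clear IO_DEBUG_ALERT, ack the final word, and
 *      poll until the IOA drops RESET_ALERT to signal LDUMP exit.
 */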
2952
2953 #ifdef CONFIG_SCSI_IPR_DUMP
2954 /**
2955  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2956  * @ioa_cfg:            ioa config struct
2957  * @pci_address:        adapter address
2958  * @length:                     length of data to copy
2959  *
2960  * Copy data from PCI adapter to kernel buffer.
2961  * Note: length MUST be a 4 byte multiple
2962  * Return value:
2963  *      0 on success / other on failure
2964  **/
2965 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2966                         unsigned long pci_address, u32 length)
2967 {
2968         int bytes_copied = 0;
2969         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2970         __be32 *page;
2971         unsigned long lock_flags = 0;
2972         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2973
2974         if (ioa_cfg->sis64)
2975                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2976         else
2977                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2978
2979         while (bytes_copied < length &&
2980                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2981                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2982                     ioa_dump->page_offset == 0) {
2983                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2984
2985                         if (!page) {
2986                                 ipr_trace;
2987                                 return bytes_copied;
2988                         }
2989
2990                         ioa_dump->page_offset = 0;
2991                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2992                         ioa_dump->next_page_index++;
2993                 } else
2994                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2995
2996                 rem_len = length - bytes_copied;
2997                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2998                 cur_len = min(rem_len, rem_page_len);
2999
3000                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3001                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3002                         rc = -EIO;
3003                 } else {
3004                         rc = ipr_get_ldump_data_section(ioa_cfg,
3005                                                         pci_address + bytes_copied,
3006                                                         &page[ioa_dump->page_offset / 4],
3007                                                         (cur_len / sizeof(u32)));
3008                 }
3009                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3010
3011                 if (!rc) {
3012                         ioa_dump->page_offset += cur_len;
3013                         bytes_copied += cur_len;
3014                 } else {
3015                         ipr_trace;
3016                         break;
3017                 }
3018                 schedule();
3019         }
3020
3021         return bytes_copied;
3022 }
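/*
 * Chunking example for the loop above, assuming 4K pages: a 10000 byte
 * copy proceeds as 4096 + 4096 + 1808, allocating a fresh GFP_ATOMIC
 * page whenever page_offset wraps past PAGE_SIZE, taking the host lock
 * only around each section read (so ABORT_DUMP can interrupt the copy)
 * and calling schedule() between chunks to stay preemption friendly.
 */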
3023
3024 /**
3025  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3026  * @hdr:        dump entry header struct
3027  *
3028  * Return value:
3029  *      nothing
3030  **/
3031 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3032 {
3033         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3034         hdr->num_elems = 1;
3035         hdr->offset = sizeof(*hdr);
3036         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3037 }
3038
3039 /**
3040  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3041  * @ioa_cfg:    ioa config struct
3042  * @driver_dump:        driver dump struct
3043  *
3044  * Return value:
3045  *      nothing
3046  **/
3047 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3048                                    struct ipr_driver_dump *driver_dump)
3049 {
3050         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3051
3052         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3053         driver_dump->ioa_type_entry.hdr.len =
3054                 sizeof(struct ipr_dump_ioa_type_entry) -
3055                 sizeof(struct ipr_dump_entry_header);
3056         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3057         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3058         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3059         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3060                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3061                 ucode_vpd->minor_release[1];
3062         driver_dump->hdr.num_entries++;
3063 }
3064
3065 /**
3066  * ipr_dump_version_data - Fill in the driver version in the dump.
3067  * @ioa_cfg:    ioa config struct
3068  * @driver_dump:        driver dump struct
3069  *
3070  * Return value:
3071  *      nothing
3072  **/
3073 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3074                                   struct ipr_driver_dump *driver_dump)
3075 {
3076         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3077         driver_dump->version_entry.hdr.len =
3078                 sizeof(struct ipr_dump_version_entry) -
3079                 sizeof(struct ipr_dump_entry_header);
3080         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3081         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3082         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3083         driver_dump->hdr.num_entries++;
3084 }
3085
3086 /**
3087  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3088  * @ioa_cfg:    ioa config struct
3089  * @driver_dump:        driver dump struct
3090  *
3091  * Return value:
3092  *      nothing
3093  **/
3094 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3095                                    struct ipr_driver_dump *driver_dump)
3096 {
3097         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3098         driver_dump->trace_entry.hdr.len =
3099                 sizeof(struct ipr_dump_trace_entry) -
3100                 sizeof(struct ipr_dump_entry_header);
3101         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3102         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3103         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3104         driver_dump->hdr.num_entries++;
3105 }
3106
3107 /**
3108  * ipr_dump_location_data - Fill in the IOA location in the dump.
3109  * @ioa_cfg:    ioa config struct
3110  * @driver_dump:        driver dump struct
3111  *
3112  * Return value:
3113  *      nothing
3114  **/
3115 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3116                                    struct ipr_driver_dump *driver_dump)
3117 {
3118         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3119         driver_dump->location_entry.hdr.len =
3120                 sizeof(struct ipr_dump_location_entry) -
3121                 sizeof(struct ipr_dump_entry_header);
3122         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3123         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3124         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3125         driver_dump->hdr.num_entries++;
3126 }
3127
3128 /**
3129  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3130  * @ioa_cfg:    ioa config struct
3131  * @dump:               dump struct
3132  *
3133  * Return value:
3134  *      nothing
3135  **/
3136 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3137 {
3138         unsigned long start_addr, sdt_word;
3139         unsigned long lock_flags = 0;
3140         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3141         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3142         u32 num_entries, max_num_entries, start_off, end_off;
3143         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3144         struct ipr_sdt *sdt;
3145         int valid = 1;
3146         int i;
3147
3148         ENTER;
3149
3150         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3151
3152         if (ioa_cfg->sdt_state != READ_DUMP) {
3153                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3154                 return;
3155         }
3156
3157         if (ioa_cfg->sis64) {
3158                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3159                 ssleep(IPR_DUMP_DELAY_SECONDS);
3160                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3161         }
3162
3163         start_addr = readl(ioa_cfg->ioa_mailbox);
3164
3165         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3166                 dev_err(&ioa_cfg->pdev->dev,
3167                         "Invalid dump table format: %lx\n", start_addr);
3168                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3169                 return;
3170         }
3171
3172         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3173
3174         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3175
3176         /* Initialize the overall dump header */
3177         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3178         driver_dump->hdr.num_entries = 1;
3179         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3180         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3181         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3182         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3183
3184         ipr_dump_version_data(ioa_cfg, driver_dump);
3185         ipr_dump_location_data(ioa_cfg, driver_dump);
3186         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3187         ipr_dump_trace_data(ioa_cfg, driver_dump);
3188
3189         /* Update dump_header */
3190         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3191
3192         /* IOA Dump entry */
3193         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3194         ioa_dump->hdr.len = 0;
3195         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3196         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3197
3198         /* First entries in sdt are actually a list of dump addresses and
3199          * lengths to gather the real dump data. sdt represents the pointer
3200          * to the ioa generated dump table. Dump data will be extracted based
3201          * on entries in this table */
3202         sdt = &ioa_dump->sdt;
3203
3204         if (ioa_cfg->sis64) {
3205                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3206                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3207         } else {
3208                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3209                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3210         }
3211
3212         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3213                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3214         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3215                                         bytes_to_copy / sizeof(__be32));
3216
3217         /* Smart Dump table is ready to use and the first entry is valid */
3218         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3219             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3220                 dev_err(&ioa_cfg->pdev->dev,
3221                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3222                         rc, be32_to_cpu(sdt->hdr.state));
3223                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3224                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3225                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3226                 return;
3227         }
3228
3229         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3230
3231         if (num_entries > max_num_entries)
3232                 num_entries = max_num_entries;
3233
3234         /* Update dump length to the actual data to be copied */
3235         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3236         if (ioa_cfg->sis64)
3237                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3238         else
3239                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3240
3241         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3242
3243         for (i = 0; i < num_entries; i++) {
3244                 if (ioa_dump->hdr.len > max_dump_size) {
3245                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3246                         break;
3247                 }
3248
3249                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3250                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3251                         if (ioa_cfg->sis64)
3252                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3253                         else {
3254                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3255                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3256
3257                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3258                                         bytes_to_copy = end_off - start_off;
3259                                 else
3260                                         valid = 0;
3261                         }
3262                         if (valid) {
3263                                 if (bytes_to_copy > max_dump_size) {
3264                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3265                                         continue;
3266                                 }
3267
3268                                 /* Copy data from adapter to driver buffers */
3269                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3270                                                             bytes_to_copy);
3271
3272                                 ioa_dump->hdr.len += bytes_copied;
3273
3274                                 if (bytes_copied != bytes_to_copy) {
3275                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3276                                         break;
3277                                 }
3278                         }
3279                 }
3280         }
3281
3282         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3283
3284         /* Update dump_header */
3285         driver_dump->hdr.len += ioa_dump->hdr.len;
3286         wmb();
3287         ioa_cfg->sdt_state = DUMP_OBTAINED;
3288         LEAVE;
3289 }
3290
3291 #else
3292 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3293 #endif
3294
3295 /**
3296  * ipr_release_dump - Free adapter dump memory
3297  * @kref:       kref struct
3298  *
3299  * Return value:
3300  *      nothing
3301  **/
3302 static void ipr_release_dump(struct kref *kref)
3303 {
3304         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3305         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3306         unsigned long lock_flags = 0;
3307         int i;
3308
3309         ENTER;
3310         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3311         ioa_cfg->dump = NULL;
3312         ioa_cfg->sdt_state = INACTIVE;
3313         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3314
3315         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3316                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3317
3318         vfree(dump->ioa_dump.ioa_data);
3319         kfree(dump);
3320         LEAVE;
3321 }
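/*
 * The dump's lifetime is reference counted: ipr_release_dump() runs only
 * when the final reference is dropped. The worker thread below uses the
 * standard pattern of pinning the object before working outside the
 * host lock:
 *
 *	kref_get(&dump->kref);
 *	ipr_get_ioa_dump(ioa_cfg, dump);
 *	kref_put(&dump->kref, ipr_release_dump);
 */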
3322
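/**
 * ipr_add_remove_thread - Add and remove devices from the mid-layer
 * @work:               work struct
 *
 * Removes resources flagged for deletion from the SCSI mid-layer and
 * registers resources flagged for addition, dropping the host lock
 * around the mid-layer calls.
 *
 * Return value:
 *      nothing
 **/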
3323 static void ipr_add_remove_thread(struct work_struct *work)
3324 {
3325         unsigned long lock_flags;
3326         struct ipr_resource_entry *res;
3327         struct scsi_device *sdev;
3328         struct ipr_ioa_cfg *ioa_cfg =
3329                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3330         u8 bus, target, lun;
3331         int did_work;
3332
3333         ENTER;
3334         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335
3336 restart:
3337         do {
3338                 did_work = 0;
3339                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3340                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341                         return;
3342                 }
3343
3344                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3345                         if (res->del_from_ml && res->sdev) {
3346                                 did_work = 1;
3347                                 sdev = res->sdev;
3348                                 if (!scsi_device_get(sdev)) {
3349                                         if (!res->add_to_ml)
3350                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3351                                         else
3352                                                 res->del_from_ml = 0;
3353                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354                                         scsi_remove_device(sdev);
3355                                         scsi_device_put(sdev);
3356                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3357                                 }
3358                                 break;
3359                         }
3360                 }
3361         } while (did_work);
3362
3363         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3364                 if (res->add_to_ml) {
3365                         bus = res->bus;
3366                         target = res->target;
3367                         lun = res->lun;
3368                         res->add_to_ml = 0;
3369                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3370                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3371                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372                         goto restart;
3373                 }
3374         }
3375
3376         ioa_cfg->scan_done = 1;
3377         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3378         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3379         LEAVE;
3380 }
3381
3382 /**
3383  * ipr_worker_thread - Worker thread
3384  * @work:               ioa config struct
3385  *
3386  * Called at task level from a work thread. This function takes care
3387  * of adding and removing device from the mid-layer as configuration
3388  * changes are detected by the adapter.
3389  *
3390  * Return value:
3391  *      nothing
3392  **/
3393 static void ipr_worker_thread(struct work_struct *work)
3394 {
3395         unsigned long lock_flags;
3396         struct ipr_dump *dump;
3397         struct ipr_ioa_cfg *ioa_cfg =
3398                 container_of(work, struct ipr_ioa_cfg, work_q);
3399
3400         ENTER;
3401         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3402
3403         if (ioa_cfg->sdt_state == READ_DUMP) {
3404                 dump = ioa_cfg->dump;
3405                 if (!dump) {
3406                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3407                         return;
3408                 }
3409                 kref_get(&dump->kref);
3410                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411                 ipr_get_ioa_dump(ioa_cfg, dump);
3412                 kref_put(&dump->kref, ipr_release_dump);
3413
3414                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3416                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3417                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3418                 return;
3419         }
3420
3421         if (ioa_cfg->scsi_unblock) {
3422                 ioa_cfg->scsi_unblock = 0;
3423                 ioa_cfg->scsi_blocked = 0;
3424                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425                 scsi_unblock_requests(ioa_cfg->host);
3426                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
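                /* A reset may have blocked requests and set scsi_blocked
                 * again while the host lock was dropped above; if so,
                 * honor it by re-blocking here. */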
3427                 if (ioa_cfg->scsi_blocked)
3428                         scsi_block_requests(ioa_cfg->host);
3429         }
3430
3431         if (!ioa_cfg->scan_enabled) {
3432                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433                 return;
3434         }
3435
3436         schedule_work(&ioa_cfg->scsi_add_work_q);
3437
3438         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3439         LEAVE;
3440 }
3441
3442 #ifdef CONFIG_SCSI_IPR_TRACE
3443 /**
3444  * ipr_read_trace - Dump the adapter trace
3445  * @filp:               open sysfs file
3446  * @kobj:               kobject struct
3447  * @bin_attr:           bin_attribute struct
3448  * @buf:                buffer
3449  * @off:                offset
3450  * @count:              buffer size
3451  *
3452  * Return value:
3453  *      number of bytes printed to buffer
3454  **/
3455 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3456                               struct bin_attribute *bin_attr,
3457                               char *buf, loff_t off, size_t count)
3458 {
3459         struct device *dev = kobj_to_dev(kobj);
3460         struct Scsi_Host *shost = class_to_shost(dev);
3461         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3462         unsigned long lock_flags = 0;
3463         ssize_t ret;
3464
3465         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3466         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3467                                 IPR_TRACE_SIZE);
3468         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3469
3470         return ret;
3471 }
3472
3473 static struct bin_attribute ipr_trace_attr = {
3474         .attr = {
3475                 .name = "trace",
3476                 .mode = S_IRUGO,
3477         },
3478         .size = 0,
3479         .read = ipr_read_trace,
3480 };
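/*
 * Userspace view (path assumed from the usual scsi_host sysfs layout,
 * and assuming the driver registers this attribute on the host's class
 * device): the trace buffer shows up as a read-only binary file such as
 * /sys/class/scsi_host/host<N>/trace, and memory_read_from_buffer()
 * gives well-defined short reads at arbitrary offsets, so ordinary
 * tools can page through it.
 */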
3481 #endif
3482
3483 /**
3484  * ipr_show_fw_version - Show the firmware version
3485  * @dev:        class device struct
3486  * @attr:       device attribute (unused)
3487  * @buf:        buffer
3488  *
3489  * Return value:
3490  *      number of bytes printed to buffer
3491  **/
3492 static ssize_t ipr_show_fw_version(struct device *dev,
3493                                    struct device_attribute *attr, char *buf)
3494 {
3495         struct Scsi_Host *shost = class_to_shost(dev);
3496         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3497         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3498         unsigned long lock_flags = 0;
3499         int len;
3500
3501         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3502         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3503                        ucode_vpd->major_release, ucode_vpd->card_type,
3504                        ucode_vpd->minor_release[0],
3505                        ucode_vpd->minor_release[1]);
3506         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3507         return len;
3508 }
3509
3510 static struct device_attribute ipr_fw_version_attr = {
3511         .attr = {
3512                 .name =         "fw_version",
3513                 .mode =         S_IRUGO,
3514         },
3515         .show = ipr_show_fw_version,
3516 };
3517
3518 /**
3519  * ipr_show_log_level - Show the adapter's error logging level
3520  * @dev:        class device struct
3521  * @attr:       device attribute (unused)
3522  * @buf:        buffer
3523  *
3524  * Return value:
3525  *      number of bytes printed to buffer
3526  **/
3527 static ssize_t ipr_show_log_level(struct device *dev,
3528                                    struct device_attribute *attr, char *buf)
3529 {
3530         struct Scsi_Host *shost = class_to_shost(dev);
3531         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3532         unsigned long lock_flags = 0;
3533         int len;
3534
3535         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3536         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3537         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3538         return len;
3539 }
3540
3541 /**
3542  * ipr_store_log_level - Change the adapter's error logging level
3543  * @dev:        class device struct
3544  * @attr:       device attribute (unused)
3545  * @buf:        buffer
3546  * @count:      buffer size
3547  *
3548  * Return value:
3549  *      number of bytes consumed from buffer on success
3550  **/
3551 static ssize_t ipr_store_log_level(struct device *dev,
3552                                    struct device_attribute *attr,
3553                                    const char *buf, size_t count)
3554 {
3555         struct Scsi_Host *shost = class_to_shost(dev);
3556         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3557         unsigned long lock_flags = 0;
3558
3559         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3560         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3561         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3562         return strlen(buf);
3563 }
3564
3565 static struct device_attribute ipr_log_level_attr = {
3566         .attr = {
3567                 .name =         "log_level",
3568                 .mode =         S_IRUGO | S_IWUSR,
3569         },
3570         .show = ipr_show_log_level,
3571         .store = ipr_store_log_level
3572 };
3573
3574 /**
3575  * ipr_store_diagnostics - IOA Diagnostics interface
3576  * @dev:        device struct
3577  * @attr:       device attribute (unused)
3578  * @buf:        buffer
3579  * @count:      buffer size
3580  *
3581  * This function will reset the adapter and wait a reasonable
3582  * amount of time for any errors that the adapter might log.
3583  *
3584  * Return value:
3585  *      count on success / other on failure
3586  **/
3587 static ssize_t ipr_store_diagnostics(struct device *dev,
3588                                      struct device_attribute *attr,
3589                                      const char *buf, size_t count)
3590 {
3591         struct Scsi_Host *shost = class_to_shost(dev);
3592         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3593         unsigned long lock_flags = 0;
3594         int rc = count;
3595
3596         if (!capable(CAP_SYS_ADMIN))
3597                 return -EACCES;
3598
3599         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3600         while (ioa_cfg->in_reset_reload) {
3601                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3602                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3603                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3604         }
3605
3606         ioa_cfg->errors_logged = 0;
3607         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3608
3609         if (ioa_cfg->in_reset_reload) {
3610                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612
3613                 /* Wait for a second for any errors to be logged */
3614                 msleep(1000);
3615         } else {
3616                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3617                 return -EIO;
3618         }
3619
3620         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3621         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3622                 rc = -EIO;
3623         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624
3625         return rc;
3626 }
3627
3628 static struct device_attribute ipr_diagnostics_attr = {
3629         .attr = {
3630                 .name =         "run_diagnostics",
3631                 .mode =         S_IWUSR,
3632         },
3633         .store = ipr_store_diagnostics
3634 };
3635
3636 /**
3637  * ipr_show_adapter_state - Show the adapter's state
3638  * @dev:        device struct
3639  * @attr:       device attribute (unused)
3640  * @buf:        buffer
3641  *
3642  * Return value:
3643  *      number of bytes printed to buffer
3644  **/
3645 static ssize_t ipr_show_adapter_state(struct device *dev,
3646                                       struct device_attribute *attr, char *buf)
3647 {
3648         struct Scsi_Host *shost = class_to_shost(dev);
3649         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3650         unsigned long lock_flags = 0;
3651         int len;
3652
3653         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3655                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3656         else
3657                 len = snprintf(buf, PAGE_SIZE, "online\n");
3658         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3659         return len;
3660 }
3661
3662 /**
3663  * ipr_store_adapter_state - Change adapter state
3664  * @dev:        device struct
3665  * @attr:       device attribute (unused)
3666  * @buf:        buffer
3667  * @count:      buffer size
3668  *
3669  * This function will change the adapter's state.
3670  *
3671  * Return value:
3672  *      count on success / other on failure
3673  **/
3674 static ssize_t ipr_store_adapter_state(struct device *dev,
3675                                        struct device_attribute *attr,
3676                                        const char *buf, size_t count)
3677 {
3678         struct Scsi_Host *shost = class_to_shost(dev);
3679         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3680         unsigned long lock_flags;
3681         int result = count, i;
3682
3683         if (!capable(CAP_SYS_ADMIN))
3684                 return -EACCES;
3685
3686         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3687         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3688             !strncmp(buf, "online", 6)) {
3689                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3690                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3691                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3692                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3693                 }
3694                 wmb();
3695                 ioa_cfg->reset_retries = 0;
3696                 ioa_cfg->in_ioa_bringdown = 0;
3697                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3698         }
3699         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3700         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3701
3702         return result;
3703 }
3704
3705 static struct device_attribute ipr_ioa_state_attr = {
3706         .attr = {
3707                 .name =         "online_state",
3708                 .mode =         S_IRUGO | S_IWUSR,
3709         },
3710         .show = ipr_show_adapter_state,
3711         .store = ipr_store_adapter_state
3712 };
3713
3714 /**
3715  * ipr_store_reset_adapter - Reset the adapter
3716  * @dev:        device struct
3717  * @attr:       device attribute (unused)
3718  * @buf:        buffer
3719  * @count:      buffer size
3720  *
3721  * This function will reset the adapter.
3722  *
3723  * Return value:
3724  *      count on success / other on failure
3725  **/
3726 static ssize_t ipr_store_reset_adapter(struct device *dev,
3727                                        struct device_attribute *attr,
3728                                        const char *buf, size_t count)
3729 {
3730         struct Scsi_Host *shost = class_to_shost(dev);
3731         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3732         unsigned long lock_flags;
3733         int result = count;
3734
3735         if (!capable(CAP_SYS_ADMIN))
3736                 return -EACCES;
3737
3738         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3739         if (!ioa_cfg->in_reset_reload)
3740                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3741         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3742         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3743
3744         return result;
3745 }
3746
3747 static struct device_attribute ipr_ioa_reset_attr = {
3748         .attr = {
3749                 .name =         "reset_host",
3750                 .mode =         S_IWUSR,
3751         },
3752         .store = ipr_store_reset_adapter
3753 };
3754
3755 static int ipr_iopoll(struct irq_poll *iop, int budget);
3756 /**
3757  * ipr_show_iopoll_weight - Show ipr polling mode
3758  * @dev:        class device struct
3759  * @attr:       device attribute (unused)
3760  * @buf:        buffer
3761  *
3762  * Return value:
3763  *      number of bytes printed to buffer
3764  **/
3765 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3766                                    struct device_attribute *attr, char *buf)
3767 {
3768         struct Scsi_Host *shost = class_to_shost(dev);
3769         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3770         unsigned long lock_flags = 0;
3771         int len;
3772
3773         spin_lock_irqsave(shost->host_lock, lock_flags);
3774         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3775         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3776
3777         return len;
3778 }
3779
3780 /**
3781  * ipr_store_iopoll_weight - Change the adapter's polling mode
3782  * @dev:        class device struct
3783  * @attr:       device attribute (unused)
3784  * @buf:        buffer
3785  * @count:      buffer size
3786  *
3787  * Return value:
3788  *      number of bytes consumed from buffer on success
3789  **/
3790 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3791                                         struct device_attribute *attr,
3792                                         const char *buf, size_t count)
3793 {
3794         struct Scsi_Host *shost = class_to_shost(dev);
3795         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3796         unsigned long user_iopoll_weight;
3797         unsigned long lock_flags = 0;
3798         int i;
3799
3800         if (!ioa_cfg->sis64) {
3801                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3802                 return -EINVAL;
3803         }
3804         if (kstrtoul(buf, 10, &user_iopoll_weight))
3805                 return -EINVAL;
3806
3807         if (user_iopoll_weight > 256) {
3808                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3809                 return -EINVAL;
3810         }
3811
3812         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3813                 dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight matches the current weight\n");
3814                 return strlen(buf);
3815         }
3816
3817         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3818                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3819                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3820         }
3821
3822         spin_lock_irqsave(shost->host_lock, lock_flags);
3823         ioa_cfg->iopoll_weight = user_iopoll_weight;
3824         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3825                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3826                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3827                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3828                 }
3829         }
3830         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3831
3832         return strlen(buf);
3833 }
3834
3835 static struct device_attribute ipr_iopoll_weight_attr = {
3836         .attr = {
3837                 .name =         "iopoll_weight",
3838                 .mode =         S_IRUGO | S_IWUSR,
3839         },
3840         .show = ipr_show_iopoll_weight,
3841         .store = ipr_store_iopoll_weight
3842 };
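/*
 * Ordering note for the store routine above: when polling was active it
 * is disabled on every secondary hrrq before the weight changes, and
 * irq_poll_init() re-arms them only when the new weight is non-zero, so
 * writing 0 leaves completion fully interrupt driven.
 */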
3843
3844 /**
3845  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3846  * @buf_len:            buffer length
3847  *
3848  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3849  * list to use for microcode download
3850  *
3851  * Return value:
3852  *      pointer to sglist / NULL on failure
3853  **/
3854 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3855 {
3856         int sg_size, order;
3857         struct ipr_sglist *sglist;
3858
3859         /* Get the minimum size per scatter/gather element */
3860         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3861
3862         /* Get the actual size per element */
3863         order = get_order(sg_size);
3864
3865         /* Allocate a scatter/gather list for the DMA */
3866         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3867         if (sglist == NULL) {
3868                 ipr_trace;
3869                 return NULL;
3870         }
3871         sglist->order = order;
3872         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3873                                               &sglist->num_sg);
3874         if (!sglist->scatterlist) {
3875                 kfree(sglist);
3876                 return NULL;
3877         }
3878
3879         return sglist;
3880 }
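/*
 * Sizing example with assumed numbers: for a 4MB image and an
 * IPR_MAX_SGLIST of 64 (value assumed for illustration), sg_size is
 * 4194304 / 63 = 66576 bytes; get_order() rounds that up to order 5
 * (32 pages, 131072 bytes) on a 4K-page system, and sgl_alloc_order()
 * hands back enough order-5 chunks to cover the image, reporting the
 * element count through sglist->num_sg.
 */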
3881
3882 /**
3883  * ipr_free_ucode_buffer - Frees a microcode download buffer
3884  * @sglist:             scatter/gather list pointer
3885  *
3886  * Free a DMA'able ucode download buffer previously allocated with
3887  * ipr_alloc_ucode_buffer
3888  *
3889  * Return value:
3890  *      nothing
3891  **/
3892 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3893 {
3894         sgl_free_order(sglist->scatterlist, sglist->order);
3895         kfree(sglist);
3896 }
3897
3898 /**
3899  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3900  * @sglist:             scatter/gather list pointer
3901  * @buffer:             buffer pointer
3902  * @len:                buffer length
3903  *
3904  * Copy a microcode image from a user buffer into a buffer allocated by
3905  * ipr_alloc_ucode_buffer
3906  *
3907  * Return value:
3908  *      0 on success / other on failure
3909  **/
3910 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3911                                  u8 *buffer, u32 len)
3912 {
3913         int bsize_elem, i, result = 0;
3914         struct scatterlist *sg;
3915         void *kaddr;
3916
3917         /* Determine the actual number of bytes per element */
3918         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3919
3920         sg = sglist->scatterlist;
3921
3922         for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3923                         buffer += bsize_elem) {
3924                 struct page *page = sg_page(sg);
3925
3926                 kaddr = kmap(page);
3927                 memcpy(kaddr, buffer, bsize_elem);
3928                 kunmap(page);
3929
3930                 sg->length = bsize_elem;
3936         }
3937
3938         if (len % bsize_elem) {
3939                 struct page *page = sg_page(sg);
3940
3941                 kaddr = kmap(page);
3942                 memcpy(kaddr, buffer, len % bsize_elem);
3943                 kunmap(page);
3944
3945                 sg->length = len % bsize_elem;
3946         }
3947
3948         sglist->buffer_len = len;
3949         return result;
3950 }
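/*
 * Worked example, continuing the order-5 assumption above with 4K
 * pages: bsize_elem is 4096 * 32 = 131072 bytes per element, so a
 * 300000 byte image fills two full elements (262144 bytes) in the main
 * loop and the remaining 37856 bytes land in a third element whose
 * sg->length is trimmed to match.
 */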
3951
3952 /**
3953  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3954  * @ipr_cmd:            ipr command struct
3955  * @sglist:             scatter/gather list
3956  *
3957  * Builds a microcode download IOA data list (IOADL).
3958  *
3959  **/
3960 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3961                                     struct ipr_sglist *sglist)
3962 {
3963         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3964         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3965         struct scatterlist *scatterlist = sglist->scatterlist;
3966         struct scatterlist *sg;
3967         int i;
3968
3969         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3970         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3971         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3972
3973         ioarcb->ioadl_len =
3974                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3975         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3976                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3977                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3978                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3979         }
3980
3981         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3982 }
3983
3984 /**
3985  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3986  * @ipr_cmd:    ipr command struct
3987  * @sglist:             scatter/gather list
3988  *
3989  * Builds a microcode download IOA data list (IOADL).
3990  *
3991  **/
3992 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3993                                   struct ipr_sglist *sglist)
3994 {
3995         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3996         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3997         struct scatterlist *scatterlist = sglist->scatterlist;
3998         struct scatterlist *sg;
3999         int i;
4000
4001         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4002         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4003         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4004
4005         ioarcb->ioadl_len =
4006                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4007
4008         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
4009                 ioadl[i].flags_and_data_len =
4010                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
4011                 ioadl[i].address =
4012                         cpu_to_be32(sg_dma_address(sg));
4013         }
4014
4015         ioadl[i-1].flags_and_data_len |=
4016                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4017 }
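/*
 * The 32-bit variant above differs from ipr_build_ucode_ioadl64() only
 * in descriptor layout: sis32 packs the WRITE flag and length into a
 * single big-endian word next to a 32-bit address, while sis64 carries
 * separate flag and length words plus a 64-bit address; both mark the
 * final element with IPR_IOADL_FLAGS_LAST.
 */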
4018
4019 /**
4020  * ipr_update_ioa_ucode - Update IOA's microcode
4021  * @ioa_cfg:    ioa config struct
4022  * @sglist:             scatter/gather list
4023  *
4024  * Initiate an adapter reset to update the IOA's microcode
4025  *
4026  * Return value:
4027  *      0 on success / -EIO on failure
4028  **/
4029 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4030                                 struct ipr_sglist *sglist)
4031 {
4032         unsigned long lock_flags;
4033
4034         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4035         while (ioa_cfg->in_reset_reload) {
4036                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4037                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4038                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4039         }
4040
4041         if (ioa_cfg->ucode_sglist) {
4042                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4043                 dev_err(&ioa_cfg->pdev->dev,
4044                         "Microcode download already in progress\n");
4045                 return -EIO;
4046         }
4047
4048         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4049                                         sglist->scatterlist, sglist->num_sg,
4050                                         DMA_TO_DEVICE);
4051
4052         if (!sglist->num_dma_sg) {
4053                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4054                 dev_err(&ioa_cfg->pdev->dev,
4055                         "Failed to map microcode download buffer!\n");
4056                 return -EIO;
4057         }
4058
4059         ioa_cfg->ucode_sglist = sglist;
4060         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4061         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4062         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4063
4064         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4065         ioa_cfg->ucode_sglist = NULL;
4066         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4067         return 0;
4068 }
4069
4070 /**
4071  * ipr_store_update_fw - Update the firmware on the adapter
4072  * @dev:        device struct
4073  * @attr:       device attribute (unused)
4074  * @buf:        buffer
4075  * @count:      buffer size
4076  *
4077  * This function will update the firmware on the adapter.
4078  *
4079  * Return value:
4080  *      count on success / other on failure
4081  **/
4082 static ssize_t ipr_store_update_fw(struct device *dev,
4083                                    struct device_attribute *attr,
4084                                    const char *buf, size_t count)
4085 {
4086         struct Scsi_Host *shost = class_to_shost(dev);
4087         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4088         struct ipr_ucode_image_header *image_hdr;
4089         const struct firmware *fw_entry;
4090         struct ipr_sglist *sglist;
4091         char fname[100];
4092         char *src;
4093         char *endline;
4094         int result, dnld_size;
4095
4096         if (!capable(CAP_SYS_ADMIN))
4097                 return -EACCES;
4098
4099         snprintf(fname, sizeof(fname), "%s", buf);
4100
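        /* sysfs writes usually include a trailing newline; strip it so
         * the name matches the firmware file on disk.
         */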
4101         endline = strchr(fname, '\n');
4102         if (endline)
4103                 *endline = '\0';
4104
4105         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4106                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4107                 return -EIO;
4108         }
4109
4110         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4111
4112         src = (char *)image_hdr + be32_to_cpu(image_hdr->header_length);
4113         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4114         sglist = ipr_alloc_ucode_buffer(dnld_size);
4115
4116         if (!sglist) {
4117                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4118                 release_firmware(fw_entry);
4119                 return -ENOMEM;
4120         }
4121
4122         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4123
4124         if (result) {
4125                 dev_err(&ioa_cfg->pdev->dev,
4126                         "Microcode buffer copy to DMA buffer failed\n");
4127                 goto out;
4128         }
4129
4130         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4131
4132         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4133
4134         if (!result)
4135                 result = count;
4136 out:
4137         ipr_free_ucode_buffer(sglist);
4138         release_firmware(fw_entry);
4139         return result;
4140 }
4141
4142 static struct device_attribute ipr_update_fw_attr = {
4143         .attr = {
4144                 .name =         "update_fw",
4145                 .mode =         S_IWUSR,
4146         },
4147         .store = ipr_store_update_fw
4148 };
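
/*
 * Example usage from user space (file and host names are illustrative):
 *
 *   # cp ibm-ipr-ucode.bin /lib/firmware/
 *   # echo ibm-ipr-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks until the download and the adapter reset complete.
 */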
4149
4150 /**
4151  * ipr_show_fw_type - Show the adapter's firmware type.
4152  * @dev:        class device struct
4153  * @attr:       device attribute (unused)
4154  * @buf:        buffer
4155  *
4156  * Return value:
4157  *      number of bytes printed to buffer
4158  **/
4159 static ssize_t ipr_show_fw_type(struct device *dev,
4160                                 struct device_attribute *attr, char *buf)
4161 {
4162         struct Scsi_Host *shost = class_to_shost(dev);
4163         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4164         unsigned long lock_flags = 0;
4165         int len;
4166
4167         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4168         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4169         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4170         return len;
4171 }
4172
4173 static struct device_attribute ipr_ioa_fw_type_attr = {
4174         .attr = {
4175                 .name =         "fw_type",
4176                 .mode =         S_IRUGO,
4177         },
4178         .show = ipr_show_fw_type
4179 };
4180
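/*
 * The async_err_log binary attribute exposes the oldest pending host
 * controlled async message (HCAM): a read returns its contents, and a
 * write of any data retires it so the next one becomes visible.
 */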
4181 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4182                                 struct bin_attribute *bin_attr, char *buf,
4183                                 loff_t off, size_t count)
4184 {
4185         struct device *cdev = kobj_to_dev(kobj);
4186         struct Scsi_Host *shost = class_to_shost(cdev);
4187         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4188         struct ipr_hostrcb *hostrcb;
4189         unsigned long lock_flags = 0;
4190         int ret;
4191
4192         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4193         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4194                                         struct ipr_hostrcb, queue);
4195         if (!hostrcb) {
4196                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4197                 return 0;
4198         }
4199         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4200                                 sizeof(hostrcb->hcam));
4201         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202         return ret;
4203 }
4204
4205 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4206                                 struct bin_attribute *bin_attr, char *buf,
4207                                 loff_t off, size_t count)
4208 {
4209         struct device *cdev = kobj_to_dev(kobj);
4210         struct Scsi_Host *shost = class_to_shost(cdev);
4211         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4212         struct ipr_hostrcb *hostrcb;
4213         unsigned long lock_flags = 0;
4214
4215         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4216         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4217                                         struct ipr_hostrcb, queue);
4218         if (!hostrcb) {
4219                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4220                 return count;
4221         }
4222
4223         /* Reclaim hostrcb before exit */
4224         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4225         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226         return count;
4227 }
4228
4229 static struct bin_attribute ipr_ioa_async_err_log = {
4230         .attr = {
4231                 .name =         "async_err_log",
4232                 .mode =         S_IRUGO | S_IWUSR,
4233         },
4234         .size = 0,
4235         .read = ipr_read_async_err_log,
4236         .write = ipr_next_async_err_log
4237 };
4238
4239 static struct attribute *ipr_ioa_attrs[] = {
4240         &ipr_fw_version_attr.attr,
4241         &ipr_log_level_attr.attr,
4242         &ipr_diagnostics_attr.attr,
4243         &ipr_ioa_state_attr.attr,
4244         &ipr_ioa_reset_attr.attr,
4245         &ipr_update_fw_attr.attr,
4246         &ipr_ioa_fw_type_attr.attr,
4247         &ipr_iopoll_weight_attr.attr,
4248         NULL,
4249 };
4250
4251 ATTRIBUTE_GROUPS(ipr_ioa);
4252
4253 #ifdef CONFIG_SCSI_IPR_DUMP
4254 /**
4255  * ipr_read_dump - Dump the adapter
4256  * @filp:               open sysfs file
4257  * @kobj:               kobject struct
4258  * @bin_attr:           bin_attribute struct
4259  * @buf:                buffer
4260  * @off:                offset
4261  * @count:              buffer size
4262  *
4263  * Return value:
4264  *      number of bytes read on success / other on failure
4265  **/
4266 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4267                              struct bin_attribute *bin_attr,
4268                              char *buf, loff_t off, size_t count)
4269 {
4270         struct device *cdev = kobj_to_dev(kobj);
4271         struct Scsi_Host *shost = class_to_shost(cdev);
4272         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4273         struct ipr_dump *dump;
4274         unsigned long lock_flags = 0;
4275         char *src;
4276         int len, sdt_end;
4277         size_t rc = count;
4278
4279         if (!capable(CAP_SYS_ADMIN))
4280                 return -EACCES;
4281
4282         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283         dump = ioa_cfg->dump;
4284
4285         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4286                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4287                 return 0;
4288         }
4289         kref_get(&dump->kref);
4290         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4291
4292         if (off > dump->driver_dump.hdr.len) {
4293                 kref_put(&dump->kref, ipr_release_dump);
4294                 return 0;
4295         }
4296
4297         if (off + count > dump->driver_dump.hdr.len) {
4298                 count = dump->driver_dump.hdr.len - off;
4299                 rc = count;
4300         }
4301
4302         if (count && off < sizeof(dump->driver_dump)) {
4303                 if (off + count > sizeof(dump->driver_dump))
4304                         len = sizeof(dump->driver_dump) - off;
4305                 else
4306                         len = count;
4307                 src = (u8 *)&dump->driver_dump + off;
4308                 memcpy(buf, src, len);
4309                 buf += len;
4310                 off += len;
4311                 count -= len;
4312         }
4313
4314         off -= sizeof(dump->driver_dump);
4315
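        /* The SDT region size differs by format: 64-bit SIS reports how
         * many entries are actually used, while format 2 always carries a
         * fixed number of entries.
         */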
4316         if (ioa_cfg->sis64)
4317                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4318                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4319                            sizeof(struct ipr_sdt_entry));
4320         else
4321                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4322                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4323
4324         if (count && off < sdt_end) {
4325                 if (off + count > sdt_end)
4326                         len = sdt_end - off;
4327                 else
4328                         len = count;
4329                 src = (u8 *)&dump->ioa_dump + off;
4330                 memcpy(buf, src, len);
4331                 buf += len;
4332                 off += len;
4333                 count -= len;
4334         }
4335
4336         off -= sdt_end;
4337
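        /* The rest of the dump lives in the ioa_data page array; copy it
         * out one page fragment at a time, never letting a single memcpy
         * cross a page boundary.
         */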
4338         while (count) {
4339                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4340                         len = PAGE_ALIGN(off) - off;
4341                 else
4342                         len = count;
4343                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4344                 src += off & ~PAGE_MASK;
4345                 memcpy(buf, src, len);
4346                 buf += len;
4347                 off += len;
4348                 count -= len;
4349         }
4350
4351         kref_put(&dump->kref, ipr_release_dump);
4352         return rc;
4353 }
4354
4355 /**
4356  * ipr_alloc_dump - Prepare for adapter dump
4357  * @ioa_cfg:    ioa config struct
4358  *
4359  * Return value:
4360  *      0 on success / other on failure
4361  **/
4362 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4363 {
4364         struct ipr_dump *dump;
4365         __be32 **ioa_data;
4366         unsigned long lock_flags = 0;
4367
4368         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4369
4370         if (!dump) {
4371                 ipr_err("Dump memory allocation failed\n");
4372                 return -ENOMEM;
4373         }
4374
4375         if (ioa_cfg->sis64)
4376                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4377                                               sizeof(__be32 *)));
4378         else
4379                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4380                                               sizeof(__be32 *)));
4381
4382         if (!ioa_data) {
4383                 ipr_err("Dump memory allocation failed\n");
4384                 kfree(dump);
4385                 return -ENOMEM;
4386         }
4387
4388         dump->ioa_dump.ioa_data = ioa_data;
4389
4390         kref_init(&dump->kref);
4391         dump->ioa_cfg = ioa_cfg;
4392
4393         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4394
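        /* If the adapter isn't idle, a dump is already set up or in
         * progress, so discard what we just allocated.
         */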
4395         if (INACTIVE != ioa_cfg->sdt_state) {
4396                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4397                 vfree(dump->ioa_dump.ioa_data);
4398                 kfree(dump);
4399                 return 0;
4400         }
4401
4402         ioa_cfg->dump = dump;
4403         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4404         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4405                 ioa_cfg->dump_taken = 1;
4406                 schedule_work(&ioa_cfg->work_q);
4407         }
4408         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4409
4410         return 0;
4411 }
4412
4413 /**
4414  * ipr_free_dump - Free adapter dump memory
4415  * @ioa_cfg:    ioa config struct
4416  *
4417  * Return value:
4418  *      0 on success / other on failure
4419  **/
4420 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4421 {
4422         struct ipr_dump *dump;
4423         unsigned long lock_flags = 0;
4424
4425         ENTER;
4426
4427         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4428         dump = ioa_cfg->dump;
4429         if (!dump) {
4430                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4431                 return 0;
4432         }
4433
4434         ioa_cfg->dump = NULL;
4435         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436
4437         kref_put(&dump->kref, ipr_release_dump);
4438
4439         LEAVE;
4440         return 0;
4441 }
4442
4443 /**
4444  * ipr_write_dump - Setup dump state of adapter
4445  * @filp:               open sysfs file
4446  * @kobj:               kobject struct
4447  * @bin_attr:           bin_attribute struct
4448  * @buf:                buffer
4449  * @off:                offset
4450  * @count:              buffer size
4451  *
4452  * Return value:
4453  *      count on success / other on failure
4454  **/
4455 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4456                               struct bin_attribute *bin_attr,
4457                               char *buf, loff_t off, size_t count)
4458 {
4459         struct device *cdev = kobj_to_dev(kobj);
4460         struct Scsi_Host *shost = class_to_shost(cdev);
4461         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4462         int rc;
4463
4464         if (!capable(CAP_SYS_ADMIN))
4465                 return -EACCES;
4466
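        /* '1' arms and allocates an adapter dump; '0' frees it */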
4467         if (buf[0] == '1')
4468                 rc = ipr_alloc_dump(ioa_cfg);
4469         else if (buf[0] == '0')
4470                 rc = ipr_free_dump(ioa_cfg);
4471         else
4472                 return -EINVAL;
4473
4474         if (rc)
4475                 return rc;
4476         else
4477                 return count;
4478 }
4479
4480 static struct bin_attribute ipr_dump_attr = {
4481         .attr = {
4482                 .name = "dump",
4483                 .mode = S_IRUSR | S_IWUSR,
4484         },
4485         .size = 0,
4486         .read = ipr_read_dump,
4487         .write = ipr_write_dump
4488 };
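
/*
 * Typical usage from user space (the host number is illustrative):
 *
 *   # echo 1 > /sys/class/scsi_host/host0/dump     (arm/collect a dump)
 *   # dd if=/sys/class/scsi_host/host0/dump of=ipr.dump bs=4k
 *   # echo 0 > /sys/class/scsi_host/host0/dump     (free the dump buffers)
 */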
4489 #else
4490 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4491 #endif
4492
4493 /**
4494  * ipr_change_queue_depth - Change the device's queue depth
4495  * @sdev:       scsi device struct
4496  * @qdepth:     depth to set
4497  *
4498  * Return value:
4499  *      actual depth set
4500  **/
4501 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4502 {
4503         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4504         struct ipr_resource_entry *res;
4505         unsigned long lock_flags = 0;
4506
4507         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4508         res = (struct ipr_resource_entry *)sdev->hostdata;
4509
4510         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4511                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4512         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4513
4514         scsi_change_queue_depth(sdev, qdepth);
4515         return sdev->queue_depth;
4516 }
4517
4518 /**
4519  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4520  * @dev:        device struct
4521  * @attr:       device attribute structure
4522  * @buf:        buffer
4523  *
4524  * Return value:
4525  *      number of bytes printed to buffer
4526  **/
4527 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4528 {
4529         struct scsi_device *sdev = to_scsi_device(dev);
4530         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4531         struct ipr_resource_entry *res;
4532         unsigned long lock_flags = 0;
4533         ssize_t len = -ENXIO;
4534
4535         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4536         res = (struct ipr_resource_entry *)sdev->hostdata;
4537         if (res)
4538                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4539         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4540         return len;
4541 }
4542
4543 static struct device_attribute ipr_adapter_handle_attr = {
4544         .attr = {
4545                 .name =         "adapter_handle",
4546                 .mode =         S_IRUSR,
4547         },
4548         .show = ipr_show_adapter_handle
4549 };
4550
4551 /**
4552  * ipr_show_resource_path - Show the resource path or the resource address for
4553  *                          this device.
4554  * @dev:        device struct
4555  * @attr:       device attribute structure
4556  * @buf:        buffer
4557  *
4558  * Return value:
4559  *      number of bytes printed to buffer
4560  **/
4561 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4562 {
4563         struct scsi_device *sdev = to_scsi_device(dev);
4564         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4565         struct ipr_resource_entry *res;
4566         unsigned long lock_flags = 0;
4567         ssize_t len = -ENXIO;
4568         char buffer[IPR_MAX_RES_PATH_LENGTH];
4569
4570         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4571         res = (struct ipr_resource_entry *)sdev->hostdata;
4572         if (res && ioa_cfg->sis64)
4573                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4574                                __ipr_format_res_path(res->res_path, buffer,
4575                                                      sizeof(buffer)));
4576         else if (res)
4577                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4578                                res->bus, res->target, res->lun);
4579
4580         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4581         return len;
4582 }
4583
4584 static struct device_attribute ipr_resource_path_attr = {
4585         .attr = {
4586                 .name =         "resource_path",
4587                 .mode =         S_IRUGO,
4588         },
4589         .show = ipr_show_resource_path
4590 };
4591
4592 /**
4593  * ipr_show_device_id - Show the device_id for this device.
4594  * @dev:        device struct
4595  * @attr:       device attribute structure
4596  * @buf:        buffer
4597  *
4598  * Return value:
4599  *      number of bytes printed to buffer
4600  **/
4601 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4602 {
4603         struct scsi_device *sdev = to_scsi_device(dev);
4604         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4605         struct ipr_resource_entry *res;
4606         unsigned long lock_flags = 0;
4607         ssize_t len = -ENXIO;
4608
4609         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4610         res = (struct ipr_resource_entry *)sdev->hostdata;
4611         if (res && ioa_cfg->sis64)
4612                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4613         else if (res)
4614                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4615
4616         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4617         return len;
4618 }
4619
4620 static struct device_attribute ipr_device_id_attr = {
4621         .attr = {
4622                 .name =         "device_id",
4623                 .mode =         S_IRUGO,
4624         },
4625         .show = ipr_show_device_id
4626 };
4627
4628 /**
4629  * ipr_show_resource_type - Show the resource type for this device.
4630  * @dev:        device struct
4631  * @attr:       device attribute structure
4632  * @buf:        buffer
4633  *
4634  * Return value:
4635  *      number of bytes printed to buffer
4636  **/
4637 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4638 {
4639         struct scsi_device *sdev = to_scsi_device(dev);
4640         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4641         struct ipr_resource_entry *res;
4642         unsigned long lock_flags = 0;
4643         ssize_t len = -ENXIO;
4644
4645         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4646         res = (struct ipr_resource_entry *)sdev->hostdata;
4647
4648         if (res)
4649                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4650
4651         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4652         return len;
4653 }
4654
4655 static struct device_attribute ipr_resource_type_attr = {
4656         .attr = {
4657                 .name =         "resource_type",
4658                 .mode =         S_IRUGO,
4659         },
4660         .show = ipr_show_resource_type
4661 };
4662
4663 /**
4664  * ipr_show_raw_mode - Show the adapter's raw mode
4665  * @dev:        class device struct
4666  * @attr:       device attribute (unused)
4667  * @buf:        buffer
4668  *
4669  * Return value:
4670  *      number of bytes printed to buffer
4671  **/
4672 static ssize_t ipr_show_raw_mode(struct device *dev,
4673                                  struct device_attribute *attr, char *buf)
4674 {
4675         struct scsi_device *sdev = to_scsi_device(dev);
4676         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4677         struct ipr_resource_entry *res;
4678         unsigned long lock_flags = 0;
4679         ssize_t len;
4680
4681         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4682         res = (struct ipr_resource_entry *)sdev->hostdata;
4683         if (res)
4684                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4685         else
4686                 len = -ENXIO;
4687         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4688         return len;
4689 }
4690
4691 /**
4692  * ipr_store_raw_mode - Change the adapter's raw mode
4693  * @dev:        class device struct
4694  * @attr:       device attribute (unused)
4695  * @buf:        buffer
4696  * @count:              buffer size
4697  *
4698  * Return value:
4699  *      buffer length on success / -EINVAL or -ENXIO on failure
4700  **/
4701 static ssize_t ipr_store_raw_mode(struct device *dev,
4702                                   struct device_attribute *attr,
4703                                   const char *buf, size_t count)
4704 {
4705         struct scsi_device *sdev = to_scsi_device(dev);
4706         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4707         struct ipr_resource_entry *res;
4708         unsigned long lock_flags = 0;
4709         ssize_t len;
4710
4711         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4712         res = (struct ipr_resource_entry *)sdev->hostdata;
4713         if (res) {
4714                 if (ipr_is_af_dasd_device(res)) {
4715                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4716                         len = strlen(buf);
4717                         if (res->sdev)
4718                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4719                                         res->raw_mode ? "enabled" : "disabled");
4720                 } else
4721                         len = -EINVAL;
4722         } else
4723                 len = -ENXIO;
4724         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4725         return len;
4726 }
4727
4728 static struct device_attribute ipr_raw_mode_attr = {
4729         .attr = {
4730                 .name =         "raw_mode",
4731                 .mode =         S_IRUGO | S_IWUSR,
4732         },
4733         .show = ipr_show_raw_mode,
4734         .store = ipr_store_raw_mode
4735 };
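
/*
 * Example usage from user space (the device address is illustrative);
 * raw mode can only be toggled on AF DASD devices:
 *
 *   # echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */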
4736
4737 static struct attribute *ipr_dev_attrs[] = {
4738         &ipr_adapter_handle_attr.attr,
4739         &ipr_resource_path_attr.attr,
4740         &ipr_device_id_attr.attr,
4741         &ipr_resource_type_attr.attr,
4742         &ipr_raw_mode_attr.attr,
4743         NULL,
4744 };
4745
4746 ATTRIBUTE_GROUPS(ipr_dev);
4747
4748 /**
4749  * ipr_biosparam - Return the HSC mapping
4750  * @sdev:                       scsi device struct
4751  * @block_device:       block device pointer
4752  * @capacity:           capacity of the device
4753  * @parm:                       Array containing returned HSC values.
4754  *
4755  * This function generates the HSC parms that fdisk uses.
4756  * We want to make sure we return something that places partitions
4757  * on 4k boundaries for best performance with the IOA.
4758  *
4759  * Return value:
4760  *      0 on success
4761  **/
4762 static int ipr_biosparam(struct scsi_device *sdev,
4763                          struct block_device *block_device,
4764                          sector_t capacity, int *parm)
4765 {
4766         int heads, sectors;
4767         sector_t cylinders;
4768
4769         heads = 128;
4770         sectors = 32;
4771
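        /* 128 heads * 32 sectors/track = 4096 sectors (2MB) per cylinder,
         * so cylinder-aligned partitions start on 4k boundaries.
         */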
4772         cylinders = capacity;
4773         sector_div(cylinders, (128 * 32));
4774
4775         /* return result */
4776         parm[0] = heads;
4777         parm[1] = sectors;
4778         parm[2] = cylinders;
4779
4780         return 0;
4781 }
4782
4783 /**
4784  * ipr_find_starget - Find target based on bus/target.
4785  * @starget:    scsi target struct
4786  *
4787  * Return value:
4788  *      resource entry pointer if found / NULL if not found
4789  **/
4790 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4791 {
4792         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4793         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4794         struct ipr_resource_entry *res;
4795
4796         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4797                 if ((res->bus == starget->channel) &&
4798                     (res->target == starget->id)) {
4799                         return res;
4800                 }
4801         }
4802
4803         return NULL;
4804 }
4805
4806 static struct ata_port_info sata_port_info;
4807
4808 /**
4809  * ipr_target_alloc - Prepare for commands to a SCSI target
4810  * @starget:    scsi target struct
4811  *
4812  * If the device is a SATA device, this function allocates an
4813  * ATA port with libata, else it does nothing.
4814  *
4815  * Return value:
4816  *      0 on success / non-0 on failure
4817  **/
4818 static int ipr_target_alloc(struct scsi_target *starget)
4819 {
4820         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4821         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4822         struct ipr_sata_port *sata_port;
4823         struct ata_port *ap;
4824         struct ipr_resource_entry *res;
4825         unsigned long lock_flags;
4826
4827         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4828         res = ipr_find_starget(starget);
4829         starget->hostdata = NULL;
4830
4831         if (res && ipr_is_gata(res)) {
4832                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4834                 if (!sata_port)
4835                         return -ENOMEM;
4836
4837                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4838                 if (ap) {
4839                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4840                         sata_port->ioa_cfg = ioa_cfg;
4841                         sata_port->ap = ap;
4842                         sata_port->res = res;
4843
4844                         res->sata_port = sata_port;
4845                         ap->private_data = sata_port;
4846                         starget->hostdata = sata_port;
4847                 } else {
4848                         kfree(sata_port);
4849                         return -ENOMEM;
4850                 }
4851         }
4852         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4853
4854         return 0;
4855 }
4856
4857 /**
4858  * ipr_target_destroy - Destroy a SCSI target
4859  * @starget:    scsi target struct
4860  *
4861  * If the device was a SATA device, this function frees the libata
4862  * ATA port, else it does nothing.
4863  *
4864  **/
4865 static void ipr_target_destroy(struct scsi_target *starget)
4866 {
4867         struct ipr_sata_port *sata_port = starget->hostdata;
4868         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4869         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4870
4871         if (ioa_cfg->sis64) {
4872                 if (!ipr_find_starget(starget)) {
4873                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4874                                 clear_bit(starget->id, ioa_cfg->array_ids);
4875                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4876                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4877                         else if (starget->channel == 0)
4878                                 clear_bit(starget->id, ioa_cfg->target_ids);
4879                 }
4880         }
4881
4882         if (sata_port) {
4883                 starget->hostdata = NULL;
4884                 ata_sas_port_destroy(sata_port->ap);
4885                 kfree(sata_port);
4886         }
4887 }
4888
4889 /**
4890  * ipr_find_sdev - Find device based on bus/target/lun.
4891  * @sdev:       scsi device struct
4892  *
4893  * Return value:
4894  *      resource entry pointer if found / NULL if not found
4895  **/
4896 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4897 {
4898         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4899         struct ipr_resource_entry *res;
4900
4901         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4902                 if ((res->bus == sdev->channel) &&
4903                     (res->target == sdev->id) &&
4904                     (res->lun == sdev->lun))
4905                         return res;
4906         }
4907
4908         return NULL;
4909 }
4910
4911 /**
4912  * ipr_slave_destroy - Unconfigure a SCSI device
4913  * @sdev:       scsi device struct
4914  *
4915  * Return value:
4916  *      nothing
4917  **/
4918 static void ipr_slave_destroy(struct scsi_device *sdev)
4919 {
4920         struct ipr_resource_entry *res;
4921         struct ipr_ioa_cfg *ioa_cfg;
4922         unsigned long lock_flags = 0;
4923
4924         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4925
4926         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4927         res = (struct ipr_resource_entry *) sdev->hostdata;
4928         if (res) {
4929                 if (res->sata_port)
4930                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4931                 sdev->hostdata = NULL;
4932                 res->sdev = NULL;
4933                 res->sata_port = NULL;
4934         }
4935         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4936 }
4937
4938 /**
4939  * ipr_slave_configure - Configure a SCSI device
4940  * @sdev:       scsi device struct
4941  *
4942  * This function configures the specified scsi device.
4943  *
4944  * Return value:
4945  *      0 on success
4946  **/
4947 static int ipr_slave_configure(struct scsi_device *sdev)
4948 {
4949         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4950         struct ipr_resource_entry *res;
4951         struct ata_port *ap = NULL;
4952         unsigned long lock_flags = 0;
4953         char buffer[IPR_MAX_RES_PATH_LENGTH];
4954
4955         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4956         res = sdev->hostdata;
4957         if (res) {
4958                 if (ipr_is_af_dasd_device(res))
4959                         sdev->type = TYPE_RAID;
4960                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4961                         sdev->scsi_level = 4;
4962                         sdev->no_uld_attach = 1;
4963                 }
4964                 if (ipr_is_vset_device(res)) {
4965                         sdev->scsi_level = SCSI_SPC_3;
4966                         sdev->no_report_opcodes = 1;
4967                         blk_queue_rq_timeout(sdev->request_queue,
4968                                              IPR_VSET_RW_TIMEOUT);
4969                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4970                 }
4971                 if (ipr_is_gata(res) && res->sata_port)
4972                         ap = res->sata_port->ap;
4973                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974
4975                 if (ap) {
4976                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4977                         ata_sas_slave_configure(sdev, ap);
4978                 }
4979
4980                 if (ioa_cfg->sis64)
4981                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4982                                     ipr_format_res_path(ioa_cfg,
4983                                 res->res_path, buffer, sizeof(buffer)));
4984                 return 0;
4985         }
4986         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4987         return 0;
4988 }
4989
4990 /**
4991  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4992  * @sdev:       scsi device struct
4993  *
4994  * This function initializes an ATA port so that future commands
4995  * sent through queuecommand will work.
4996  *
4997  * Return value:
4998  *      0 on success
4999  **/
5000 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
5001 {
5002         struct ipr_sata_port *sata_port = NULL;
5003         int rc = -ENXIO;
5004
5005         ENTER;
5006         if (sdev->sdev_target)
5007                 sata_port = sdev->sdev_target->hostdata;
5008         if (sata_port) {
5009                 rc = ata_sas_port_init(sata_port->ap);
5010                 if (rc == 0)
5011                         rc = ata_sas_sync_probe(sata_port->ap);
5012         }
5013
5014         if (rc)
5015                 ipr_slave_destroy(sdev);
5016
5017         LEAVE;
5018         return rc;
5019 }
5020
5021 /**
5022  * ipr_slave_alloc - Prepare for commands to a device.
5023  * @sdev:       scsi device struct
5024  *
5025  * This function saves a pointer to the resource entry
5026  * in the scsi device struct if the device exists. We
5027  * can then use this pointer in ipr_queuecommand when
5028  * handling new commands.
5029  *
5030  * Return value:
5031  *      0 on success / -ENXIO if device does not exist
5032  **/
5033 static int ipr_slave_alloc(struct scsi_device *sdev)
5034 {
5035         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5036         struct ipr_resource_entry *res;
5037         unsigned long lock_flags;
5038         int rc = -ENXIO;
5039
5040         sdev->hostdata = NULL;
5041
5042         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5043
5044         res = ipr_find_sdev(sdev);
5045         if (res) {
5046                 res->sdev = sdev;
5047                 res->add_to_ml = 0;
5048                 res->in_erp = 0;
5049                 sdev->hostdata = res;
5050                 if (!ipr_is_naca_model(res))
5051                         res->needs_sync_complete = 1;
5052                 rc = 0;
5053                 if (ipr_is_gata(res)) {
5054                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5055                         return ipr_ata_slave_alloc(sdev);
5056                 }
5057         }
5058
5059         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5060
5061         return rc;
5062 }
5063
5064 /**
5065  * ipr_match_lun - Match function for specified LUN
5066  * @ipr_cmd:    ipr command struct
5067  * @device:             device to match (sdev)
5068  *
5069  * Returns:
5070  *      1 if command matches sdev / 0 if command does not match sdev
5071  **/
5072 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5073 {
5074         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5075                 return 1;
5076         return 0;
5077 }
5078
5079 /**
5080  * ipr_cmnd_is_free - Check if a command is free or not
5081  * @ipr_cmd:    ipr command struct
5082  *
5083  * Returns:
5084  *      true / false
5085  **/
5086 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5087 {
5088         struct ipr_cmnd *loop_cmd;
5089
5090         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5091                 if (loop_cmd == ipr_cmd)
5092                         return true;
5093         }
5094
5095         return false;
5096 }
5097
5098 /**
5099  * ipr_match_res - Match function for specified resource entry
5100  * @ipr_cmd:    ipr command struct
5101  * @resource:   resource entry to match
5102  *
5103  * Returns:
5104  *      1 if command matches the resource entry / 0 if it does not
5105  **/
5106 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5107 {
5108         struct ipr_resource_entry *res = resource;
5109
5110         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5111                 return 1;
5112         return 0;
5113 }
5114
5115 /**
5116  * ipr_wait_for_ops - Wait for matching commands to complete
5117  * @ioa_cfg:    ioa config struct
5118  * @device:             device to match (sdev)
5119  * @match:              match function to use
5120  *
5121  * Returns:
5122  *      SUCCESS / FAILED
5123  **/
5124 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5125                             int (*match)(struct ipr_cmnd *, void *))
5126 {
5127         struct ipr_cmnd *ipr_cmd;
5128         int wait, i;
5129         unsigned long flags;
5130         struct ipr_hrr_queue *hrrq;
5131         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5132         DECLARE_COMPLETION_ONSTACK(comp);
5133
5134         ENTER;
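        /*
         * Attach a completion to every matching command still outstanding,
         * wait for them to finish, then rescan; repeat until no matching
         * commands remain or the wait times out.
         */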
5135         do {
5136                 wait = 0;
5137
5138                 for_each_hrrq(hrrq, ioa_cfg) {
5139                         spin_lock_irqsave(hrrq->lock, flags);
5140                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5141                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5142                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5143                                         if (match(ipr_cmd, device)) {
5144                                                 ipr_cmd->eh_comp = &comp;
5145                                                 wait++;
5146                                         }
5147                                 }
5148                         }
5149                         spin_unlock_irqrestore(hrrq->lock, flags);
5150                 }
5151
5152                 if (wait) {
5153                         timeout = wait_for_completion_timeout(&comp, timeout);
5154
5155                         if (!timeout) {
5156                                 wait = 0;
5157
5158                                 for_each_hrrq(hrrq, ioa_cfg) {
5159                                         spin_lock_irqsave(hrrq->lock, flags);
5160                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5161                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5162                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5163                                                         if (match(ipr_cmd, device)) {
5164                                                                 ipr_cmd->eh_comp = NULL;
5165                                                                 wait++;
5166                                                         }
5167                                                 }
5168                                         }
5169                                         spin_unlock_irqrestore(hrrq->lock, flags);
5170                                 }
5171
5172                                 if (wait)
5173                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5174                                 LEAVE;
5175                                 return wait ? FAILED : SUCCESS;
5176                         }
5177                 }
5178         } while (wait);
5179
5180         LEAVE;
5181         return SUCCESS;
5182 }
5183
5184 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5185 {
5186         struct ipr_ioa_cfg *ioa_cfg;
5187         unsigned long lock_flags = 0;
5188         int rc = SUCCESS;
5189
5190         ENTER;
5191         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5192         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5193
5194         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5195                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5196                 dev_err(&ioa_cfg->pdev->dev,
5197                         "Adapter being reset as a result of error recovery.\n");
5198
5199                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5200                         ioa_cfg->sdt_state = GET_DUMP;
5201         }
5202
5203         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5204         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5205         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5206
5207         /* If we got hit with a host reset while we were already resetting
5208          * the adapter for some reason, and that reset failed, return FAILED. */
5209         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5210                 ipr_trace;
5211                 rc = FAILED;
5212         }
5213
5214         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5215         LEAVE;
5216         return rc;
5217 }
5218
5219 /**
5220  * ipr_device_reset - Reset the device
5221  * @ioa_cfg:    ioa config struct
5222  * @res:                resource entry struct
5223  *
5224  * This function issues a device reset to the affected device.
5225  * If the device is a SCSI device, a LUN reset will be sent
5226  * to the device first. If that does not work, a target reset
5227  * will be sent. If the device is a SATA device, a PHY reset will
5228  * be sent.
5229  *
5230  * Return value:
5231  *      0 on success / non-zero on failure
5232  **/
5233 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5234                             struct ipr_resource_entry *res)
5235 {
5236         struct ipr_cmnd *ipr_cmd;
5237         struct ipr_ioarcb *ioarcb;
5238         struct ipr_cmd_pkt *cmd_pkt;
5239         struct ipr_ioarcb_ata_regs *regs;
5240         u32 ioasc;
5241
5242         ENTER;
5243         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5244         ioarcb = &ipr_cmd->ioarcb;
5245         cmd_pkt = &ioarcb->cmd_pkt;
5246
5247         if (ipr_cmd->ioa_cfg->sis64) {
5248                 regs = &ipr_cmd->i.ata_ioadl.regs;
5249                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5250         } else
5251                 regs = &ioarcb->u.add_data.u.regs;
5252
5253         ioarcb->res_handle = res->res_handle;
5254         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5255         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5256         if (ipr_is_gata(res)) {
5257                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5258                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5259                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5260         }
5261
5262         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5263         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5264         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5265         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5266                 if (ipr_cmd->ioa_cfg->sis64)
5267                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5268                                sizeof(struct ipr_ioasa_gata));
5269                 else
5270                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5271                                sizeof(struct ipr_ioasa_gata));
5272         }
5273
5274         LEAVE;
5275         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5276 }
5277
5278 /**
5279  * ipr_sata_reset - Reset the SATA port
5280  * @link:       SATA link to reset
5281  * @classes:    class of the attached device
5282  * @deadline:   unused
5283  *
5284  * This function issues a SATA phy reset to the affected ATA link.
5285  *
5286  * Return value:
5287  *      0 on success / non-zero on failure
5288  **/
5289 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5290                                 unsigned long deadline)
5291 {
5292         struct ipr_sata_port *sata_port = link->ap->private_data;
5293         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5294         struct ipr_resource_entry *res;
5295         unsigned long lock_flags = 0;
5296         int rc = -ENXIO, ret;
5297
5298         ENTER;
5299         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5300         while (ioa_cfg->in_reset_reload) {
5301                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5302                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5303                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5304         }
5305
5306         res = sata_port->res;
5307         if (res) {
5308                 rc = ipr_device_reset(ioa_cfg, res);
5309                 *classes = res->ata_class;
5310                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5311
5312                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5313                 if (ret != SUCCESS) {
5314                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5315                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5316                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5317
5318                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5319                 }
5320         } else
5321                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5322
5323         LEAVE;
5324         return rc;
5325 }
5326
5327 /**
5328  * __ipr_eh_dev_reset - Reset the device
5329  * @scsi_cmd:   scsi command struct
5330  *
5331  * This function issues a device reset to the affected device.
5332  * A LUN reset will be sent to the device first. If that does
5333  * not work, a target reset will be sent.
5334  *
5335  * Return value:
5336  *      SUCCESS / FAILED
5337  **/
5338 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5339 {
5340         struct ipr_cmnd *ipr_cmd;
5341         struct ipr_ioa_cfg *ioa_cfg;
5342         struct ipr_resource_entry *res;
5343         struct ata_port *ap;
5344         int rc = 0, i;
5345         struct ipr_hrr_queue *hrrq;
5346
5347         ENTER;
5348         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5349         res = scsi_cmd->device->hostdata;
5350
5351         /*
5352          * If we are currently going through reset/reload, return failed. This will force the
5353          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5354          * reset to complete
5355          */
5356         if (ioa_cfg->in_reset_reload)
5357                 return FAILED;
5358         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5359                 return FAILED;
5360
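        /*
         * Mark any outstanding SATA commands for this device as failed so
         * that libata error handling will reclaim them below.
         */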
5361         for_each_hrrq(hrrq, ioa_cfg) {
5362                 spin_lock(&hrrq->_lock);
5363                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5364                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5365
5366                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5367                                 if (!ipr_cmd->qc)
5368                                         continue;
5369                                 if (ipr_cmnd_is_free(ipr_cmd))
5370                                         continue;
5371
5372                                 ipr_cmd->done = ipr_sata_eh_done;
5373                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5374                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5375                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5376                                 }
5377                         }
5378                 }
5379                 spin_unlock(&hrrq->_lock);
5380         }
5381         res->resetting_device = 1;
5382         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5383
5384         if (ipr_is_gata(res) && res->sata_port) {
5385                 ap = res->sata_port->ap;
5386                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5387                 ata_std_error_handler(ap);
5388                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5389         } else
5390                 rc = ipr_device_reset(ioa_cfg, res);
5391         res->resetting_device = 0;
5392         res->reset_occurred = 1;
5393
5394         LEAVE;
5395         return rc ? FAILED : SUCCESS;
5396 }
5397
5398 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5399 {
5400         int rc;
5401         struct ipr_ioa_cfg *ioa_cfg;
5402         struct ipr_resource_entry *res;
5403
5404         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5405         res = cmd->device->hostdata;
5406
5407         if (!res)
5408                 return FAILED;
5409
5410         spin_lock_irq(cmd->device->host->host_lock);
5411         rc = __ipr_eh_dev_reset(cmd);
5412         spin_unlock_irq(cmd->device->host->host_lock);
5413
5414         if (rc == SUCCESS) {
5415                 if (ipr_is_gata(res) && res->sata_port)
5416                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5417                 else
5418                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5419         }
5420
5421         return rc;
5422 }
5423
5424 /**
5425  * ipr_bus_reset_done - Op done function for bus reset.
5426  * @ipr_cmd:    ipr command struct
5427  *
5428  * This function is the op done function for a bus reset
5429  *
5430  * Return value:
5431  *      none
5432  **/
5433 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5434 {
5435         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5436         struct ipr_resource_entry *res;
5437
5438         ENTER;
5439         if (!ioa_cfg->sis64)
5440                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5441                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5442                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5443                                 break;
5444                         }
5445                 }
5446
5447         /*
5448          * If abort has not completed, indicate the reset has, else call the
5449          * abort's done function to wake the sleeping eh thread
5450          */
5451         if (ipr_cmd->sibling->sibling)
5452                 ipr_cmd->sibling->sibling = NULL;
5453         else
5454                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5455
5456         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5457         LEAVE;
5458 }
5459
5460 /**
5461  * ipr_abort_timeout - An abort task has timed out
5462  * @t: Timer context used to fetch ipr command struct
5463  *
5464  * This function handles when an abort task times out. If this
5465  * happens we issue a bus reset since we have resources tied
5466  * up that must be freed before returning to the midlayer.
5467  *
5468  * Return value:
5469  *      none
5470  **/
5471 static void ipr_abort_timeout(struct timer_list *t)
5472 {
5473         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5474         struct ipr_cmnd *reset_cmd;
5475         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5476         struct ipr_cmd_pkt *cmd_pkt;
5477         unsigned long lock_flags = 0;
5478
5479         ENTER;
5480         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5481         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5482                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5483                 return;
5484         }
5485
5486         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5487         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
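        /*
         * Cross-link the abort and the bus reset so ipr_bus_reset_done can
         * tell whether the abort already completed and wake the eh thread.
         */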
5488         ipr_cmd->sibling = reset_cmd;
5489         reset_cmd->sibling = ipr_cmd;
5490         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5491         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5492         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5493         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5494         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5495
5496         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5497         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5498         LEAVE;
5499 }
5500
5501 /**
5502  * ipr_cancel_op - Cancel specified op
5503  * @scsi_cmd:   scsi command struct
5504  *
5505  * This function cancels specified op.
5506  *
5507  * Return value:
5508  *      SUCCESS / FAILED
5509  **/
5510 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5511 {
5512         struct ipr_cmnd *ipr_cmd;
5513         struct ipr_ioa_cfg *ioa_cfg;
5514         struct ipr_resource_entry *res;
5515         struct ipr_cmd_pkt *cmd_pkt;
5516         u32 ioasc;
5517         int i, op_found = 0;
5518         struct ipr_hrr_queue *hrrq;
5519
5520         ENTER;
5521         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5522         res = scsi_cmd->device->hostdata;
5523
5524         /* If we are currently going through reset/reload, return failed.
5525          * This will force the mid-layer to call ipr_eh_host_reset,
5526          * which will then go to sleep and wait for the reset to complete
5527          */
5528         if (ioa_cfg->in_reset_reload ||
5529             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5530                 return FAILED;
5531         if (!res)
5532                 return FAILED;
5533
5534         /*
5535          * If we are aborting a timed out op, chances are that the timeout was caused
5536          * by a still not detected EEH error. In such cases, reading a register will
5537          * trigger the EEH recovery infrastructure.
5538          */
5539         readl(ioa_cfg->regs.sense_interrupt_reg);
5540
5541         if (!ipr_is_gscsi(res))
5542                 return FAILED;
5543
5544         for_each_hrrq(hrrq, ioa_cfg) {
5545                 spin_lock(&hrrq->_lock);
5546                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5547                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5548                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5549                                         op_found = 1;
5550                                         break;
5551                                 }
5552                         }
5553                 }
5554                 spin_unlock(&hrrq->_lock);
5555         }
5556
5557         if (!op_found)
5558                 return SUCCESS;
5559
5560         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5561         ipr_cmd->ioarcb.res_handle = res->res_handle;
5562         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5563         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5564         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5565         ipr_cmd->u.sdev = scsi_cmd->device;
5566
5567         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5568                     scsi_cmd->cmnd[0]);
5569         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5570         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5571
5572         /*
5573          * If the abort task timed out and we sent a bus reset, we will get
5574  * one of the following responses to the abort.
5575          */
5576         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5577                 ioasc = 0;
5578                 ipr_trace;
5579         }
5580
5581         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5582         if (!ipr_is_naca_model(res))
5583                 res->needs_sync_complete = 1;
5584
5585         LEAVE;
5586         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5587 }
5588
5589 /**
5590  * ipr_scan_finished - Report whether scan is done
5591  * @shost:           scsi host struct
5592  * @elapsed_time:    elapsed time
5593  *
5594  * Return value:
5595  *      0 if scan in progress / 1 if scan is complete
5596  **/
5597 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5598 {
5599         unsigned long lock_flags;
5600         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5601         int rc = 0;
5602
5603         spin_lock_irqsave(shost->host_lock, lock_flags);
5604         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5605                 rc = 1;
5606         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5607                 rc = 1;
5608         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5609         return rc;
5610 }
5611
5612 /**
5613  * ipr_eh_abort - Abort a single op
5614  * @scsi_cmd:   scsi command struct
5615  *
5616  * Return value:
5617  *      SUCCESS / FAILED
5618  **/
5619 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5620 {
5621         unsigned long flags;
5622         int rc;
5623         struct ipr_ioa_cfg *ioa_cfg;
5624
5625         ENTER;
5626
5627         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5628
5629         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5630         rc = ipr_cancel_op(scsi_cmd);
5631         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5632
5633         if (rc == SUCCESS)
5634                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5635         LEAVE;
5636         return rc;
5637 }
5638
5639 /**
5640  * ipr_handle_other_interrupt - Handle "other" interrupts
5641  * @ioa_cfg:    ioa config struct
5642  * @int_reg:    interrupt register
5643  *
5644  * Return value:
5645  *      IRQ_NONE / IRQ_HANDLED
5646  **/
5647 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5648                                               u32 int_reg)
5649 {
5650         irqreturn_t rc = IRQ_HANDLED;
5651         u32 int_mask_reg;
5652
5653         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5654         int_reg &= ~int_mask_reg;
5655
5656         /* If an interrupt on the adapter did not occur, ignore it.
5657          * Or in the case of SIS 64, check for a stage change interrupt.
5658          */
5659         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5660                 if (ioa_cfg->sis64) {
5661                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5662                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5663                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5664
5665                                 /* clear stage change */
5666                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5667                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5668                                 list_del(&ioa_cfg->reset_cmd->queue);
5669                                 del_timer(&ioa_cfg->reset_cmd->timer);
5670                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5671                                 return IRQ_HANDLED;
5672                         }
5673                 }
5674
5675                 return IRQ_NONE;
5676         }
5677
5678         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5679                 /* Mask the interrupt */
5680                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5681                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5682
5683                 list_del(&ioa_cfg->reset_cmd->queue);
5684                 del_timer(&ioa_cfg->reset_cmd->timer);
5685                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5686         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5687                 if (ioa_cfg->clear_isr) {
5688                         if (ipr_debug && printk_ratelimit())
5689                                 dev_err(&ioa_cfg->pdev->dev,
5690                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5691                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5692                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5693                         return IRQ_NONE;
5694                 }
5695         } else {
5696                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5697                         ioa_cfg->ioa_unit_checked = 1;
5698                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5699                         dev_err(&ioa_cfg->pdev->dev,
5700                                 "No Host RRQ. 0x%08X\n", int_reg);
5701                 else
5702                         dev_err(&ioa_cfg->pdev->dev,
5703                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5704
5705                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5706                         ioa_cfg->sdt_state = GET_DUMP;
5707
5708                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5709                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5710         }
5711
5712         return rc;
5713 }
5714
5715 /**
5716  * ipr_isr_eh - Interrupt service routine error handler
5717  * @ioa_cfg:    ioa config struct
5718  * @msg:        message to log
5719  * @number:     value logged after @msg; its meaning depends on the caller
5720  *
5721  * Return value:
5722  *      none
5723  **/
5724 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5725 {
5726         ioa_cfg->errors_logged++;
5727         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5728
5729         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5730                 ioa_cfg->sdt_state = GET_DUMP;
5731
5732         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5733 }
5734
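/**
 * ipr_process_hrrq - Process responses from an HRR queue
 * @hrr_queue:  HRR queue to drain
 * @budget:     max number of responses to process (-1 for no limit)
 * @doneq:      list to which completed commands are moved
 *
 * Moves each completed command onto @doneq until the toggle bit no
 * longer matches. Caller must hold the HRRQ lock.
 *
 * Return value:
 *      number of responses processed
 **/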
5735 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5736                                                 struct list_head *doneq)
5737 {
5738         u32 ioasc;
5739         u16 cmd_index;
5740         struct ipr_cmnd *ipr_cmd;
5741         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5742         int num_hrrq = 0;
5743
5744         /* If interrupts are disabled, ignore the interrupt */
5745         if (!hrr_queue->allow_interrupts)
5746                 return 0;
5747
5748         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5749                hrr_queue->toggle_bit) {
5750
5751                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5752                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5753                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5754
5755                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5756                              cmd_index < hrr_queue->min_cmd_id)) {
5757                         ipr_isr_eh(ioa_cfg,
5758                                 "Invalid response handle from IOA:",
5759                                 cmd_index);
5760                         break;
5761                 }
5762
5763                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5764                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5765
5766                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5767
5768                 list_move_tail(&ipr_cmd->queue, doneq);
5769
5770                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5771                         hrr_queue->hrrq_curr++;
5772                 } else {
5773                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5774                         hrr_queue->toggle_bit ^= 1u;
5775                 }
5776                 num_hrrq++;
5777                 if (budget > 0 && num_hrrq >= budget)
5778                         break;
5779         }
5780
5781         return num_hrrq;
5782 }
5783
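/**
 * ipr_iopoll - irq_poll callback to drain an HRR queue
 * @iop:        irq_poll struct embedded in the HRR queue
 * @budget:     max number of completions to process per poll
 *
 * Return value:
 *      number of completed ops
 **/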
5784 static int ipr_iopoll(struct irq_poll *iop, int budget)
5785 {
5786         struct ipr_hrr_queue *hrrq;
5787         struct ipr_cmnd *ipr_cmd, *temp;
5788         unsigned long hrrq_flags;
5789         int completed_ops;
5790         LIST_HEAD(doneq);
5791
5792         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5793
5794         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5795         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5796
5797         if (completed_ops < budget)
5798                 irq_poll_complete(iop);
5799         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5800
5801         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5802                 list_del(&ipr_cmd->queue);
5803                 del_timer(&ipr_cmd->timer);
5804                 ipr_cmd->fast_done(ipr_cmd);
5805         }
5806
5807         return completed_ops;
5808 }
5809
5810 /**
5811  * ipr_isr - Interrupt service routine
5812  * @irq:        irq number
5813  * @devp:       pointer to the HRR queue struct
5814  *
5815  * Return value:
5816  *      IRQ_NONE / IRQ_HANDLED
5817  **/
5818 static irqreturn_t ipr_isr(int irq, void *devp)
5819 {
5820         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5821         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5822         unsigned long hrrq_flags = 0;
5823         u32 int_reg = 0;
5824         int num_hrrq = 0;
5825         int irq_none = 0;
5826         struct ipr_cmnd *ipr_cmd, *temp;
5827         irqreturn_t rc = IRQ_NONE;
5828         LIST_HEAD(doneq);
5829
5830         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5831         /* If interrupts are disabled, ignore the interrupt */
5832         if (!hrrq->allow_interrupts) {
5833                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5834                 return IRQ_NONE;
5835         }
5836
5837         while (1) {
5838                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5839                         rc = IRQ_HANDLED;
5840
5841                         if (!ioa_cfg->clear_isr)
5842                                 break;
5843
5844                         /* Clear the PCI interrupt */
5845                         num_hrrq = 0;
5846                         do {
5847                                 writel(IPR_PCII_HRRQ_UPDATED,
5848                                      ioa_cfg->regs.clr_interrupt_reg32);
5849                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5850                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5851                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5852
5853                 } else if (rc == IRQ_NONE && irq_none == 0) {
5854                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5855                         irq_none++;
5856                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5857                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5858                         ipr_isr_eh(ioa_cfg,
5859                                 "Error clearing HRRQ:", num_hrrq);
5860                         rc = IRQ_HANDLED;
5861                         break;
5862                 } else
5863                         break;
5864         }
5865
5866         if (unlikely(rc == IRQ_NONE))
5867                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5868
5869         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5870         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5871                 list_del(&ipr_cmd->queue);
5872                 del_timer(&ipr_cmd->timer);
5873                 ipr_cmd->fast_done(ipr_cmd);
5874         }
5875         return rc;
5876 }
5877
5878 /**
5879  * ipr_isr_mhrrq - Interrupt service routine for additional HRRQs
5880  * @irq:        irq number
5881  * @devp:       pointer to the HRR queue struct
5882  *
5883  * Return value:
5884  *      IRQ_NONE / IRQ_HANDLED
5885  **/
5886 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5887 {
5888         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5889         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5890         unsigned long hrrq_flags = 0;
5891         struct ipr_cmnd *ipr_cmd, *temp;
5892         irqreturn_t rc = IRQ_NONE;
5893         LIST_HEAD(doneq);
5894
5895         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5896
5897         /* If interrupts are disabled, ignore the interrupt */
5898         if (!hrrq->allow_interrupts) {
5899                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5900                 return IRQ_NONE;
5901         }
5902
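        /*
         * With iopoll enabled on a multi-vector SIS-64 adapter, hand
         * completion processing off to irq_poll; otherwise drain the
         * queue inline below.
         */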
5903         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5904                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5905                        hrrq->toggle_bit) {
5906                         irq_poll_sched(&hrrq->iopoll);
5907                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5908                         return IRQ_HANDLED;
5909                 }
5910         } else {
5911                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5912                         hrrq->toggle_bit) {
5913                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5914                                 rc = IRQ_HANDLED;
5915                 }
5916         }
5917
5918         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5919
5920         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5921                 list_del(&ipr_cmd->queue);
5922                 del_timer(&ipr_cmd->timer);
5923                 ipr_cmd->fast_done(ipr_cmd);
5924         }
5925         return rc;
5926 }
5927
5928 /**
5929  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5930  * @ioa_cfg:    ioa config struct
5931  * @ipr_cmd:    ipr command struct
5932  *
5933  * Return value:
5934  *      0 on success / -1 on failure
5935  **/
5936 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5937                              struct ipr_cmnd *ipr_cmd)
5938 {
5939         int i, nseg;
5940         struct scatterlist *sg;
5941         u32 length;
5942         u32 ioadl_flags = 0;
5943         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5944         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5945         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5946
5947         length = scsi_bufflen(scsi_cmd);
5948         if (!length)
5949                 return 0;
5950
5951         nseg = scsi_dma_map(scsi_cmd);
5952         if (nseg < 0) {
5953                 if (printk_ratelimit())
5954                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5955                 return -1;
5956         }
5957
5958         ipr_cmd->dma_use_sg = nseg;
5959
5960         ioarcb->data_transfer_length = cpu_to_be32(length);
5961         ioarcb->ioadl_len =
5962                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5963
5964         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5965                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5966                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5967         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5968                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5969
5970         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5971                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5972                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5973                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5974         }
5975
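        /* Flag the final descriptor so the IOA knows where the list ends */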
5976         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5977         return 0;
5978 }
5979
5980 /**
5981  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5982  * @ioa_cfg:    ioa config struct
5983  * @ipr_cmd:    ipr command struct
5984  *
5985  * Return value:
5986  *      0 on success / -1 on failure
5987  **/
5988 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5989                            struct ipr_cmnd *ipr_cmd)
5990 {
5991         int i, nseg;
5992         struct scatterlist *sg;
5993         u32 length;
5994         u32 ioadl_flags = 0;
5995         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5996         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5997         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5998
5999         length = scsi_bufflen(scsi_cmd);
6000         if (!length)
6001                 return 0;
6002
6003         nseg = scsi_dma_map(scsi_cmd);
6004         if (nseg < 0) {
6005                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6006                 return -1;
6007         }
6008
6009         ipr_cmd->dma_use_sg = nseg;
6010
6011         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6012                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6013                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6014                 ioarcb->data_transfer_length = cpu_to_be32(length);
6015                 ioarcb->ioadl_len =
6016                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6017         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6018                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6019                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6020                 ioarcb->read_ioadl_len =
6021                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6022         }
6023
6024         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6025                 ioadl = ioarcb->u.add_data.u.ioadl;
6026                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6027                                     offsetof(struct ipr_ioarcb, u.add_data));
6028                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6029         }
6030
6031         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6032                 ioadl[i].flags_and_data_len =
6033                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6034                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6035         }
6036
6037         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6038         return 0;
6039 }
6040
6041 /**
6042  * __ipr_erp_done - Process completion of ERP for a device
6043  * @ipr_cmd:            ipr command struct
6044  *
6045  * This function copies the sense buffer into the scsi_cmd
6046  * struct and calls scsi_done().
6047  *
6048  * Return value:
6049  *      nothing
6050  **/
6051 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6052 {
6053         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6054         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6055         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6056
6057         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6058                 scsi_cmd->result |= (DID_ERROR << 16);
6059                 scmd_printk(KERN_ERR, scsi_cmd,
6060                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6061         } else {
6062                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6063                        SCSI_SENSE_BUFFERSIZE);
6064         }
6065
6066         if (res) {
6067                 if (!ipr_is_naca_model(res))
6068                         res->needs_sync_complete = 1;
6069                 res->in_erp = 0;
6070         }
6071         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6072         scsi_done(scsi_cmd);
6073         if (ipr_cmd->eh_comp)
6074                 complete(ipr_cmd->eh_comp);
6075         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6076 }
6077
6078 /**
6079  * ipr_erp_done - Process completion of ERP for a device
6080  * @ipr_cmd:            ipr command struct
6081  *
6082  * This function copies the sense buffer into the scsi_cmd
6083  * struct and calls scsi_done().
6084  *
6085  * Return value:
6086  *      nothing
6087  **/
6088 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6089 {
6090         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6091         unsigned long hrrq_flags;
6092
6093         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6094         __ipr_erp_done(ipr_cmd);
6095         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6096 }
6097
6098 /**
6099  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6100  * @ipr_cmd:    ipr command struct
6101  *
6102  * Return value:
6103  *      none
6104  **/
6105 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6106 {
6107         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6108         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6109         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6110
6111         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6112         ioarcb->data_transfer_length = 0;
6113         ioarcb->read_data_transfer_length = 0;
6114         ioarcb->ioadl_len = 0;
6115         ioarcb->read_ioadl_len = 0;
6116         ioasa->hdr.ioasc = 0;
6117         ioasa->hdr.residual_data_len = 0;
6118
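        /*
         * Re-point the IOADL address at this command's own descriptor
         * area; SIS-64 and SIS-32 IOARCBs carry it in different fields.
         */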
6119         if (ipr_cmd->ioa_cfg->sis64)
6120                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6121                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6122         else {
6123                 ioarcb->write_ioadl_addr =
6124                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6125                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6126         }
6127 }
6128
6129 /**
6130  * __ipr_erp_request_sense - Send request sense to a device
6131  * @ipr_cmd:    ipr command struct
6132  *
6133  * This function sends a request sense to a device as a result
6134  * of a check condition.
6135  *
6136  * Return value:
6137  *      nothing
6138  **/
6139 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6140 {
6141         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6142         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6143
6144         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6145                 __ipr_erp_done(ipr_cmd);
6146                 return;
6147         }
6148
6149         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6150
6151         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6152         cmd_pkt->cdb[0] = REQUEST_SENSE;
6153         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6154         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6155         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6156         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6157
6158         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6159                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6160
6161         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6162                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6163 }
6164
6165 /**
6166  * ipr_erp_request_sense - Send request sense to a device
6167  * @ipr_cmd:    ipr command struct
6168  *
6169  * This function sends a request sense to a device as a result
6170  * of a check condition.
6171  *
6172  * Return value:
6173  *      nothing
6174  **/
6175 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6176 {
6177         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6178         unsigned long hrrq_flags;
6179
6180         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6181         __ipr_erp_request_sense(ipr_cmd);
6182         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6183 }
6184
6185 /**
6186  * ipr_erp_cancel_all - Send cancel all to a device
6187  * @ipr_cmd:    ipr command struct
6188  *
6189  * This function sends a cancel all to a device to clear the
6190  * queue. If we are running TCQ on the device, QERR is set to 1,
6191  * which means all outstanding ops have been dropped on the floor.
6192  * Cancel all will return them to us.
6193  *
6194  * Return value:
6195  *      nothing
6196  **/
6197 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6198 {
6199         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6200         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6201         struct ipr_cmd_pkt *cmd_pkt;
6202
6203         res->in_erp = 1;
6204
6205         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6206
6207         if (!scsi_cmd->device->simple_tags) {
6208                 __ipr_erp_request_sense(ipr_cmd);
6209                 return;
6210         }
6211
6212         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6213         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6214         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6215
6216         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6217                    IPR_CANCEL_ALL_TIMEOUT);
6218 }
6219
6220 /**
6221  * ipr_dump_ioasa - Dump contents of IOASA
6222  * @ioa_cfg:    ioa config struct
6223  * @ipr_cmd:    ipr command struct
6224  * @res:                resource entry struct
6225  *
6226  * This function is invoked by the interrupt handler when ops
6227  * fail. It will log the IOASA if appropriate. Only called
6228  * for GPDD ops.
6229  *
6230  * Return value:
6231  *      none
6232  **/
6233 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6234                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6235 {
6236         int i;
6237         u16 data_len;
6238         u32 ioasc, fd_ioasc;
6239         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6240         __be32 *ioasa_data = (__be32 *)ioasa;
6241         int error_index;
6242
6243         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6244         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6245
6246         if (0 == ioasc)
6247                 return;
6248
6249         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6250                 return;
6251
6252         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6253                 error_index = ipr_get_error(fd_ioasc);
6254         else
6255                 error_index = ipr_get_error(ioasc);
6256
6257         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6258                 /* Don't log an error if the IOA already logged one */
6259                 if (ioasa->hdr.ilid != 0)
6260                         return;
6261
6262                 if (!ipr_is_gscsi(res))
6263                         return;
6264
6265                 if (ipr_error_table[error_index].log_ioasa == 0)
6266                         return;
6267         }
6268
6269         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6270
6271         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6272         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6273                 data_len = sizeof(struct ipr_ioasa64);
6274         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6275                 data_len = sizeof(struct ipr_ioasa);
6276
6277         ipr_err("IOASA Dump:\n");
6278
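        /* i indexes 32-bit words, four per output line; i*4 is the byte offset */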
6279         for (i = 0; i < data_len / 4; i += 4) {
6280                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6281                         be32_to_cpu(ioasa_data[i]),
6282                         be32_to_cpu(ioasa_data[i+1]),
6283                         be32_to_cpu(ioasa_data[i+2]),
6284                         be32_to_cpu(ioasa_data[i+3]));
6285         }
6286 }
6287
6288 /**
6289  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6290  * @ipr_cmd:    ipr command struct
6291  *
6292  * Return value:
6293  *      none
6294  **/
6295 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6296 {
6297         u32 failing_lba;
6298         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6299         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6300         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6301         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6302
6303         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6304
6305         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6306                 return;
6307
6308         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6309
6310         if (ipr_is_vset_device(res) &&
6311             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6312             ioasa->u.vset.failing_lba_hi != 0) {
6313                 sense_buf[0] = 0x72;
6314                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6315                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6316                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6317
6318                 sense_buf[7] = 12;
6319                 sense_buf[8] = 0;
6320                 sense_buf[9] = 0x0A;
6321                 sense_buf[10] = 0x80;
6322
6323                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6324
6325                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6326                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6327                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6328                 sense_buf[15] = failing_lba & 0x000000ff;
6329
6330                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6331
6332                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6333                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6334                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6335                 sense_buf[19] = failing_lba & 0x000000ff;
6336         } else {
6337                 sense_buf[0] = 0x70;
6338                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6339                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6340                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6341
6342                 /* Illegal request */
6343                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6344                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6345                         sense_buf[7] = 10;      /* additional length */
6346
6347                         /* IOARCB was in error */
6348                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6349                                 sense_buf[15] = 0xC0;
6350                         else    /* Parameter data was invalid */
6351                                 sense_buf[15] = 0x80;
6352
6353                         sense_buf[16] =
6354                             ((IPR_FIELD_POINTER_MASK &
6355                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6356                         sense_buf[17] =
6357                             (IPR_FIELD_POINTER_MASK &
6358                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6359                 } else {
6360                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6361                                 if (ipr_is_vset_device(res))
6362                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6363                                 else
6364                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6365
6366                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6367                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6368                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6369                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6370                                 sense_buf[6] = failing_lba & 0x000000ff;
6371                         }
6372
6373                         sense_buf[7] = 6;       /* additional length */
6374                 }
6375         }
6376 }
6377
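/*
 * Illustrative sketch only, not part of the driver: in the descriptor
 * format (0x72) case built above, bytes 12-19 of the sense buffer hold
 * the failing LBA as one big-endian 64-bit value (failing_lba_hi then
 * failing_lba_lo). A hypothetical decoder for that layout:
 */
static inline u64 ipr_sense_failing_lba64(const u8 *sense_buf)
{
        u64 lba = 0;
        int i;

        /* Bytes 12..19 carry the LBA, most significant byte first */
        for (i = 12; i < 20; i++)
                lba = (lba << 8) | sense_buf[i];
        return lba;
}
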
6378 /**
6379  * ipr_get_autosense - Copy autosense data to sense buffer
6380  * @ipr_cmd:    ipr command struct
6381  *
6382  * This function copies the autosense buffer to the buffer
6383  * in the scsi_cmd, if there is autosense available.
6384  *
6385  * Return value:
6386  *      1 if autosense was available / 0 if not
6387  **/
6388 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6389 {
6390         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6391         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6392
6393         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6394                 return 0;
6395
6396         if (ipr_cmd->ioa_cfg->sis64)
6397                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6398                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6399                            SCSI_SENSE_BUFFERSIZE));
6400         else
6401                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6402                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6403                            SCSI_SENSE_BUFFERSIZE));
6404         return 1;
6405 }
6406
6407 /**
6408  * ipr_erp_start - Process an error response for a SCSI op
6409  * @ioa_cfg:    ioa config struct
6410  * @ipr_cmd:    ipr command struct
6411  *
6412  * This function determines whether or not to initiate ERP
6413  * on the affected device.
6414  *
6415  * Return value:
6416  *      nothing
6417  **/
6418 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6419                               struct ipr_cmnd *ipr_cmd)
6420 {
6421         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6422         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6423         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6424         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6425
6426         if (!res) {
6427                 __ipr_scsi_eh_done(ipr_cmd);
6428                 return;
6429         }
6430
6431         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6432                 ipr_gen_sense(ipr_cmd);
6433
6434         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6435
6436         switch (masked_ioasc) {
6437         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6438                 if (ipr_is_naca_model(res))
6439                         scsi_cmd->result |= (DID_ABORT << 16);
6440                 else
6441                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6442                 break;
6443         case IPR_IOASC_IR_RESOURCE_HANDLE:
6444         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6445                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6446                 break;
6447         case IPR_IOASC_HW_SEL_TIMEOUT:
6448                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6449                 if (!ipr_is_naca_model(res))
6450                         res->needs_sync_complete = 1;
6451                 break;
6452         case IPR_IOASC_SYNC_REQUIRED:
6453                 if (!res->in_erp)
6454                         res->needs_sync_complete = 1;
6455                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6456                 break;
6457         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6458         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6459                 /*
6460                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6461                  * so SCSI mid-layer and upper layers handle it accordingly.
6462                  */
6463                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6464                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6465                 break;
6466         case IPR_IOASC_BUS_WAS_RESET:
6467         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6468                 /*
6469                  * Report the bus reset and ask for a retry. The device
6470                  * will give CC/UA the next command.
6471                  */
6472                 if (!res->resetting_device)
6473                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6474                 scsi_cmd->result |= (DID_ERROR << 16);
6475                 if (!ipr_is_naca_model(res))
6476                         res->needs_sync_complete = 1;
6477                 break;
6478         case IPR_IOASC_HW_DEV_BUS_STATUS:
6479                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6480                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6481                         if (!ipr_get_autosense(ipr_cmd)) {
6482                                 if (!ipr_is_naca_model(res)) {
6483                                         ipr_erp_cancel_all(ipr_cmd);
6484                                         return;
6485                                 }
6486                         }
6487                 }
6488                 if (!ipr_is_naca_model(res))
6489                         res->needs_sync_complete = 1;
6490                 break;
6491         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6492                 break;
6493         case IPR_IOASC_IR_NON_OPTIMIZED:
6494                 if (res->raw_mode) {
6495                         res->raw_mode = 0;
6496                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6497                 } else
6498                         scsi_cmd->result |= (DID_ERROR << 16);
6499                 break;
6500         default:
6501                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6502                         scsi_cmd->result |= (DID_ERROR << 16);
6503                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6504                         res->needs_sync_complete = 1;
6505                 break;
6506         }
6507
6508         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6509         scsi_done(scsi_cmd);
6510         if (ipr_cmd->eh_comp)
6511                 complete(ipr_cmd->eh_comp);
6512         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6513 }
6514
6515 /**
6516  * ipr_scsi_done - mid-layer done function
6517  * @ipr_cmd:    ipr command struct
6518  *
6519  * This function is invoked by the interrupt handler for
6520  * ops generated by the SCSI mid-layer
6521  *
6522  * Return value:
6523  *      none
6524  **/
6525 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6526 {
6527         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6528         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6529         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6530         unsigned long lock_flags;
6531
6532         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6533
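        /*
         * Fast path: no sense data, so the command can complete under
         * the HRRQ lock alone; errors fall through to ERP, which also
         * needs the host lock.
         */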
6534         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6535                 scsi_dma_unmap(scsi_cmd);
6536
6537                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6538                 scsi_done(scsi_cmd);
6539                 if (ipr_cmd->eh_comp)
6540                         complete(ipr_cmd->eh_comp);
6541                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6542                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6543         } else {
6544                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6545                 spin_lock(&ipr_cmd->hrrq->_lock);
6546                 ipr_erp_start(ioa_cfg, ipr_cmd);
6547                 spin_unlock(&ipr_cmd->hrrq->_lock);
6548                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6549         }
6550 }
6551
6552 /**
6553  * ipr_queuecommand - Queue a mid-layer request
6554  * @shost:              scsi host struct
6555  * @scsi_cmd:   scsi command struct
6556  *
6557  * This function queues a request generated by the mid-layer.
6558  *
6559  * Return value:
6560  *      0 on success
6561  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6562  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6563  **/
6564 static int ipr_queuecommand(struct Scsi_Host *shost,
6565                             struct scsi_cmnd *scsi_cmd)
6566 {
6567         struct ipr_ioa_cfg *ioa_cfg;
6568         struct ipr_resource_entry *res;
6569         struct ipr_ioarcb *ioarcb;
6570         struct ipr_cmnd *ipr_cmd;
6571         unsigned long hrrq_flags, lock_flags;
6572         int rc;
6573         struct ipr_hrr_queue *hrrq;
6574         int hrrq_id;
6575
6576         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6577
6578         scsi_cmd->result = (DID_OK << 16);
6579         res = scsi_cmd->device->hostdata;
6580
6581         if (ipr_is_gata(res) && res->sata_port) {
6582                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6583                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6584                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6585                 return rc;
6586         }
6587
6588         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6589         hrrq = &ioa_cfg->hrrq[hrrq_id];
6590
6591         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6592         /*
6593          * We are currently blocking all devices due to a host reset.
6594          * We have told the host to stop giving us new requests, but
6595          * ERP ops don't count. FIXME
6596          */
6597         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6598                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6599                 return SCSI_MLQUEUE_HOST_BUSY;
6600         }
6601
6602         /*
6603          * FIXME - Create scsi_set_host_offline interface
6604          *  and the ioa_is_dead check can be removed
6605          */
6606         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6607                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6608                 goto err_nodev;
6609         }
6610
6611         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6612         if (ipr_cmd == NULL) {
6613                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6614                 return SCSI_MLQUEUE_HOST_BUSY;
6615         }
6616         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6617
6618         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6619         ioarcb = &ipr_cmd->ioarcb;
6620
6621         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6622         ipr_cmd->scsi_cmd = scsi_cmd;
6623         ipr_cmd->done = ipr_scsi_eh_done;
6624
6625         if (ipr_is_gscsi(res)) {
6626                 if (scsi_cmd->underflow == 0)
6627                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6628
6629                 if (res->reset_occurred) {
6630                         res->reset_occurred = 0;
6631                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6632                 }
6633         }
6634
6635         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6636                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6637
6638                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6639                 if (scsi_cmd->flags & SCMD_TAGGED)
6640                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6641                 else
6642                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6643         }
6644
6645         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6646             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6647                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6648         }
6649         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6650                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6651
6652                 if (scsi_cmd->underflow == 0)
6653                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6654         }
6655
6656         if (ioa_cfg->sis64)
6657                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6658         else
6659                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6660
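        /*
         * Recheck adapter state under the HRRQ lock: a reset may have
         * started while the IOADL was being built.
         */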
6661         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6662         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6663                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6664                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6665                 if (!rc)
6666                         scsi_dma_unmap(scsi_cmd);
6667                 return SCSI_MLQUEUE_HOST_BUSY;
6668         }
6669
6670         if (unlikely(hrrq->ioa_is_dead)) {
6671                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6672                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6673                 scsi_dma_unmap(scsi_cmd);
6674                 goto err_nodev;
6675         }
6676
6677         ioarcb->res_handle = res->res_handle;
6678         if (res->needs_sync_complete) {
6679                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6680                 res->needs_sync_complete = 0;
6681         }
6682         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6683         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6684         ipr_send_command(ipr_cmd);
6685         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6686         return 0;
6687
6688 err_nodev:
6689         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6690         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6691         scsi_cmd->result = (DID_NO_CONNECT << 16);
6692         scsi_done(scsi_cmd);
6693         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6694         return 0;
6695 }
6696
6697 /**
6698  * ipr_ioctl - IOCTL handler
6699  * @sdev:       scsi device struct
6700  * @cmd:        IOCTL cmd
6701  * @arg:        IOCTL arg
6702  *
6703  * Return value:
6704  *      0 on success / other on failure
6705  **/
6706 static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6707                      void __user *arg)
6708 {
6709         struct ipr_resource_entry *res;
6710
6711         res = (struct ipr_resource_entry *)sdev->hostdata;
6712         if (res && ipr_is_gata(res)) {
6713                 if (cmd == HDIO_GET_IDENTITY)
6714                         return -ENOTTY;
6715                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6716         }
6717
6718         return -EINVAL;
6719 }
6720
6721 /**
6722  * ipr_ioa_info - Get information about the card/driver
6723  * @host:       scsi host struct
6724  *
6725  * Return value:
6726  *      pointer to buffer with description string
6727  **/
6728 static const char *ipr_ioa_info(struct Scsi_Host *host)
6729 {
6730         static char buffer[512];
6731         struct ipr_ioa_cfg *ioa_cfg;
6732         unsigned long lock_flags = 0;
6733
6734         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6735
6736         spin_lock_irqsave(host->host_lock, lock_flags);
6737         snprintf(buffer, sizeof(buffer), "IBM %X Storage Adapter", ioa_cfg->type);
6738         spin_unlock_irqrestore(host->host_lock, lock_flags);
6739
6740         return buffer;
6741 }
6742
6743 static struct scsi_host_template driver_template = {
6744         .module = THIS_MODULE,
6745         .name = "IPR",
6746         .info = ipr_ioa_info,
6747         .ioctl = ipr_ioctl,
6748 #ifdef CONFIG_COMPAT
6749         .compat_ioctl = ipr_ioctl,
6750 #endif
6751         .queuecommand = ipr_queuecommand,
6752         .dma_need_drain = ata_scsi_dma_need_drain,
6753         .eh_abort_handler = ipr_eh_abort,
6754         .eh_device_reset_handler = ipr_eh_dev_reset,
6755         .eh_host_reset_handler = ipr_eh_host_reset,
6756         .slave_alloc = ipr_slave_alloc,
6757         .slave_configure = ipr_slave_configure,
6758         .slave_destroy = ipr_slave_destroy,
6759         .scan_finished = ipr_scan_finished,
6760         .target_alloc = ipr_target_alloc,
6761         .target_destroy = ipr_target_destroy,
6762         .change_queue_depth = ipr_change_queue_depth,
6763         .bios_param = ipr_biosparam,
6764         .can_queue = IPR_MAX_COMMANDS,
6765         .this_id = -1,
6766         .sg_tablesize = IPR_MAX_SGLIST,
6767         .max_sectors = IPR_IOA_MAX_SECTORS,
6768         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6769         .shost_groups = ipr_ioa_groups,
6770         .sdev_groups = ipr_dev_groups,
6771         .proc_name = IPR_NAME,
6772 };
6773
6774 /**
6775  * ipr_ata_phy_reset - libata phy_reset handler
6776  * @ap:         ata port to reset
6777  *
6778  **/
6779 static void ipr_ata_phy_reset(struct ata_port *ap)
6780 {
6781         unsigned long flags;
6782         struct ipr_sata_port *sata_port = ap->private_data;
6783         struct ipr_resource_entry *res = sata_port->res;
6784         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6785         int rc;
6786
6787         ENTER;
6788         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6789         while (ioa_cfg->in_reset_reload) {
6790                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6791                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6792                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6793         }
6794
6795         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6796                 goto out_unlock;
6797
6798         rc = ipr_device_reset(ioa_cfg, res);
6799
6800         if (rc) {
6801                 ap->link.device[0].class = ATA_DEV_NONE;
6802                 goto out_unlock;
6803         }
6804
6805         ap->link.device[0].class = res->ata_class;
6806         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6807                 ap->link.device[0].class = ATA_DEV_NONE;
6808
6809 out_unlock:
6810         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6811         LEAVE;
6812 }
6813
6814 /**
6815  * ipr_ata_post_internal - Cleanup after an internal command
6816  * @qc: ATA queued command
6817  *
6818  * Return value:
6819  *      none
6820  **/
6821 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6822 {
6823         struct ipr_sata_port *sata_port = qc->ap->private_data;
6824         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6825         struct ipr_cmnd *ipr_cmd;
6826         struct ipr_hrr_queue *hrrq;
6827         unsigned long flags;
6828
6829         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6830         while (ioa_cfg->in_reset_reload) {
6831                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6832                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6833                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6834         }
6835
6836         for_each_hrrq(hrrq, ioa_cfg) {
6837                 spin_lock(&hrrq->_lock);
6838                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6839                         if (ipr_cmd->qc == qc) {
6840                                 ipr_device_reset(ioa_cfg, sata_port->res);
6841                                 break;
6842                         }
6843                 }
6844                 spin_unlock(&hrrq->_lock);
6845         }
6846         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6847 }
6848
6849 /**
6850  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6851  * @regs:       destination
6852  * @tf: source ATA taskfile
6853  *
6854  * Return value:
6855  *      none
6856  **/
6857 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6858                              struct ata_taskfile *tf)
6859 {
6860         regs->feature = tf->feature;
6861         regs->nsect = tf->nsect;
6862         regs->lbal = tf->lbal;
6863         regs->lbam = tf->lbam;
6864         regs->lbah = tf->lbah;
6865         regs->device = tf->device;
6866         regs->command = tf->command;
6867         regs->hob_feature = tf->hob_feature;
6868         regs->hob_nsect = tf->hob_nsect;
6869         regs->hob_lbal = tf->hob_lbal;
6870         regs->hob_lbam = tf->hob_lbam;
6871         regs->hob_lbah = tf->hob_lbah;
6872         regs->ctl = tf->ctl;
6873 }
6874
6875 /**
6876  * ipr_sata_done - done function for SATA commands
6877  * @ipr_cmd:    ipr command struct
6878  *
6879  * This function is invoked by the interrupt handler for
6880  * ops generated by the SCSI mid-layer to SATA devices
6881  *
6882  * Return value:
6883  *      none
6884  **/
6885 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6886 {
6887         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6888         struct ata_queued_cmd *qc = ipr_cmd->qc;
6889         struct ipr_sata_port *sata_port = qc->ap->private_data;
6890         struct ipr_resource_entry *res = sata_port->res;
6891         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6892
6893         spin_lock(&ipr_cmd->hrrq->_lock);
6894         if (ipr_cmd->ioa_cfg->sis64)
6895                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6896                        sizeof(struct ipr_ioasa_gata));
6897         else
6898                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6899                        sizeof(struct ipr_ioasa_gata));
6900         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6901
6902         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6903                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6904
6905         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6906                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6907         else
6908                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6909         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6910         spin_unlock(&ipr_cmd->hrrq->_lock);
6911         ata_qc_complete(qc);
6912 }
6913
6914 /**
6915  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6916  * @ipr_cmd:    ipr command struct
6917  * @qc:         ATA queued command
6918  *
6919  **/
6920 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6921                                   struct ata_queued_cmd *qc)
6922 {
6923         u32 ioadl_flags = 0;
6924         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6925         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6926         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6927         int len = qc->nbytes;
6928         struct scatterlist *sg;
6929         unsigned int si;
6930         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6931
6932         if (len == 0)
6933                 return;
6934
6935         if (qc->dma_dir == DMA_TO_DEVICE) {
6936                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6937                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6938         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6939                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6940
6941         ioarcb->data_transfer_length = cpu_to_be32(len);
6942         ioarcb->ioadl_len =
6943                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6944         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6945                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6946
6947         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6948                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6949                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6950                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6951
6952                 last_ioadl64 = ioadl64;
6953                 ioadl64++;
6954         }
6955
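        /* Flag the final descriptor; last_ioadl64 is NULL only for an empty sg list */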
6956         if (likely(last_ioadl64))
6957                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6958 }
6959
6960 /**
6961  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6962  * @ipr_cmd:    ipr command struct
6963  * @qc:         ATA queued command
6964  *
6965  **/
6966 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6967                                 struct ata_queued_cmd *qc)
6968 {
6969         u32 ioadl_flags = 0;
6970         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6971         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6972         struct ipr_ioadl_desc *last_ioadl = NULL;
6973         int len = qc->nbytes;
6974         struct scatterlist *sg;
6975         unsigned int si;
6976
6977         if (len == 0)
6978                 return;
6979
6980         if (qc->dma_dir == DMA_TO_DEVICE) {
6981                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6982                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6983                 ioarcb->data_transfer_length = cpu_to_be32(len);
6984                 ioarcb->ioadl_len =
6985                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6986         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6987                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6988                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6989                 ioarcb->read_ioadl_len =
6990                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6991         }
6992
6993         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6994                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6995                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6996
6997                 last_ioadl = ioadl;
6998                 ioadl++;
6999         }
7000
7001         if (likely(last_ioadl))
7002                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
7003 }
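
/*
 * Unlike the SIS-64 variant above, the 32-bit IOARCB keeps separate
 * transfer-length/IOADL-length fields for writes and reads, so this
 * routine fills in one pair or the other based on qc->dma_dir rather
 * than using a single shared length field.
 */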
7004
7005 /**
7006  * ipr_qc_defer - Get a free ipr_cmd
7007  * @qc: queued command
7008  *
7009  * Return value:
7010  *      0 if success / ATA_DEFER_LINK if the command must be deferred
7011  **/
7012 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7013 {
7014         struct ata_port *ap = qc->ap;
7015         struct ipr_sata_port *sata_port = ap->private_data;
7016         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7017         struct ipr_cmnd *ipr_cmd;
7018         struct ipr_hrr_queue *hrrq;
7019         int hrrq_id;
7020
7021         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7022         hrrq = &ioa_cfg->hrrq[hrrq_id];
7023
7024         qc->lldd_task = NULL;
7025         spin_lock(&hrrq->_lock);
7026         if (unlikely(hrrq->ioa_is_dead)) {
7027                 spin_unlock(&hrrq->_lock);
7028                 return 0;
7029         }
7030
7031         if (unlikely(!hrrq->allow_cmds)) {
7032                 spin_unlock(&hrrq->_lock);
7033                 return ATA_DEFER_LINK;
7034         }
7035
7036         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7037         if (ipr_cmd == NULL) {
7038                 spin_unlock(&hrrq->_lock);
7039                 return ATA_DEFER_LINK;
7040         }
7041
7042         qc->lldd_task = ipr_cmd;
7043         spin_unlock(&hrrq->_lock);
7044         return 0;
7045 }
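
/*
 * libata invokes ->qc_defer before ->qc_issue, so stashing the
 * reserved ipr_cmd in qc->lldd_task here lets ipr_qc_issue() pick it
 * up without searching the free list again; ATA_DEFER_LINK tells
 * libata to retry the qc later. Returning 0 on a dead IOA leaves
 * lldd_task NULL so the issue path fails the command instead.
 */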
7046
7047 /**
7048  * ipr_qc_issue - Issue a SATA qc to a device
7049  * @qc: queued command
7050  *
7051  * Return value:
7052  *      0 if success / AC_ERR_* mask on failure
7053  **/
7054 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7055 {
7056         struct ata_port *ap = qc->ap;
7057         struct ipr_sata_port *sata_port = ap->private_data;
7058         struct ipr_resource_entry *res = sata_port->res;
7059         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7060         struct ipr_cmnd *ipr_cmd;
7061         struct ipr_ioarcb *ioarcb;
7062         struct ipr_ioarcb_ata_regs *regs;
7063
7064         if (qc->lldd_task == NULL)
7065                 ipr_qc_defer(qc);
7066
7067         ipr_cmd = qc->lldd_task;
7068         if (ipr_cmd == NULL)
7069                 return AC_ERR_SYSTEM;
7070
7071         qc->lldd_task = NULL;
7072         spin_lock(&ipr_cmd->hrrq->_lock);
7073         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7074                         ipr_cmd->hrrq->ioa_is_dead)) {
7075                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7076                 spin_unlock(&ipr_cmd->hrrq->_lock);
7077                 return AC_ERR_SYSTEM;
7078         }
7079
7080         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7081         ioarcb = &ipr_cmd->ioarcb;
7082
7083         if (ioa_cfg->sis64) {
7084                 regs = &ipr_cmd->i.ata_ioadl.regs;
7085                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7086         } else
7087                 regs = &ioarcb->u.add_data.u.regs;
7088
7089         memset(regs, 0, sizeof(*regs));
7090         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7091
7092         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7093         ipr_cmd->qc = qc;
7094         ipr_cmd->done = ipr_sata_done;
7095         ipr_cmd->ioarcb.res_handle = res->res_handle;
7096         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7097         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7098         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7099         ipr_cmd->dma_use_sg = qc->n_elem;
7100
7101         if (ioa_cfg->sis64)
7102                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7103         else
7104                 ipr_build_ata_ioadl(ipr_cmd, qc);
7105
7106         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7107         ipr_copy_sata_tf(regs, &qc->tf);
7108         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7109         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7110
7111         switch (qc->tf.protocol) {
7112         case ATA_PROT_NODATA:
7113         case ATA_PROT_PIO:
7114                 break;
7115
7116         case ATA_PROT_DMA:
7117                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7118                 break;
7119
7120         case ATAPI_PROT_PIO:
7121         case ATAPI_PROT_NODATA:
7122                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7123                 break;
7124
7125         case ATAPI_PROT_DMA:
7126                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7127                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7128                 break;
7129
7130         default:
7131                 WARN_ON(1);
7132                 spin_unlock(&ipr_cmd->hrrq->_lock);
7133                 return AC_ERR_INVALID;
7134         }
7135
7136         ipr_send_command(ipr_cmd);
7137         spin_unlock(&ipr_cmd->hrrq->_lock);
7138
7139         return 0;
7140 }
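
/*
 * Note: the defer/issue split is not airtight. If no command was
 * reserved by ->qc_defer, ipr_qc_issue() retries the reservation
 * itself and fails the qc with AC_ERR_SYSTEM if a free ipr_cmd still
 * cannot be obtained or the queue has stopped accepting commands.
 */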
7141
7142 /**
7143  * ipr_qc_fill_rtf - Read result TF
7144  * @qc: ATA queued command
7145  *
7146  * Return value:
7147  *      true
7148  **/
7149 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7150 {
7151         struct ipr_sata_port *sata_port = qc->ap->private_data;
7152         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7153         struct ata_taskfile *tf = &qc->result_tf;
7154
7155         tf->feature = g->error;
7156         tf->nsect = g->nsect;
7157         tf->lbal = g->lbal;
7158         tf->lbam = g->lbam;
7159         tf->lbah = g->lbah;
7160         tf->device = g->device;
7161         tf->command = g->status;
7162         tf->hob_nsect = g->hob_nsect;
7163         tf->hob_lbal = g->hob_lbal;
7164         tf->hob_lbam = g->hob_lbam;
7165         tf->hob_lbah = g->hob_lbah;
7166
7167         return true;
7168 }
7169
7170 static struct ata_port_operations ipr_sata_ops = {
7171         .phy_reset = ipr_ata_phy_reset,
7172         .hardreset = ipr_sata_reset,
7173         .post_internal_cmd = ipr_ata_post_internal,
7174         .qc_prep = ata_noop_qc_prep,
7175         .qc_defer = ipr_qc_defer,
7176         .qc_issue = ipr_qc_issue,
7177         .qc_fill_rtf = ipr_qc_fill_rtf,
7178         .port_start = ata_sas_port_start,
7179         .port_stop = ata_sas_port_stop
7180 };
7181
7182 static struct ata_port_info sata_port_info = {
7183         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7184                           ATA_FLAG_SAS_HOST,
7185         .pio_mask       = ATA_PIO4_ONLY,
7186         .mwdma_mask     = ATA_MWDMA2,
7187         .udma_mask      = ATA_UDMA6,
7188         .port_ops       = &ipr_sata_ops
7189 };
7190
7191 #ifdef CONFIG_PPC_PSERIES
7192 static const u16 ipr_blocked_processors[] = {
7193         PVR_NORTHSTAR,
7194         PVR_PULSAR,
7195         PVR_POWER4,
7196         PVR_ICESTAR,
7197         PVR_SSTAR,
7198         PVR_POWER4p,
7199         PVR_630,
7200         PVR_630p
7201 };
7202
7203 /**
7204  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7205  * @ioa_cfg:    ioa cfg struct
7206  *
7207  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7208  * certain pSeries hardware. This function determines if the given
7209  * adapter is in one of these configurations or not.
7210  *
7211  * Return value:
7212  *      1 if adapter is not supported / 0 if adapter is supported
7213  **/
7214 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7215 {
7216         int i;
7217
7218         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7219                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7220                         if (pvr_version_is(ipr_blocked_processors[i]))
7221                                 return 1;
7222                 }
7223         }
7224         return 0;
7225 }
7226 #else
7227 #define ipr_invalid_adapter(ioa_cfg) 0
7228 #endif
7229
7230 /**
7231  * ipr_ioa_bringdown_done - IOA bring down completion.
7232  * @ipr_cmd:    ipr command struct
7233  *
7234  * This function processes the completion of an adapter bring down.
7235  * It wakes any reset sleepers.
7236  *
7237  * Return value:
7238  *      IPR_RC_JOB_RETURN
7239  **/
7240 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7241 {
7242         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243         int i;
7244
7245         ENTER;
7246         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7247                 ipr_trace;
7248                 ioa_cfg->scsi_unblock = 1;
7249                 schedule_work(&ioa_cfg->work_q);
7250         }
7251
7252         ioa_cfg->in_reset_reload = 0;
7253         ioa_cfg->reset_retries = 0;
7254         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7255                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7256                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7257                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7258         }
7259         wmb();
7260
7261         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7262         wake_up_all(&ioa_cfg->reset_wait_q);
7263         LEAVE;
7264
7265         return IPR_RC_JOB_RETURN;
7266 }
7267
7268 /**
7269  * ipr_ioa_reset_done - IOA reset completion.
7270  * @ipr_cmd:    ipr command struct
7271  *
7272  * This function processes the completion of an adapter reset.
7273  * It schedules any necessary mid-layer add/removes and
7274  * wakes any reset sleepers.
7275  *
7276  * Return value:
7277  *      IPR_RC_JOB_RETURN
7278  **/
7279 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7280 {
7281         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7282         struct ipr_resource_entry *res;
7283         int j;
7284
7285         ENTER;
7286         ioa_cfg->in_reset_reload = 0;
7287         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7288                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7289                 ioa_cfg->hrrq[j].allow_cmds = 1;
7290                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7291         }
7292         wmb();
7293         ioa_cfg->reset_cmd = NULL;
7294         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7295
7296         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7297                 if (res->add_to_ml || res->del_from_ml) {
7298                         ipr_trace;
7299                         break;
7300                 }
7301         }
7302         schedule_work(&ioa_cfg->work_q);
7303
7304         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7305                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7306                 if (j < IPR_NUM_LOG_HCAMS)
7307                         ipr_send_hcam(ioa_cfg,
7308                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7309                                 ioa_cfg->hostrcb[j]);
7310                 else
7311                         ipr_send_hcam(ioa_cfg,
7312                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7313                                 ioa_cfg->hostrcb[j]);
7314         }
7315
7316         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7317         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7318
7319         ioa_cfg->reset_retries = 0;
7320         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7321         wake_up_all(&ioa_cfg->reset_wait_q);
7322
7323         ioa_cfg->scsi_unblock = 1;
7324         schedule_work(&ioa_cfg->work_q);
7325         LEAVE;
7326         return IPR_RC_JOB_RETURN;
7327 }
7328
7329 /**
7330  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7331  * @supported_dev:      supported device struct
7332  * @vpids:                      vendor product id struct
7333  *
7334  * Return value:
7335  *      none
7336  **/
7337 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7338                                  struct ipr_std_inq_vpids *vpids)
7339 {
7340         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7341         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7342         supported_dev->num_records = 1;
7343         supported_dev->data_length =
7344                 cpu_to_be16(sizeof(struct ipr_supported_device));
7345         supported_dev->reserved = 0;
7346 }
7347
7348 /**
7349  * ipr_set_supported_devs - Send Set Supported Devices for a device
7350  * @ipr_cmd:    ipr command struct
7351  *
7352  * This function sends a Set Supported Devices to the adapter
7353  *
7354  * Return value:
7355  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7356  **/
7357 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7358 {
7359         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7360         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7361         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7362         struct ipr_resource_entry *res = ipr_cmd->u.res;
7363
7364         ipr_cmd->job_step = ipr_ioa_reset_done;
7365
7366         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7367                 if (!ipr_is_scsi_disk(res))
7368                         continue;
7369
7370                 ipr_cmd->u.res = res;
7371                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7372
7373                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7374                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7375                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7376
7377                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7378                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7379                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7380                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7381
7382                 ipr_init_ioadl(ipr_cmd,
7383                                ioa_cfg->vpd_cbs_dma +
7384                                  offsetof(struct ipr_misc_cbs, supp_dev),
7385                                sizeof(struct ipr_supported_device),
7386                                IPR_IOADL_FLAGS_WRITE_LAST);
7387
7388                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7389                            IPR_SET_SUP_DEVICE_TIMEOUT);
7390
7391                 if (!ioa_cfg->sis64)
7392                         ipr_cmd->job_step = ipr_set_supported_devs;
7393                 LEAVE;
7394                 return IPR_RC_JOB_RETURN;
7395         }
7396
7397         LEAVE;
7398         return IPR_RC_JOB_CONTINUE;
7399 }
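
/*
 * This job step effectively iterates one disk at a time:
 * list_for_each_entry_continue() resumes the walk at ipr_cmd->u.res,
 * a single Set Supported Devices request is issued, and on 32-bit SIS
 * adapters job_step is pointed back at this function so the next disk
 * is processed when the request completes. Once the list is exhausted
 * the job falls through to ipr_ioa_reset_done.
 */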
7400
7401 /**
7402  * ipr_get_mode_page - Locate specified mode page
7403  * @mode_pages: mode page buffer
7404  * @page_code:  page code to find
7405  * @len:                minimum required length for mode page
7406  *
7407  * Return value:
7408  *      pointer to mode page / NULL on failure
7409  **/
7410 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7411                                u32 page_code, u32 len)
7412 {
7413         struct ipr_mode_page_hdr *mode_hdr;
7414         u32 page_length;
7415         u32 length;
7416
7417         if (!mode_pages || (mode_pages->hdr.length == 0))
7418                 return NULL;
7419
7420         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7421         mode_hdr = (struct ipr_mode_page_hdr *)
7422                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7423
7424         while (length) {
7425                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7426                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7427                                 return mode_hdr;
7428                         break;
7429                 } else {
7430                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7431                                        mode_hdr->page_length);
7432                         length -= page_length;
7433                         mode_hdr = (struct ipr_mode_page_hdr *)
7434                                 ((unsigned long)mode_hdr + page_length);
7435                 }
7436         }
7437         return NULL;
7438 }
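
/*
 * The length arithmetic above follows the SCSI mode parameter header
 * layout: hdr.length counts the mode data excluding itself, so the
 * walkable page data is (length + 1) minus the 4-byte header and any
 * block descriptors that precede the first page header.
 */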
7439
7440 /**
7441  * ipr_check_term_power - Check for term power errors
7442  * @ioa_cfg:    ioa config struct
7443  * @mode_pages: IOAFP mode pages buffer
7444  *
7445  * Check the IOAFP's mode page 28 for term power errors
7446  *
7447  * Return value:
7448  *      nothing
7449  **/
7450 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7451                                  struct ipr_mode_pages *mode_pages)
7452 {
7453         int i;
7454         int entry_length;
7455         struct ipr_dev_bus_entry *bus;
7456         struct ipr_mode_page28 *mode_page;
7457
7458         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7459                                       sizeof(struct ipr_mode_page28));
7460
7461         entry_length = mode_page->entry_length;
7462
7463         bus = mode_page->bus;
7464
7465         for (i = 0; i < mode_page->num_entries; i++) {
7466                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7467                         dev_err(&ioa_cfg->pdev->dev,
7468                                 "Term power is absent on scsi bus %d\n",
7469                                 bus->res_addr.bus);
7470                 }
7471
7472                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7473         }
7474 }
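
/*
 * Note: this routine and ipr_modify_ioafp_mode_page_28() both use the
 * page 28 pointer without a NULL check. The working assumption is that
 * the IOAFP always returns mode page 0x28 here, since the Mode Sense
 * that fills this buffer requests exactly that page.
 */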
7475
7476 /**
7477  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7478  * @ioa_cfg:    ioa config struct
7479  *
7480  * Looks through the config table checking for SES devices. If
7481  * the SES device is in the SES table indicating a maximum SCSI
7482  * bus speed, the speed is limited for the bus.
7483  *
7484  * Return value:
7485  *      none
7486  **/
7487 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7488 {
7489         u32 max_xfer_rate;
7490         int i;
7491
7492         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7493                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7494                                                        ioa_cfg->bus_attr[i].bus_width);
7495
7496                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7497                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7498         }
7499 }
7500
7501 /**
7502  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7503  * @ioa_cfg:    ioa config struct
7504  * @mode_pages: mode page 28 buffer
7505  *
7506  * Updates mode page 28 based on driver configuration
7507  *
7508  * Return value:
7509  *      none
7510  **/
7511 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7512                                           struct ipr_mode_pages *mode_pages)
7513 {
7514         int i, entry_length;
7515         struct ipr_dev_bus_entry *bus;
7516         struct ipr_bus_attributes *bus_attr;
7517         struct ipr_mode_page28 *mode_page;
7518
7519         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7520                                       sizeof(struct ipr_mode_page28));
7521
7522         entry_length = mode_page->entry_length;
7523
7524         /* Loop for each device bus entry */
7525         for (i = 0, bus = mode_page->bus;
7526              i < mode_page->num_entries;
7527              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7528                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7529                         dev_err(&ioa_cfg->pdev->dev,
7530                                 "Invalid resource address reported: 0x%08X\n",
7531                                 IPR_GET_PHYS_LOC(bus->res_addr));
7532                         continue;
7533                 }
7534
7535                 bus_attr = &ioa_cfg->bus_attr[i];
7536                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7537                 bus->bus_width = bus_attr->bus_width;
7538                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7539                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7540                 if (bus_attr->qas_enabled)
7541                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7542                 else
7543                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7544         }
7545 }
7546
7547 /**
7548  * ipr_build_mode_select - Build a mode select command
7549  * @ipr_cmd:    ipr command struct
7550  * @res_handle: resource handle to send command to
7551  * @parm:               Byte 1 of the Mode Select CDB
7552  * @dma_addr:   DMA buffer address
7553  * @xfer_len:   data transfer length
7554  *
7555  * Return value:
7556  *      none
7557  **/
7558 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7559                                   __be32 res_handle, u8 parm,
7560                                   dma_addr_t dma_addr, u8 xfer_len)
7561 {
7562         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7563
7564         ioarcb->res_handle = res_handle;
7565         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7566         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7567         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7568         ioarcb->cmd_pkt.cdb[1] = parm;
7569         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7570
7571         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7572 }
7573
7574 /**
7575  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7576  * @ipr_cmd:    ipr command struct
7577  *
7578  * This function sets up the SCSI bus attributes and sends
7579  * a Mode Select for Page 28 to activate them.
7580  *
7581  * Return value:
7582  *      IPR_RC_JOB_RETURN
7583  **/
7584 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7585 {
7586         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7587         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7588         int length;
7589
7590         ENTER;
7591         ipr_scsi_bus_speed_limit(ioa_cfg);
7592         ipr_check_term_power(ioa_cfg, mode_pages);
7593         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7594         length = mode_pages->hdr.length + 1;
7595         mode_pages->hdr.length = 0;
7596
7597         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7598                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7599                               length);
7600
7601         ipr_cmd->job_step = ipr_set_supported_devs;
7602         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7603                                     struct ipr_resource_entry, queue);
7604         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7605
7606         LEAVE;
7607         return IPR_RC_JOB_RETURN;
7608 }
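
/*
 * The hdr.length juggling above matches the SCSI MODE SELECT rules:
 * the captured (length + 1) total is sent as the CDB parameter list
 * length, while the MODE DATA LENGTH field inside the parameter list
 * is reserved for MODE SELECT and therefore zeroed before transfer.
 */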
7609
7610 /**
7611  * ipr_build_mode_sense - Builds a mode sense command
7612  * @ipr_cmd:    ipr command struct
7613  * @res_handle:         resource handle to send command to
7614  * @parm:               Byte 2 of mode sense command
7615  * @dma_addr:   DMA address of mode sense buffer
7616  * @xfer_len:   Size of DMA buffer
7617  *
7618  * Return value:
7619  *      none
7620  **/
7621 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7622                                  __be32 res_handle,
7623                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7624 {
7625         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7626
7627         ioarcb->res_handle = res_handle;
7628         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7629         ioarcb->cmd_pkt.cdb[2] = parm;
7630         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7631         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7632
7633         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7634 }
7635
7636 /**
7637  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7638  * @ipr_cmd:    ipr command struct
7639  *
7640  * This function handles the failure of an IOA bringup command.
7641  *
7642  * Return value:
7643  *      IPR_RC_JOB_RETURN
7644  **/
7645 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7646 {
7647         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7648         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7649
7650         dev_err(&ioa_cfg->pdev->dev,
7651                 "0x%02X failed with IOASC: 0x%08X\n",
7652                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7653
7654         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7655         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7656         return IPR_RC_JOB_RETURN;
7657 }
7658
7659 /**
7660  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7661  * @ipr_cmd:    ipr command struct
7662  *
7663  * This function handles the failure of a Mode Sense to the IOAFP.
7664  * Some adapters do not handle all mode pages.
7665  *
7666  * Return value:
7667  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7668  **/
7669 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7670 {
7671         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7672         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7673
7674         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7675                 ipr_cmd->job_step = ipr_set_supported_devs;
7676                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7677                                             struct ipr_resource_entry, queue);
7678                 return IPR_RC_JOB_CONTINUE;
7679         }
7680
7681         return ipr_reset_cmd_failed(ipr_cmd);
7682 }
7683
7684 /**
7685  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7686  * @ipr_cmd:    ipr command struct
7687  *
7688  * This function sends a Page 28 mode sense to the IOA to
7689  * retrieve SCSI bus attributes.
7690  *
7691  * Return value:
7692  *      IPR_RC_JOB_RETURN
7693  **/
7694 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7695 {
7696         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7697
7698         ENTER;
7699         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7700                              0x28, ioa_cfg->vpd_cbs_dma +
7701                              offsetof(struct ipr_misc_cbs, mode_pages),
7702                              sizeof(struct ipr_mode_pages));
7703
7704         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7705         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7706
7707         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7708
7709         LEAVE;
7710         return IPR_RC_JOB_RETURN;
7711 }
7712
7713 /**
7714  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7715  * @ipr_cmd:    ipr command struct
7716  *
7717  * This function enables dual IOA RAID support if possible.
7718  *
7719  * Return value:
7720  *      IPR_RC_JOB_RETURN
7721  **/
7722 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7723 {
7724         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7725         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7726         struct ipr_mode_page24 *mode_page;
7727         int length;
7728
7729         ENTER;
7730         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7731                                       sizeof(struct ipr_mode_page24));
7732
7733         if (mode_page)
7734                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7735
7736         length = mode_pages->hdr.length + 1;
7737         mode_pages->hdr.length = 0;
7738
7739         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7740                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7741                               length);
7742
7743         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7744         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7745
7746         LEAVE;
7747         return IPR_RC_JOB_RETURN;
7748 }
7749
7750 /**
7751  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7752  * @ipr_cmd:    ipr command struct
7753  *
7754  * This function handles the failure of a Mode Sense to the IOAFP.
7755  * Some adapters do not handle all mode pages.
7756  *
7757  * Return value:
7758  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7759  **/
7760 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7761 {
7762         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7763
7764         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7765                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7766                 return IPR_RC_JOB_CONTINUE;
7767         }
7768
7769         return ipr_reset_cmd_failed(ipr_cmd);
7770 }
7771
7772 /**
7773  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7774  * @ipr_cmd:    ipr command struct
7775  *
7776  * This function sends a mode sense to the IOA to retrieve
7777  * the IOA Advanced Function Control mode page.
7778  *
7779  * Return value:
7780  *      IPR_RC_JOB_RETURN
7781  **/
7782 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7783 {
7784         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7785
7786         ENTER;
7787         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7788                              0x24, ioa_cfg->vpd_cbs_dma +
7789                              offsetof(struct ipr_misc_cbs, mode_pages),
7790                              sizeof(struct ipr_mode_pages));
7791
7792         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7793         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7794
7795         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7796
7797         LEAVE;
7798         return IPR_RC_JOB_RETURN;
7799 }
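
/*
 * Pages 0x24 and 0x28 follow one pattern: Mode Sense into vpd_cbs,
 * adjust the buffer, Mode Select it back. The *_failed handlers let
 * adapters that reject a page (IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
 * skip ahead to the next job step instead of failing the reset job.
 */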
7800
7801 /**
7802  * ipr_init_res_table - Initialize the resource table
7803  * @ipr_cmd:    ipr command struct
7804  *
7805  * This function looks through the existing resource table, comparing
7806  * it with the config table. This function will take care of old/new
7807  * devices and schedule adding/removing them from the mid-layer
7808  * as appropriate.
7809  *
7810  * Return value:
7811  *      IPR_RC_JOB_CONTINUE
7812  **/
7813 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7814 {
7815         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7816         struct ipr_resource_entry *res, *temp;
7817         struct ipr_config_table_entry_wrapper cfgtew;
7818         int entries, found, flag, i;
7819         LIST_HEAD(old_res);
7820
7821         ENTER;
7822         if (ioa_cfg->sis64)
7823                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7824         else
7825                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7826
7827         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7828                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7829
7830         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7831                 list_move_tail(&res->queue, &old_res);
7832
7833         if (ioa_cfg->sis64)
7834                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7835         else
7836                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7837
7838         for (i = 0; i < entries; i++) {
7839                 if (ioa_cfg->sis64)
7840                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7841                 else
7842                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7843                 found = 0;
7844
7845                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7846                         if (ipr_is_same_device(res, &cfgtew)) {
7847                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7848                                 found = 1;
7849                                 break;
7850                         }
7851                 }
7852
7853                 if (!found) {
7854                         if (list_empty(&ioa_cfg->free_res_q)) {
7855                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7856                                 break;
7857                         }
7858
7859                         found = 1;
7860                         res = list_entry(ioa_cfg->free_res_q.next,
7861                                          struct ipr_resource_entry, queue);
7862                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7863                         ipr_init_res_entry(res, &cfgtew);
7864                         res->add_to_ml = 1;
7865                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7866                         res->sdev->allow_restart = 1;
7867
7868                 if (found)
7869                         ipr_update_res_entry(res, &cfgtew);
7870         }
7871
7872         list_for_each_entry_safe(res, temp, &old_res, queue) {
7873                 if (res->sdev) {
7874                         res->del_from_ml = 1;
7875                         res->res_handle = IPR_INVALID_RES_HANDLE;
7876                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7877                 }
7878         }
7879
7880         list_for_each_entry_safe(res, temp, &old_res, queue) {
7881                 ipr_clear_res_target(res);
7882                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7883         }
7884
7885         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7886                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7887         else
7888                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7889
7890         LEAVE;
7891         return IPR_RC_JOB_CONTINUE;
7892 }
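
/*
 * Reconciliation summary for the function above: all known resources
 * are first moved to old_res; entries that match the fresh config
 * table move back to used_res_q and are updated in place, unmatched
 * config entries consume a free resource flagged add_to_ml, and
 * leftovers on old_res are either flagged del_from_ml (when the
 * mid-layer has an sdev for them) or returned to free_res_q.
 */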
7893
7894 /**
7895  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7896  * @ipr_cmd:    ipr command struct
7897  *
7898  * This function sends a Query IOA Configuration command
7899  * to the adapter to retrieve the IOA configuration table.
7900  *
7901  * Return value:
7902  *      IPR_RC_JOB_RETURN
7903  **/
7904 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7905 {
7906         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7907         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7908         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7909         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7910
7911         ENTER;
7912         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7913                 ioa_cfg->dual_raid = 1;
7914         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7915                  ucode_vpd->major_release, ucode_vpd->card_type,
7916                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7917         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7918         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7919
7920         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7921         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7922         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7923         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7924
7925         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7926                        IPR_IOADL_FLAGS_READ_LAST);
7927
7928         ipr_cmd->job_step = ipr_init_res_table;
7929
7930         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7931
7932         LEAVE;
7933         return IPR_RC_JOB_RETURN;
7934 }
7935
7936 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7937 {
7938         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7939
7940         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7941                 return IPR_RC_JOB_CONTINUE;
7942
7943         return ipr_reset_cmd_failed(ipr_cmd);
7944 }
7945
7946 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7947                                          __be32 res_handle, u8 sa_code)
7948 {
7949         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7950
7951         ioarcb->res_handle = res_handle;
7952         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7953         ioarcb->cmd_pkt.cdb[1] = sa_code;
7954         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7955 }
7956
7957 /**
7958  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7959  * action
7960  * @ipr_cmd:    ipr command struct
7961  *
7962  * Return value:
7963  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7964  **/
7965 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7966 {
7967         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7968         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7969         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7970
7971         ENTER;
7972
7973         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7974
7975         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7976                 ipr_build_ioa_service_action(ipr_cmd,
7977                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7978                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7979
7980                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7981
7982                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7983                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7984                            IPR_SET_SUP_DEVICE_TIMEOUT);
7985
7986                 LEAVE;
7987                 return IPR_RC_JOB_RETURN;
7988         }
7989
7990         LEAVE;
7991         return IPR_RC_JOB_CONTINUE;
7992 }
7993
7994 /**
7995  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7996  * @ipr_cmd:    ipr command struct
7997  * @flags:      flags to send
7998  * @page:       page to inquire
7999  * @dma_addr:   DMA address
8000  * @xfer_len:   transfer data length
8001  *
8002  * This utility function sends an inquiry to the adapter.
8003  *
8004  * Return value:
8005  *      none
8006  **/
8007 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
8008                               dma_addr_t dma_addr, u8 xfer_len)
8009 {
8010         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8011
8012         ENTER;
8013         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8014         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8015
8016         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8017         ioarcb->cmd_pkt.cdb[1] = flags;
8018         ioarcb->cmd_pkt.cdb[2] = page;
8019         ioarcb->cmd_pkt.cdb[4] = xfer_len;
8020
8021         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8022
8023         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8024         LEAVE;
8025 }
8026
8027 /**
8028  * ipr_inquiry_page_supported - Is the given inquiry page supported
8029  * @page0:              inquiry page 0 buffer
8030  * @page:               page code.
8031  *
8032  * This function determines if the specified inquiry page is supported.
8033  *
8034  * Return value:
8035  *      1 if page is supported / 0 if not
8036  **/
8037 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8038 {
8039         int i;
8040
8041         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8042                 if (page0->page[i] == page)
8043                         return 1;
8044
8045         return 0;
8046 }
8047
8048 /**
8049  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8050  * @ipr_cmd:    ipr command struct
8051  *
8052  * This function sends a Page 0xC4 inquiry to the adapter
8053  * to retrieve software VPD information.
8054  *
8055  * Return value:
8056  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8057  **/
8058 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8059 {
8060         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8061         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8062         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8063
8064         ENTER;
8065         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8066         memset(pageC4, 0, sizeof(*pageC4));
8067
8068         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8069                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8070                                   (ioa_cfg->vpd_cbs_dma
8071                                    + offsetof(struct ipr_misc_cbs,
8072                                               pageC4_data)),
8073                                   sizeof(struct ipr_inquiry_pageC4));
8074                 return IPR_RC_JOB_RETURN;
8075         }
8076
8077         LEAVE;
8078         return IPR_RC_JOB_CONTINUE;
8079 }
8080
8081 /**
8082  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8083  * @ipr_cmd:    ipr command struct
8084  *
8085  * This function sends a Page 0xD0 inquiry to the adapter
8086  * to retrieve adapter capabilities.
8087  *
8088  * Return value:
8089  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8090  **/
8091 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8092 {
8093         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8094         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8095         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8096
8097         ENTER;
8098         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8099         memset(cap, 0, sizeof(*cap));
8100
8101         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8102                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8103                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8104                                   sizeof(struct ipr_inquiry_cap));
8105                 return IPR_RC_JOB_RETURN;
8106         }
8107
8108         LEAVE;
8109         return IPR_RC_JOB_CONTINUE;
8110 }
8111
8112 /**
8113  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8114  * @ipr_cmd:    ipr command struct
8115  *
8116  * This function sends a Page 3 inquiry to the adapter
8117  * to retrieve software VPD information.
8118  *
8119  * Return value:
8120  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8121  **/
8122 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8123 {
8124         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8125
8126         ENTER;
8127
8128         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8129
8130         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8131                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8132                           sizeof(struct ipr_inquiry_page3));
8133
8134         LEAVE;
8135         return IPR_RC_JOB_RETURN;
8136 }
8137
8138 /**
8139  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8140  * @ipr_cmd:    ipr command struct
8141  *
8142  * This function sends a Page 0 inquiry to the adapter
8143  * to retrieve supported inquiry pages.
8144  *
8145  * Return value:
8146  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8147  **/
8148 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8149 {
8150         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8151         char type[5];
8152
8153         ENTER;
8154
8155         /* Grab the type out of the VPD and store it away */
8156         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8157         type[4] = '\0';
8158         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8159
8160         if (ipr_invalid_adapter(ioa_cfg)) {
8161                 dev_err(&ioa_cfg->pdev->dev,
8162                         "Adapter not supported in this hardware configuration.\n");
8163
8164                 if (!ipr_testmode) {
8165                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8166                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8167                         list_add_tail(&ipr_cmd->queue,
8168                                         &ioa_cfg->hrrq->hrrq_free_q);
8169                         return IPR_RC_JOB_RETURN;
8170                 }
8171         }
8172
8173         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8174
8175         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8176                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8177                           sizeof(struct ipr_inquiry_page0));
8178
8179         LEAVE;
8180         return IPR_RC_JOB_RETURN;
8181 }
8182
8183 /**
8184  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8185  * @ipr_cmd:    ipr command struct
8186  *
8187  * This function sends a standard inquiry to the adapter.
8188  *
8189  * Return value:
8190  *      IPR_RC_JOB_RETURN
8191  **/
8192 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8193 {
8194         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8195
8196         ENTER;
8197         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8198
8199         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8200                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8201                           sizeof(struct ipr_ioa_vpd));
8202
8203         LEAVE;
8204         return IPR_RC_JOB_RETURN;
8205 }
8206
8207 /**
8208  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8209  * @ipr_cmd:    ipr command struct
8210  *
8211  * This function sends an Identify Host Request Response Queue
8212  * command to establish the HRRQ with the adapter.
8213  *
8214  * Return value:
8215  *      IPR_RC_JOB_RETURN
8216  **/
8217 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8218 {
8219         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8220         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8221         struct ipr_hrr_queue *hrrq;
8222
8223         ENTER;
8224         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8225         if (ioa_cfg->identify_hrrq_index == 0)
8226                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8227
8228         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8229                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8230
8231                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8232                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8233
8234                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8235                 if (ioa_cfg->sis64)
8236                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8237
8238                 if (ioa_cfg->nvectors == 1)
8239                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8240                 else
8241                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8242
8243                 ioarcb->cmd_pkt.cdb[2] =
8244                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8245                 ioarcb->cmd_pkt.cdb[3] =
8246                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8247                 ioarcb->cmd_pkt.cdb[4] =
8248                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8249                 ioarcb->cmd_pkt.cdb[5] =
8250                         ((u64) hrrq->host_rrq_dma) & 0xff;
8251                 ioarcb->cmd_pkt.cdb[7] =
8252                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8253                 ioarcb->cmd_pkt.cdb[8] =
8254                         (sizeof(u32) * hrrq->size) & 0xff;
8255
8256                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8257                         ioarcb->cmd_pkt.cdb[9] =
8258                                         ioa_cfg->identify_hrrq_index;
8259
8260                 if (ioa_cfg->sis64) {
8261                         ioarcb->cmd_pkt.cdb[10] =
8262                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8263                         ioarcb->cmd_pkt.cdb[11] =
8264                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8265                         ioarcb->cmd_pkt.cdb[12] =
8266                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8267                         ioarcb->cmd_pkt.cdb[13] =
8268                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8269                 }
8270
8271                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8272                         ioarcb->cmd_pkt.cdb[14] =
8273                                         ioa_cfg->identify_hrrq_index;
8274
8275                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8276                            IPR_INTERNAL_TIMEOUT);
8277
8278                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8279                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8280
8281                 LEAVE;
8282                 return IPR_RC_JOB_RETURN;
8283         }
8284
8285         LEAVE;
8286         return IPR_RC_JOB_CONTINUE;
8287 }
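
/*
 * The Identify HRRQ CDB carries the queue's DMA address big-endian:
 * bytes 2-5 hold address bits 31-0 and, on SIS-64, bytes 10-13 hold
 * bits 63-32. The step also re-arms itself, so one Identify is issued
 * per allocated HRRQ until identify_hrrq_index reaches hrrq_num.
 */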
8288
8289 /**
8290  * ipr_reset_timer_done - Adapter reset timer function
8291  * @t: Timer context used to fetch ipr command struct
8292  *
8293  * Description: This function is used in adapter reset processing
8294  * for timing events. If the reset_cmd pointer in the IOA
8295  * config struct no longer points at this command, we are doing nested
8296  * resets and fail_all_ops will take care of freeing the
8297  * command block.
8298  *
8299  * Return value:
8300  *      none
8301  **/
8302 static void ipr_reset_timer_done(struct timer_list *t)
8303 {
8304         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8305         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8306         unsigned long lock_flags = 0;
8307
8308         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8309
8310         if (ioa_cfg->reset_cmd == ipr_cmd) {
8311                 list_del(&ipr_cmd->queue);
8312                 ipr_cmd->done(ipr_cmd);
8313         }
8314
8315         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8316 }
8317
8318 /**
8319  * ipr_reset_start_timer - Start a timer for adapter reset job
8320  * @ipr_cmd:    ipr command struct
8321  * @timeout:    timeout value
8322  *
8323  * Description: This function is used in adapter reset processing
8324  * for timing events. If the reset_cmd pointer in the IOA
8325  * config struct no longer points at this command, we are doing nested
8326  * resets and fail_all_ops will take care of freeing the
8327  * command block.
8328  *
8329  * Return value:
8330  *      none
8331  **/
8332 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8333                                   unsigned long timeout)
8334 {
8335
8336         ENTER;
8337         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8338         ipr_cmd->done = ipr_reset_ioa_job;
8339
8340         ipr_cmd->timer.expires = jiffies + timeout;
8341         ipr_cmd->timer.function = ipr_reset_timer_done;
8342         add_timer(&ipr_cmd->timer);
8343 }
8344
8345 /**
8346  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8347  * @ioa_cfg:    ioa cfg struct
8348  *
8349  * Return value:
8350  *      nothing
8351  **/
8352 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8353 {
8354         struct ipr_hrr_queue *hrrq;
8355
8356         for_each_hrrq(hrrq, ioa_cfg) {
8357                 spin_lock(&hrrq->_lock);
8358                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8359
8360                 /* Initialize Host RRQ pointers */
8361                 hrrq->hrrq_start = hrrq->host_rrq;
8362                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8363                 hrrq->hrrq_curr = hrrq->hrrq_start;
8364                 hrrq->toggle_bit = 1;
8365                 spin_unlock(&hrrq->_lock);
8366         }
8367         wmb();
8368
8369         ioa_cfg->identify_hrrq_index = 0;
8370         if (ioa_cfg->hrrq_num == 1)
8371                 atomic_set(&ioa_cfg->hrrq_index, 0);
8372         else
8373                 atomic_set(&ioa_cfg->hrrq_index, 1);
8374
8375         /* Zero out config table */
8376         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8377 }
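
/*
 * The toggle bit initialized above is the driver's freshness marker:
 * it starts at 1 and is expected to flip each time the queue wraps, so
 * responses whose producer bit does not match toggle_bit can be
 * treated as stale leftovers in host_rrq. (Based on the HRRQ handling
 * elsewhere in this driver, not on code shown here.)
 */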
8378
8379 /**
8380  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8381  * @ipr_cmd:    ipr command struct
8382  *
8383  * Return value:
8384  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8385  **/
8386 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8387 {
8388         unsigned long stage, stage_time;
8389         u32 feedback;
8390         volatile u32 int_reg;
8391         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8392         u64 maskval = 0;
8393
8394         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8395         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8396         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8397
8398         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8399
8400         /* sanity check the stage_time value */
8401         if (stage_time == 0)
8402                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8403         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8404                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8405         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8406                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8407
8408         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8409                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8410                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8411                 stage_time = ioa_cfg->transop_timeout;
8412                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8413         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8414                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8415                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8416                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8417                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8418                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8419                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8420                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8421                         return IPR_RC_JOB_CONTINUE;
8422                 }
8423         }
8424
8425         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8426         ipr_cmd->timer.function = ipr_oper_timeout;
8427         ipr_cmd->done = ipr_reset_ioa_job;
8428         add_timer(&ipr_cmd->timer);
8429
8430         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8431
8432         return IPR_RC_JOB_RETURN;
8433 }
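
/*
 * stage_time read from the init feedback register is clamped above to
 * [IPR_IPL_INIT_MIN_STAGE_TIME, IPR_LONG_OPERATIONAL_TIMEOUT] and then
 * used as the per-stage timeout: ipr_oper_timeout fires if the adapter
 * lingers in a single IPL stage for that long.
 */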
8434
8435 /**
8436  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8437  * @ipr_cmd:    ipr command struct
8438  *
8439  * This function reinitializes some control blocks and
8440  * enables destructive diagnostics on the adapter.
8441  *
8442  * Return value:
8443  *      IPR_RC_JOB_RETURN
8444  **/
8445 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8446 {
8447         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8448         volatile u32 int_reg;
8449         volatile u64 maskval;
8450         int i;
8451
8452         ENTER;
8453         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8454         ipr_init_ioa_mem(ioa_cfg);
8455
8456         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8457                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8458                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8459                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8460         }
8461         if (ioa_cfg->sis64) {
8462                 /* Set the adapter to the correct endian mode. */
8463                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8464                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8465         }
8466
8467         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8468
8469         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8470                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8471                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8472                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8473                 return IPR_RC_JOB_CONTINUE;
8474         }
8475
8476         /* Enable destructive diagnostics on IOA */
8477         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8478
8479         if (ioa_cfg->sis64) {
8480                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8481                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8482                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8483         } else
8484                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8485
8486         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8487
8488         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8489
8490         if (ioa_cfg->sis64) {
8491                 ipr_cmd->job_step = ipr_reset_next_stage;
8492                 return IPR_RC_JOB_CONTINUE;
8493         }
8494
8495         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8496         ipr_cmd->timer.function = ipr_oper_timeout;
8497         ipr_cmd->done = ipr_reset_ioa_job;
8498         add_timer(&ipr_cmd->timer);
8499         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8500
8501         LEAVE;
8502         return IPR_RC_JOB_RETURN;
8503 }
8504
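/*
 * Editorial note: on SIS64 hardware the interrupt mask register is 64 bits
 * wide, so the code above packs two 32-bit mask words into one writeq().
 * A minimal sketch of the composition:
 *
 *     u64 maskval = IPR_PCII_IPL_STAGE_CHANGE;              // bits 63:32
 *     maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; // bits 31:0
 *     writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
 *
 * The readl() of sense_interrupt_mask_reg that follows is, presumably,
 * the usual read-back to flush the posted MMIO write before proceeding.
 */
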
8505 /**
8506  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8507  * @ipr_cmd:    ipr command struct
8508  *
8509  * This function is invoked when an adapter dump has run out
8510  * of processing time.
8511  *
8512  * Return value:
8513  *      IPR_RC_JOB_CONTINUE
8514  **/
8515 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8516 {
8517         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8518
8519         if (ioa_cfg->sdt_state == GET_DUMP)
8520                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8521         else if (ioa_cfg->sdt_state == READ_DUMP)
8522                 ioa_cfg->sdt_state = ABORT_DUMP;
8523
8524         ioa_cfg->dump_timeout = 1;
8525         ipr_cmd->job_step = ipr_reset_alert;
8526
8527         return IPR_RC_JOB_CONTINUE;
8528 }
8529
8530 /**
8531  * ipr_unit_check_no_data - Log a unit check/no data error log
8532  * @ioa_cfg:            ioa config struct
8533  *
8534  * Logs an error indicating the adapter unit checked, but for some
8535  * reason, we were unable to fetch the unit check buffer.
8536  *
8537  * Return value:
8538  *      nothing
8539  **/
8540 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8541 {
8542         ioa_cfg->errors_logged++;
8543         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8544 }
8545
8546 /**
8547  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8548  * @ioa_cfg:            ioa config struct
8549  *
8550  * Fetches the unit check buffer from the adapter by clocking the data
8551  * through the mailbox register.
8552  *
8553  * Return value:
8554  *      nothing
8555  **/
8556 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8557 {
8558         unsigned long mailbox;
8559         struct ipr_hostrcb *hostrcb;
8560         struct ipr_uc_sdt sdt;
8561         int rc, length;
8562         u32 ioasc;
8563
8564         mailbox = readl(ioa_cfg->ioa_mailbox);
8565
8566         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8567                 ipr_unit_check_no_data(ioa_cfg);
8568                 return;
8569         }
8570
8571         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8572         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8573                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8574
8575         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8576             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8577             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8578                 ipr_unit_check_no_data(ioa_cfg);
8579                 return;
8580         }
8581
8582         /* Find length of the first sdt entry (UC buffer) */
8583         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8584                 length = be32_to_cpu(sdt.entry[0].end_token);
8585         else
8586                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8587                           be32_to_cpu(sdt.entry[0].start_token)) &
8588                           IPR_FMT2_MBX_ADDR_MASK;
8589
8590         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8591                              struct ipr_hostrcb, queue);
8592         list_del_init(&hostrcb->queue);
8593         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8594
8595         rc = ipr_get_ldump_data_section(ioa_cfg,
8596                                         be32_to_cpu(sdt.entry[0].start_token),
8597                                         (__be32 *)&hostrcb->hcam,
8598                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8599
8600         if (!rc) {
8601                 ipr_handle_log_data(ioa_cfg, hostrcb);
8602                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8603                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8604                     ioa_cfg->sdt_state == GET_DUMP)
8605                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8606         } else
8607                 ipr_unit_check_no_data(ioa_cfg);
8608
8609         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8610 }
8611
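/*
 * Editorial note -- how the length of the first SDT entry (the unit check
 * buffer) is derived above, restated as a sketch:
 *
 *     if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
 *         length = be32_to_cpu(sdt.entry[0].end_token);   // used directly
 *     else // format 2: difference of tokens, masked to the address width
 *         length = (end_token - start_token) & IPR_FMT2_MBX_ADDR_MASK;
 *
 * The subsequent copy into hostrcb->hcam is capped at
 * sizeof(hostrcb->hcam), so an oversized entry cannot overrun the buffer.
 */
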
8612 /**
8613  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8614  * @ipr_cmd:    ipr command struct
8615  *
8616  * Description: This function retrieves the unit check buffer from the adapter.
8617  *
8618  * Return value:
8619  *      IPR_RC_JOB_RETURN
8620  **/
8621 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8622 {
8623         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8624
8625         ENTER;
8626         ioa_cfg->ioa_unit_checked = 0;
8627         ipr_get_unit_check_buffer(ioa_cfg);
8628         ipr_cmd->job_step = ipr_reset_alert;
8629         ipr_reset_start_timer(ipr_cmd, 0);
8630
8631         LEAVE;
8632         return IPR_RC_JOB_RETURN;
8633 }
8634
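/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become stable
 * @ipr_cmd:    ipr command struct
 *
 * Description: Polls for the SIS64 mailbox-stable condition (or until the
 * time budget in ipr_cmd->u.time_left is exhausted), then transitions
 * sdt_state to READ_DUMP and schedules the dump worker.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/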
8635 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8636 {
8637         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8638
8639         ENTER;
8640
8641         if (ioa_cfg->sdt_state != GET_DUMP)
8642                 return IPR_RC_JOB_RETURN;
8643
8644         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8645             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8646              IPR_PCII_MAILBOX_STABLE)) {
8647
8648                 if (!ipr_cmd->u.time_left)
8649                         dev_err(&ioa_cfg->pdev->dev,
8650                                 "Timed out waiting for Mailbox register.\n");
8651
8652                 ioa_cfg->sdt_state = READ_DUMP;
8653                 ioa_cfg->dump_timeout = 0;
8654                 if (ioa_cfg->sis64)
8655                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8656                 else
8657                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8658                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8659                 schedule_work(&ioa_cfg->work_q);
8660
8661         } else {
8662                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8663                 ipr_reset_start_timer(ipr_cmd,
8664                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8665         }
8666
8667         LEAVE;
8668         return IPR_RC_JOB_RETURN;
8669 }
8670
8671 /**
8672  * ipr_reset_restore_cfg_space - Restore PCI config space.
8673  * @ipr_cmd:    ipr command struct
8674  *
8675  * Description: This function restores the saved PCI config space of
8676  * the adapter, fails all outstanding ops back to the callers, and
8677  * fetches the dump/unit check if applicable to this reset.
8678  *
8679  * Return value:
8680  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8681  **/
8682 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8683 {
8684         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8685
8686         ENTER;
8687         ioa_cfg->pdev->state_saved = true;
8688         pci_restore_state(ioa_cfg->pdev);
8689
8690         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8691                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8692                 return IPR_RC_JOB_CONTINUE;
8693         }
8694
8695         ipr_fail_all_ops(ioa_cfg);
8696
8697         if (ioa_cfg->sis64) {
8698                 /* Set the adapter to the correct endian mode. */
8699                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8700                 readl(ioa_cfg->regs.endian_swap_reg);
8701         }
8702
8703         if (ioa_cfg->ioa_unit_checked) {
8704                 if (ioa_cfg->sis64) {
8705                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8706                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8707                         return IPR_RC_JOB_RETURN;
8708                 } else {
8709                         ioa_cfg->ioa_unit_checked = 0;
8710                         ipr_get_unit_check_buffer(ioa_cfg);
8711                         ipr_cmd->job_step = ipr_reset_alert;
8712                         ipr_reset_start_timer(ipr_cmd, 0);
8713                         return IPR_RC_JOB_RETURN;
8714                 }
8715         }
8716
8717         if (ioa_cfg->in_ioa_bringdown) {
8718                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8719         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8720                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8721                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8722         } else {
8723                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8724         }
8725
8726         LEAVE;
8727         return IPR_RC_JOB_CONTINUE;
8728 }
8729
8730 /**
8731  * ipr_reset_bist_done - BIST has completed on the adapter.
8732  * @ipr_cmd:    ipr command struct
8733  *
8734  * Description: Unblock config space and resume the reset process.
8735  *
8736  * Return value:
8737  *      IPR_RC_JOB_CONTINUE
8738  **/
8739 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8740 {
8741         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8742
8743         ENTER;
8744         if (ioa_cfg->cfg_locked)
8745                 pci_cfg_access_unlock(ioa_cfg->pdev);
8746         ioa_cfg->cfg_locked = 0;
8747         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8748         LEAVE;
8749         return IPR_RC_JOB_CONTINUE;
8750 }
8751
8752 /**
8753  * ipr_reset_start_bist - Run BIST on the adapter.
8754  * @ipr_cmd:    ipr command struct
8755  *
8756  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8757  *
8758  * Return value:
8759  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8760  **/
8761 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8762 {
8763         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8764         int rc = PCIBIOS_SUCCESSFUL;
8765
8766         ENTER;
8767         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8768                 writel(IPR_UPROCI_SIS64_START_BIST,
8769                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8770         else
8771                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8772
8773         if (rc == PCIBIOS_SUCCESSFUL) {
8774                 ipr_cmd->job_step = ipr_reset_bist_done;
8775                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8776                 rc = IPR_RC_JOB_RETURN;
8777         } else {
8778                 if (ioa_cfg->cfg_locked)
8779                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8780                 ioa_cfg->cfg_locked = 0;
8781                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8782                 rc = IPR_RC_JOB_CONTINUE;
8783         }
8784
8785         LEAVE;
8786         return rc;
8787 }
8788
8789 /**
8790  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8791  * @ipr_cmd:    ipr command struct
8792  *
8793  * Description: This clears PCI reset to the adapter and delays two seconds.
8794  *
8795  * Return value:
8796  *      IPR_RC_JOB_RETURN
8797  **/
8798 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8799 {
8800         ENTER;
8801         ipr_cmd->job_step = ipr_reset_bist_done;
8802         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8803         LEAVE;
8804         return IPR_RC_JOB_RETURN;
8805 }
8806
8807 /**
8808  * ipr_reset_reset_work - Pulse a PCIe warm reset
8809  * @work:       work struct
8810  *
8811  * Description: This asserts a PCIe warm reset to the slot for IPR_PCI_RESET_TIMEOUT, then deasserts it.
8812  *
8813  **/
8814 static void ipr_reset_reset_work(struct work_struct *work)
8815 {
8816         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8817         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8818         struct pci_dev *pdev = ioa_cfg->pdev;
8819         unsigned long lock_flags = 0;
8820
8821         ENTER;
8822         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8823         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8824         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8825
8826         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8827         if (ioa_cfg->reset_cmd == ipr_cmd)
8828                 ipr_reset_ioa_job(ipr_cmd);
8829         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8830         LEAVE;
8831 }
8832
8833 /**
8834  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8835  * @ipr_cmd:    ipr command struct
8836  *
8837  * Description: This asserts PCI reset to the adapter.
8838  *
8839  * Return value:
8840  *      IPR_RC_JOB_RETURN
8841  **/
8842 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8843 {
8844         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8845
8846         ENTER;
8847         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8848         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8849         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8850         LEAVE;
8851         return IPR_RC_JOB_RETURN;
8852 }
8853
8854 /**
8855  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8856  * @ipr_cmd:    ipr command struct
8857  *
8858  * Description: This attempts to block config access to the IOA.
8859  *
8860  * Return value:
8861  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8862  **/
8863 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8864 {
8865         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8866         int rc = IPR_RC_JOB_CONTINUE;
8867
8868         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8869                 ioa_cfg->cfg_locked = 1;
8870                 ipr_cmd->job_step = ioa_cfg->reset;
8871         } else {
8872                 if (ipr_cmd->u.time_left) {
8873                         rc = IPR_RC_JOB_RETURN;
8874                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8875                         ipr_reset_start_timer(ipr_cmd,
8876                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8877                 } else {
8878                         ipr_cmd->job_step = ioa_cfg->reset;
8879                         dev_err(&ioa_cfg->pdev->dev,
8880                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8881                 }
8882         }
8883
8884         return rc;
8885 }
8886
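/*
 * Editorial note -- the deadline-bounded trylock pattern above, outlined
 * abstractly (pseudocode, not driver code): each invocation either
 * acquires config access and advances, re-arms a short timer while budget
 * remains, or logs a timeout and advances anyway:
 *
 *     if (trylock succeeds)      -> take lock, job_step = reset
 *     else if (time_left > 0)    -> time_left -= interval, re-arm timer
 *     else                       -> warn, job_step = reset (unlocked)
 *
 * The timer's expiry re-enters ipr_reset_ioa_job, which calls this step
 * again, so the function is effectively a polling loop across timer ticks.
 */
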
8887 /**
8888  * ipr_reset_block_config_access - Block config access to the IOA
8889  * @ipr_cmd:    ipr command struct
8890  *
8891  * Description: This attempts to block config access to the IOA.
8892  *
8893  * Return value:
8894  *      IPR_RC_JOB_CONTINUE
8895  **/
8896 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8897 {
8898         ipr_cmd->ioa_cfg->cfg_locked = 0;
8899         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8900         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8901         return IPR_RC_JOB_CONTINUE;
8902 }
8903
8904 /**
8905  * ipr_reset_allowed - Query whether or not IOA can be reset
8906  * @ioa_cfg:    ioa config struct
8907  *
8908  * Return value:
8909  *      0 if reset not allowed / non-zero if reset is allowed
8910  **/
8911 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8912 {
8913         volatile u32 temp_reg;
8914
8915         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8916         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8917 }
8918
8919 /**
8920  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8921  * @ipr_cmd:    ipr command struct
8922  *
8923  * Description: This function waits for adapter permission to run BIST,
8924  * then runs BIST. If the adapter does not give permission after a
8925  * reasonable time, we will reset the adapter anyway. The impact of
8926  * resetting the adapter without warning the adapter is the risk of
8927  * losing the persistent error log on the adapter. If the adapter is
8928  * reset while it is writing to the flash on the adapter, the flash
8929  * segment will have bad ECC and be zeroed.
8930  *
8931  * Return value:
8932  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8933  **/
8934 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8935 {
8936         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8937         int rc = IPR_RC_JOB_RETURN;
8938
8939         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8940                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8941                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8942         } else {
8943                 ipr_cmd->job_step = ipr_reset_block_config_access;
8944                 rc = IPR_RC_JOB_CONTINUE;
8945         }
8946
8947         return rc;
8948 }
8949
8950 /**
8951  * ipr_reset_alert - Alert the adapter of a pending reset
8952  * @ipr_cmd:    ipr command struct
8953  *
8954  * Description: This function alerts the adapter that it will be reset.
8955  * If memory space is not currently enabled, proceed directly
8956  * to running BIST on the adapter. The timer must always be started
8957  * so we guarantee we do not run BIST from ipr_isr.
8958  *
8959  * Return value:
8960  *      IPR_RC_JOB_RETURN
8961  **/
8962 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8963 {
8964         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8965         u16 cmd_reg;
8966         int rc;
8967
8968         ENTER;
8969         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8970
8971         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8972                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8973                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8974                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8975         } else {
8976                 ipr_cmd->job_step = ipr_reset_block_config_access;
8977         }
8978
8979         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8980         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8981
8982         LEAVE;
8983         return IPR_RC_JOB_RETURN;
8984 }
8985
8986 /**
8987  * ipr_reset_quiesce_done - Complete IOA disconnect
8988  * @ipr_cmd:    ipr command struct
8989  *
8990  * Description: Freeze the adapter to complete quiesce processing
8991  *
8992  * Return value:
8993  *      IPR_RC_JOB_CONTINUE
8994  **/
8995 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8996 {
8997         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8998
8999         ENTER;
9000         ipr_cmd->job_step = ipr_ioa_bringdown_done;
9001         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9002         LEAVE;
9003         return IPR_RC_JOB_CONTINUE;
9004 }
9005
9006 /**
9007  * ipr_reset_cancel_hcam_done - Check for outstanding commands
9008  * @ipr_cmd:    ipr command struct
9009  *
9010  * Description: Ensure nothing is outstanding to the IOA and
9011  * proceed with IOA disconnect. Otherwise reset the IOA.
9012  *
9013  * Return value:
9014  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9015  **/
9016 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9017 {
9018         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9019         struct ipr_cmnd *loop_cmd;
9020         struct ipr_hrr_queue *hrrq;
9021         int rc = IPR_RC_JOB_CONTINUE;
9022         int count = 0;
9023
9024         ENTER;
9025         ipr_cmd->job_step = ipr_reset_quiesce_done;
9026
9027         for_each_hrrq(hrrq, ioa_cfg) {
9028                 spin_lock(&hrrq->_lock);
9029                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9030                         count++;
9031                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9032                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9033                         rc = IPR_RC_JOB_RETURN;
9034                         break;
9035                 }
9036                 spin_unlock(&hrrq->_lock);
9037
9038                 if (count)
9039                         break;
9040         }
9041
9042         LEAVE;
9043         return rc;
9044 }
9045
9046 /**
9047  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9048  * @ipr_cmd:    ipr command struct
9049  *
9050  * Description: Cancel any outstanding HCAMs to the IOA.
9051  *
9052  * Return value:
9053  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9054  **/
9055 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9056 {
9057         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9058         int rc = IPR_RC_JOB_CONTINUE;
9059         struct ipr_cmd_pkt *cmd_pkt;
9060         struct ipr_cmnd *hcam_cmd;
9061         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9062
9063         ENTER;
9064         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9065
9066         if (!hrrq->ioa_is_dead) {
9067                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9068                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9069                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9070                                         continue;
9071
9072                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9073                                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9074                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9075                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9076                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9077                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9078                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9079                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9080                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9081                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9082                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9083                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9084                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9085                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9086
9087                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9088                                            IPR_CANCEL_TIMEOUT);
9089
9090                                 rc = IPR_RC_JOB_RETURN;
9091                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9092                                 break;
9093                         }
9094                 }
9095         } else
9096                 ipr_cmd->job_step = ipr_reset_alert;
9097
9098         LEAVE;
9099         return rc;
9100 }
9101
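/*
 * Editorial note: the Cancel Request CDB built above carries the 64-bit
 * DMA address of the HCAM's IOARCB split across two byte ranges, most
 * significant byte first: cdb[10..13] hold bits 63:32 and cdb[2..5] hold
 * bits 31:0. For a hypothetical dma_addr of 0x0000000123456789 that gives
 * cdb[10..13] = 00 00 00 01 and cdb[2..5] = 23 45 67 89.
 */
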
9102 /**
9103  * ipr_reset_ucode_download_done - Microcode download completion
9104  * @ipr_cmd:    ipr command struct
9105  *
9106  * Description: This function unmaps the microcode download buffer.
9107  *
9108  * Return value:
9109  *      IPR_RC_JOB_CONTINUE
9110  **/
9111 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9112 {
9113         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9114         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9115
9116         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9117                      sglist->num_sg, DMA_TO_DEVICE);
9118
9119         ipr_cmd->job_step = ipr_reset_alert;
9120         return IPR_RC_JOB_CONTINUE;
9121 }
9122
9123 /**
9124  * ipr_reset_ucode_download - Download microcode to the adapter
9125  * @ipr_cmd:    ipr command struct
9126  *
9127  * Description: This function checks to see if there is microcode
9128  * to download to the adapter. If there is, a download is performed.
9129  *
9130  * Return value:
9131  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9132  **/
9133 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9134 {
9135         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9136         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9137
9138         ENTER;
9139         ipr_cmd->job_step = ipr_reset_alert;
9140
9141         if (!sglist)
9142                 return IPR_RC_JOB_CONTINUE;
9143
9144         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9145         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9146         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9147         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9148         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9149         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9150         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9151
9152         if (ioa_cfg->sis64)
9153                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9154         else
9155                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9156         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9157
9158         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9159                    IPR_WRITE_BUFFER_TIMEOUT);
9160
9161         LEAVE;
9162         return IPR_RC_JOB_RETURN;
9163 }
9164
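/*
 * Editorial note: WRITE BUFFER (with mode IPR_WR_BUF_DOWNLOAD_AND_SAVE)
 * encodes a 24-bit parameter list length in CDB bytes 6-8, most
 * significant byte first, which is what the mask-and-shift above
 * implements. Worked example for a hypothetical buffer_len of 0x012345:
 * cdb[6] = 0x01, cdb[7] = 0x23, cdb[8] = 0x45.
 */
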
9165 /**
9166  * ipr_reset_shutdown_ioa - Shutdown the adapter
9167  * @ipr_cmd:    ipr command struct
9168  *
9169  * Description: This function issues an adapter shutdown of the
9170  * specified type to the specified adapter as part of the
9171  * adapter reset job.
9172  *
9173  * Return value:
9174  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9175  **/
9176 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9177 {
9178         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9179         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9180         unsigned long timeout;
9181         int rc = IPR_RC_JOB_CONTINUE;
9182
9183         ENTER;
9184         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9185                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9186         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9187                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9188                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9189                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9190                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9191                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9192
9193                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9194                         timeout = IPR_SHUTDOWN_TIMEOUT;
9195                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9196                         timeout = IPR_INTERNAL_TIMEOUT;
9197                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9198                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9199                 else
9200                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9201
9202                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9203
9204                 rc = IPR_RC_JOB_RETURN;
9205                 ipr_cmd->job_step = ipr_reset_ucode_download;
9206         } else
9207                 ipr_cmd->job_step = ipr_reset_alert;
9208
9209         LEAVE;
9210         return rc;
9211 }
9212
9213 /**
9214  * ipr_reset_ioa_job - Adapter reset job
9215  * @ipr_cmd:    ipr command struct
9216  *
9217  * Description: This function is the job router for the adapter reset job.
9218  *
9219  * Return value:
9220  *      none
9221  **/
9222 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9223 {
9224         u32 rc, ioasc;
9225         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9226
9227         do {
9228                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9229
9230                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9231                         /*
9232                          * We are doing nested adapter resets and this is
9233                          * not the current reset job.
9234                          */
9235                         list_add_tail(&ipr_cmd->queue,
9236                                         &ipr_cmd->hrrq->hrrq_free_q);
9237                         return;
9238                 }
9239
9240                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9241                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9242                         if (rc == IPR_RC_JOB_RETURN)
9243                                 return;
9244                 }
9245
9246                 ipr_reinit_ipr_cmnd(ipr_cmd);
9247                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9248                 rc = ipr_cmd->job_step(ipr_cmd);
9249         } while (rc == IPR_RC_JOB_CONTINUE);
9250 }
9251
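/*
 * Editorial note on the state machine driven above: each job step returns
 * IPR_RC_JOB_CONTINUE to mean "invoke my successor synchronously" (the
 * do/while loop immediately runs the next job_step), or IPR_RC_JOB_RETURN
 * to mean it has re-armed itself asynchronously via a timer, interrupt,
 * or work item, in which case ipr_reset_ioa_job is re-entered later as
 * the command's ->done handler and the walk resumes where it left off.
 */
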
9252 /**
9253  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9254  * @ioa_cfg:            ioa config struct
9255  * @job_step:           first job step of reset job
9256  * @shutdown_type:      shutdown type
9257  *
9258  * Description: This function will initiate the reset of the given adapter
9259  * starting at the selected job step.
9260  * If the caller needs to wait on the completion of the reset,
9261  * the caller must sleep on the reset_wait_q.
9262  *
9263  * Return value:
9264  *      none
9265  **/
9266 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9267                                     int (*job_step) (struct ipr_cmnd *),
9268                                     enum ipr_shutdown_type shutdown_type)
9269 {
9270         struct ipr_cmnd *ipr_cmd;
9271         int i;
9272
9273         ioa_cfg->in_reset_reload = 1;
9274         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9275                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9276                 ioa_cfg->hrrq[i].allow_cmds = 0;
9277                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9278         }
9279         wmb();
9280         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9281                 ioa_cfg->scsi_unblock = 0;
9282                 ioa_cfg->scsi_blocked = 1;
9283                 scsi_block_requests(ioa_cfg->host);
9284         }
9285
9286         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9287         ioa_cfg->reset_cmd = ipr_cmd;
9288         ipr_cmd->job_step = job_step;
9289         ipr_cmd->u.shutdown_type = shutdown_type;
9290
9291         ipr_reset_ioa_job(ipr_cmd);
9292 }
9293
9294 /**
9295  * ipr_initiate_ioa_reset - Initiate an adapter reset
9296  * @ioa_cfg:            ioa config struct
9297  * @shutdown_type:      shutdown type
9298  *
9299  * Description: This function will initiate the reset of the given adapter.
9300  * If the caller needs to wait on the completion of the reset,
9301  * the caller must sleep on the reset_wait_q.
9302  *
9303  * Return value:
9304  *      none
9305  **/
9306 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9307                                    enum ipr_shutdown_type shutdown_type)
9308 {
9309         int i;
9310
9311         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9312                 return;
9313
9314         if (ioa_cfg->in_reset_reload) {
9315                 if (ioa_cfg->sdt_state == GET_DUMP)
9316                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9317                 else if (ioa_cfg->sdt_state == READ_DUMP)
9318                         ioa_cfg->sdt_state = ABORT_DUMP;
9319         }
9320
9321         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9322                 dev_err(&ioa_cfg->pdev->dev,
9323                         "IOA taken offline - error recovery failed\n");
9324
9325                 ioa_cfg->reset_retries = 0;
9326                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9327                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9328                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9329                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9330                 }
9331                 wmb();
9332
9333                 if (ioa_cfg->in_ioa_bringdown) {
9334                         ioa_cfg->reset_cmd = NULL;
9335                         ioa_cfg->in_reset_reload = 0;
9336                         ipr_fail_all_ops(ioa_cfg);
9337                         wake_up_all(&ioa_cfg->reset_wait_q);
9338
9339                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9340                                 ioa_cfg->scsi_unblock = 1;
9341                                 schedule_work(&ioa_cfg->work_q);
9342                         }
9343                         return;
9344                 } else {
9345                         ioa_cfg->in_ioa_bringdown = 1;
9346                         shutdown_type = IPR_SHUTDOWN_NONE;
9347                 }
9348         }
9349
9350         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9351                                 shutdown_type);
9352 }
9353
9354 /**
9355  * ipr_reset_freeze - Hold off all I/O activity
9356  * @ipr_cmd:    ipr command struct
9357  *
9358  * Description: If the PCI slot is frozen, hold off all I/O
9359  * activity; then, as soon as the slot is available again,
9360  * initiate an adapter reset.
9361  */
9362 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9363 {
9364         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9365         int i;
9366
9367         /* Disallow new interrupts, avoid loop */
9368         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9369                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9370                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9371                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9372         }
9373         wmb();
9374         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9375         ipr_cmd->done = ipr_reset_ioa_job;
9376         return IPR_RC_JOB_RETURN;
9377 }
9378
9379 /**
9380  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9381  * @pdev:       PCI device struct
9382  *
9383  * Description: This routine is called to tell us that the MMIO
9384  * access to the IOA has been restored.
9385  */
9386 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9387 {
9388         unsigned long flags = 0;
9389         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9390
9391         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9392         if (!ioa_cfg->probe_done)
9393                 pci_save_state(pdev);
9394         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9395         return PCI_ERS_RESULT_NEED_RESET;
9396 }
9397
9398 /**
9399  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9400  * @pdev:       PCI device struct
9401  *
9402  * Description: This routine is called to tell us that the PCI bus
9403  * is down. Can't do anything here, except put the device driver
9404  * into a holding pattern, waiting for the PCI bus to come back.
9405  */
9406 static void ipr_pci_frozen(struct pci_dev *pdev)
9407 {
9408         unsigned long flags = 0;
9409         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9410
9411         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9412         if (ioa_cfg->probe_done)
9413                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9414         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9415 }
9416
9417 /**
9418  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9419  * @pdev:       PCI device struct
9420  *
9421  * Description: This routine is called by the pci error recovery
9422  * code after the PCI slot has been reset, just before we
9423  * should resume normal operations.
9424  */
9425 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9426 {
9427         unsigned long flags = 0;
9428         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9429
9430         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9431         if (ioa_cfg->probe_done) {
9432                 if (ioa_cfg->needs_warm_reset)
9433                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9434                 else
9435                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9436                                                 IPR_SHUTDOWN_NONE);
9437         } else
9438                 wake_up_all(&ioa_cfg->eeh_wait_q);
9439         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9440         return PCI_ERS_RESULT_RECOVERED;
9441 }
9442
9443 /**
9444  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9445  * @pdev:       PCI device struct
9446  *
9447  * Description: This routine is called when the PCI bus has
9448  * permanently failed.
9449  */
9450 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9451 {
9452         unsigned long flags = 0;
9453         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9454         int i;
9455
9456         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9457         if (ioa_cfg->probe_done) {
9458                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9459                         ioa_cfg->sdt_state = ABORT_DUMP;
9460                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9461                 ioa_cfg->in_ioa_bringdown = 1;
9462                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9463                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9464                         ioa_cfg->hrrq[i].allow_cmds = 0;
9465                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9466                 }
9467                 wmb();
9468                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9469         } else
9470                 wake_up_all(&ioa_cfg->eeh_wait_q);
9471         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9472 }
9473
9474 /**
9475  * ipr_pci_error_detected - Called when a PCI error is detected.
9476  * @pdev:       PCI device struct
9477  * @state:      PCI channel state
9478  *
9479  * Description: Called when a PCI error is detected.
9480  *
9481  * Return value:
9482  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9483  */
9484 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9485                                                pci_channel_state_t state)
9486 {
9487         switch (state) {
9488         case pci_channel_io_frozen:
9489                 ipr_pci_frozen(pdev);
9490                 return PCI_ERS_RESULT_CAN_RECOVER;
9491         case pci_channel_io_perm_failure:
9492                 ipr_pci_perm_failure(pdev);
9493                 return PCI_ERS_RESULT_DISCONNECT;
9494         default:
9495                 break;
9496         }
9497         return PCI_ERS_RESULT_NEED_RESET;
9498 }
9499
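/*
 * Editorial note -- the common PCI error recovery sequence these
 * callbacks take part in (see Documentation/PCI/pci-error-recovery.rst;
 * sketched for the typical case):
 *
 *     error_detected(io_frozen) -> PCI_ERS_RESULT_CAN_RECOVER
 *       -> mmio_enabled()       -> PCI_ERS_RESULT_NEED_RESET
 *       -> slot_reset()         -> PCI_ERS_RESULT_RECOVERED -> resume I/O
 *
 * whereas io_perm_failure short-circuits to PCI_ERS_RESULT_DISCONNECT and
 * ipr_pci_perm_failure() takes the adapter offline.
 */
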
9500 /**
9501  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9502  * @ioa_cfg:    ioa cfg struct
9503  *
9504  * Description: This is the second phase of adapter initialization.
9505  * This function takes care of initializing the adapter to the point
9506  * where it can accept new commands.
9507  * Return value:
9508  *      0 on success / -EIO on failure
9509  **/
9510 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9511 {
9512         int rc = 0;
9513         unsigned long host_lock_flags = 0;
9514
9515         ENTER;
9516         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9517         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9518         ioa_cfg->probe_done = 1;
9519         if (ioa_cfg->needs_hard_reset) {
9520                 ioa_cfg->needs_hard_reset = 0;
9521                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9522         } else
9523                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9524                                         IPR_SHUTDOWN_NONE);
9525         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9526
9527         LEAVE;
9528         return rc;
9529 }
9530
9531 /**
9532  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9533  * @ioa_cfg:    ioa config struct
9534  *
9535  * Return value:
9536  *      none
9537  **/
9538 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9539 {
9540         int i;
9541
9542         if (ioa_cfg->ipr_cmnd_list) {
9543                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9544                         if (ioa_cfg->ipr_cmnd_list[i])
9545                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9546                                               ioa_cfg->ipr_cmnd_list[i],
9547                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9548
9549                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9550                 }
9551         }
9552
9553         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9554
9555         kfree(ioa_cfg->ipr_cmnd_list);
9556         kfree(ioa_cfg->ipr_cmnd_list_dma);
9557         ioa_cfg->ipr_cmnd_list = NULL;
9558         ioa_cfg->ipr_cmnd_list_dma = NULL;
9559         ioa_cfg->ipr_cmd_pool = NULL;
9560 }
9561
9562 /**
9563  * ipr_free_mem - Frees memory allocated for an adapter
9564  * @ioa_cfg:    ioa cfg struct
9565  *
9566  * Return value:
9567  *      nothing
9568  **/
9569 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9570 {
9571         int i;
9572
9573         kfree(ioa_cfg->res_entries);
9574         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9575                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9576         ipr_free_cmd_blks(ioa_cfg);
9577
9578         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9579                 dma_free_coherent(&ioa_cfg->pdev->dev,
9580                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9581                                   ioa_cfg->hrrq[i].host_rrq,
9582                                   ioa_cfg->hrrq[i].host_rrq_dma);
9583
9584         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9585                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9586
9587         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9588                 dma_free_coherent(&ioa_cfg->pdev->dev,
9589                                   sizeof(struct ipr_hostrcb),
9590                                   ioa_cfg->hostrcb[i],
9591                                   ioa_cfg->hostrcb_dma[i]);
9592         }
9593
9594         ipr_free_dump(ioa_cfg);
9595         kfree(ioa_cfg->trace);
9596 }
9597
9598 /**
9599  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9600  * @ioa_cfg:    ipr cfg struct
9601  *
9602  * This function frees all allocated IRQs for the
9603  * specified adapter.
9604  *
9605  * Return value:
9606  *      none
9607  **/
9608 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9609 {
9610         struct pci_dev *pdev = ioa_cfg->pdev;
9611         int i;
9612
9613         for (i = 0; i < ioa_cfg->nvectors; i++)
9614                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9615         pci_free_irq_vectors(pdev);
9616 }
9617
9618 /**
9619  * ipr_free_all_resources - Free all allocated resources for an adapter.
9620  * @ioa_cfg:    ioa config struct
9621  *
9622  * This function frees all allocated resources for the
9623  * specified adapter.
9624  *
9625  * Return value:
9626  *      none
9627  **/
9628 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9629 {
9630         struct pci_dev *pdev = ioa_cfg->pdev;
9631
9632         ENTER;
9633         ipr_free_irqs(ioa_cfg);
9634         if (ioa_cfg->reset_work_q)
9635                 destroy_workqueue(ioa_cfg->reset_work_q);
9636         iounmap(ioa_cfg->hdw_dma_regs);
9637         pci_release_regions(pdev);
9638         ipr_free_mem(ioa_cfg);
9639         scsi_host_put(ioa_cfg->host);
9640         pci_disable_device(pdev);
9641         LEAVE;
9642 }
9643
9644 /**
9645  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9646  * @ioa_cfg:    ioa config struct
9647  *
9648  * Return value:
9649  *      0 on success / -ENOMEM on allocation failure
9650  **/
9651 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9652 {
9653         struct ipr_cmnd *ipr_cmd;
9654         struct ipr_ioarcb *ioarcb;
9655         dma_addr_t dma_addr;
9656         int i, entries_each_hrrq, hrrq_id = 0;
9657
9658         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9659                                                 sizeof(struct ipr_cmnd), 512, 0);
9660
9661         if (!ioa_cfg->ipr_cmd_pool)
9662                 return -ENOMEM;
9663
9664         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9665         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9666
9667         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9668                 ipr_free_cmd_blks(ioa_cfg);
9669                 return -ENOMEM;
9670         }
9671
9672         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9673                 if (ioa_cfg->hrrq_num > 1) {
9674                         if (i == 0) {
9675                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9676                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9677                                 ioa_cfg->hrrq[i].max_cmd_id =
9678                                         (entries_each_hrrq - 1);
9679                         } else {
9680                                 entries_each_hrrq =
9681                                         IPR_NUM_BASE_CMD_BLKS/
9682                                         (ioa_cfg->hrrq_num - 1);
9683                                 ioa_cfg->hrrq[i].min_cmd_id =
9684                                         IPR_NUM_INTERNAL_CMD_BLKS +
9685                                         (i - 1) * entries_each_hrrq;
9686                                 ioa_cfg->hrrq[i].max_cmd_id =
9687                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9688                                         i * entries_each_hrrq - 1);
9689                         }
9690                 } else {
9691                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9692                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9693                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9694                 }
9695                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9696         }
9697
9698         BUG_ON(ioa_cfg->hrrq_num == 0);
9699
9700         i = IPR_NUM_CMD_BLKS -
9701                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9702         if (i > 0) {
9703                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9704                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9705         }
9706
9707         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9708                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9709                                 GFP_KERNEL, &dma_addr);
9710
9711                 if (!ipr_cmd) {
9712                         ipr_free_cmd_blks(ioa_cfg);
9713                         return -ENOMEM;
9714                 }
9715
9716                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9717                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9718
9719                 ioarcb = &ipr_cmd->ioarcb;
9720                 ipr_cmd->dma_addr = dma_addr;
9721                 if (ioa_cfg->sis64)
9722                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9723                 else
9724                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9725
9726                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9727                 if (ioa_cfg->sis64) {
9728                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9729                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9730                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9731                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9732                 } else {
9733                         ioarcb->write_ioadl_addr =
9734                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9735                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9736                         ioarcb->ioasa_host_pci_addr =
9737                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9738                 }
9739                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9740                 ipr_cmd->cmd_index = i;
9741                 ipr_cmd->ioa_cfg = ioa_cfg;
9742                 ipr_cmd->sense_buffer_dma = dma_addr +
9743                         offsetof(struct ipr_cmnd, sense_buffer);
9744
9745                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9746                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9747                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9748                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9749                         hrrq_id++;
9750         }
9751
9752         return 0;
9753 }
9754
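/*
 * Editorial note -- how ipr_alloc_cmd_blks() partitions command ids, as a
 * hedged worked example with made-up constants: with 4 HRRQs, 4 internal
 * blocks and 100 base blocks, hrrq[0] gets ids 0-3 for internal commands
 * and hrrq[1..3] each get 100 / 3 = 33 ids (4-36, 37-69, 70-102); the
 * fix-up after the loop folds the integer-division remainder (one id
 * here, out of 104 total) into the last queue, so no block is stranded.
 */
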
9755 /**
9756  * ipr_alloc_mem - Allocate memory for an adapter
9757  * @ioa_cfg:    ioa config struct
9758  *
9759  * Return value:
9760  *      0 on success / non-zero for error
9761  **/
9762 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9763 {
9764         struct pci_dev *pdev = ioa_cfg->pdev;
9765         int i, rc = -ENOMEM;
9766
9767         ENTER;
9768         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9769                                        sizeof(struct ipr_resource_entry),
9770                                        GFP_KERNEL);
9771
9772         if (!ioa_cfg->res_entries)
9773                 goto out;
9774
9775         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9776                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9777                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9778         }
9779
9780         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9781                                               sizeof(struct ipr_misc_cbs),
9782                                               &ioa_cfg->vpd_cbs_dma,
9783                                               GFP_KERNEL);
9784
9785         if (!ioa_cfg->vpd_cbs)
9786                 goto out_free_res_entries;
9787
9788         if (ipr_alloc_cmd_blks(ioa_cfg))
9789                 goto out_free_vpd_cbs;
9790
9791         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9792                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9793                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9794                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9795                                         GFP_KERNEL);
9796
9797                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9798                         while (--i >= 0)
9799                                 dma_free_coherent(&pdev->dev,
9800                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9801                                         ioa_cfg->hrrq[i].host_rrq,
9802                                         ioa_cfg->hrrq[i].host_rrq_dma);
9803                         goto out_ipr_free_cmd_blocks;
9804                 }
9805                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9806         }
9807
9808         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9809                                                   ioa_cfg->cfg_table_size,
9810                                                   &ioa_cfg->cfg_table_dma,
9811                                                   GFP_KERNEL);
9812
9813         if (!ioa_cfg->u.cfg_table)
9814                 goto out_free_host_rrq;
9815
9816         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9817                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9818                                                          sizeof(struct ipr_hostrcb),
9819                                                          &ioa_cfg->hostrcb_dma[i],
9820                                                          GFP_KERNEL);
9821
9822                 if (!ioa_cfg->hostrcb[i])
9823                         goto out_free_hostrcb_dma;
9824
9825                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9826                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9827                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9828                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9829         }
9830
9831         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9832                                  sizeof(struct ipr_trace_entry),
9833                                  GFP_KERNEL);
9834
9835         if (!ioa_cfg->trace)
9836                 goto out_free_hostrcb_dma;
9837
9838         rc = 0;
9839 out:
9840         LEAVE;
9841         return rc;
9842
9843 out_free_hostrcb_dma:
9844         while (i-- > 0) {
9845                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9846                                   ioa_cfg->hostrcb[i],
9847                                   ioa_cfg->hostrcb_dma[i]);
9848         }
9849         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9850                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9851 out_free_host_rrq:
9852         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9853                 dma_free_coherent(&pdev->dev,
9854                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9855                                   ioa_cfg->hrrq[i].host_rrq,
9856                                   ioa_cfg->hrrq[i].host_rrq_dma);
9857         }
9858 out_ipr_free_cmd_blocks:
9859         ipr_free_cmd_blks(ioa_cfg);
9860 out_free_vpd_cbs:
9861         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9862                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9863 out_free_res_entries:
9864         kfree(ioa_cfg->res_entries);
9865         goto out;
9866 }
9867
9868 /**
9869  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9870  * @ioa_cfg:    ioa config struct
9871  *
9872  * Return value:
9873  *      none
9874  **/
9875 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9876 {
9877         int i;
9878
9879         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9880                 ioa_cfg->bus_attr[i].bus = i;
9881                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9882                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
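                /*
                 * An out-of-range ipr_max_speed module parameter falls
                 * back to the Ultra160 rate rather than indexing past
                 * the end of ipr_max_bus_speeds[].
                 */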
9883                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9884                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9885                 else
9886                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9887         }
9888 }
9889
9890 /**
9891  * ipr_init_regs - Initialize IOA registers
9892  * @ioa_cfg:    ioa config struct
9893  *
9894  * Return value:
9895  *      none
9896  **/
9897 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9898 {
9899         const struct ipr_interrupt_offsets *p;
9900         struct ipr_interrupts *t;
9901         void __iomem *base;
9902
9903         p = &ioa_cfg->chip_cfg->regs;
9904         t = &ioa_cfg->regs;
9905         base = ioa_cfg->hdw_dma_regs;
9906
9907         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9908         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9909         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9910         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9911         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9912         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9913         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9914         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9915         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9916         t->ioarrin_reg = base + p->ioarrin_reg;
9917         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9918         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9919         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9920         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9921         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9922         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9923
9924         if (ioa_cfg->sis64) {
9925                 t->init_feedback_reg = base + p->init_feedback_reg;
9926                 t->dump_addr_reg = base + p->dump_addr_reg;
9927                 t->dump_data_reg = base + p->dump_data_reg;
9928                 t->endian_swap_reg = base + p->endian_swap_reg;
9929         }
9930 }
9931
9932 /**
9933  * ipr_init_ioa_cfg - Initialize IOA config struct
9934  * @ioa_cfg:    ioa config struct
9935  * @host:               scsi host struct
9936  * @pdev:               PCI dev struct
9937  *
9938  * Return value:
9939  *      none
9940  **/
9941 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9942                              struct Scsi_Host *host, struct pci_dev *pdev)
9943 {
9944         int i;
9945
9946         ioa_cfg->host = host;
9947         ioa_cfg->pdev = pdev;
9948         ioa_cfg->log_level = ipr_log_level;
9949         ioa_cfg->doorbell = IPR_DOORBELL;
9950         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9951         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9952         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9953         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9954         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9955         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9956
9957         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9958         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9959         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9960         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9961         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9962         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9963         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9964         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9965         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9966         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9967         ioa_cfg->sdt_state = INACTIVE;
9968
9969         ipr_initialize_bus_attr(ioa_cfg);
9970         ioa_cfg->max_devs_supported = ipr_max_devs;
9971
9972         if (ioa_cfg->sis64) {
9973                 host->max_channel = IPR_MAX_SIS64_BUSES;
9974                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9975                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9976                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9977                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9978                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9979                                            + ((sizeof(struct ipr_config_table_entry64)
9980                                                * ioa_cfg->max_devs_supported)));
9981         } else {
9982                 host->max_channel = IPR_VSET_BUS;
9983                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9984                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9985                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9986                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9987                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9988                                            + ((sizeof(struct ipr_config_table_entry)
9989                                                * ioa_cfg->max_devs_supported)));
9990         }
9991
9992         host->unique_id = host->host_no;
9993         host->max_cmd_len = IPR_MAX_CDB_LEN;
9994         host->can_queue = ioa_cfg->max_cmds;
9995         pci_set_drvdata(pdev, ioa_cfg);
9996
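        /*
         * HRRQ 0 shares the SCSI host lock; any additional HRRQs use
         * their own locks, presumably so completion processing on the
         * extra vectors does not contend on the host lock.
         */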
9997         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9998                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9999                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
10000                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
10001                 if (i == 0)
10002                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
10003                 else
10004                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
10005         }
10006 }
10007
10008 /**
10009  * ipr_get_chip_info - Find adapter chip information
10010  * @dev_id:             PCI device id struct
10011  *
10012  * Return value:
10013  *      ptr to chip information on success / NULL on failure
10014  **/
10015 static const struct ipr_chip_t *
10016 ipr_get_chip_info(const struct pci_device_id *dev_id)
10017 {
10018         int i;
10019
10020         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10021                 if (ipr_chip[i].vendor == dev_id->vendor &&
10022                     ipr_chip[i].device == dev_id->device)
10023                         return &ipr_chip[i];
10024         return NULL;
10025 }
10026
10027 /**
10028  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10029  *                                              during probe time
10030  * @ioa_cfg:    ioa config struct
10031  *
10032  * Return value:
10033  *      none
10034  **/
10035 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10036 {
10037         struct pci_dev *pdev = ioa_cfg->pdev;
10038
10039         if (pci_channel_offline(pdev)) {
10040                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10041                                    !pci_channel_offline(pdev),
10042                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10043                 pci_restore_state(pdev);
10044         }
10045 }
10046
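/**
 * name_msi_vectors - Build the per-vector interrupt names ("host<n>-<vector>")
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      none
 **/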
10047 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10048 {
10049         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10050
10051         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10052                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10053                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10054                 ioa_cfg->vectors_info[vec_idx].
10055                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10056         }
10057 }
10058
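/**
 * ipr_request_other_msi_irqs - Request IRQs for vectors 1..nvectors-1
 * @ioa_cfg:    ioa config struct
 * @pdev:       PCI device struct
 *
 * Vector 0 is requested separately by the caller. On failure, any
 * vectors already requested here are freed before returning.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/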
10059 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10060                 struct pci_dev *pdev)
10061 {
10062         int i, rc;
10063
10064         for (i = 1; i < ioa_cfg->nvectors; i++) {
10065                 rc = request_irq(pci_irq_vector(pdev, i),
10066                         ipr_isr_mhrrq,
10067                         0,
10068                         ioa_cfg->vectors_info[i].desc,
10069                         &ioa_cfg->hrrq[i]);
10070                 if (rc) {
10071                         while (--i > 0)
10072                                 free_irq(pci_irq_vector(pdev, i),
10073                                         &ioa_cfg->hrrq[i]);
10074                         return rc;
10075                 }
10076         }
10077         return 0;
10078 }
10079
10080 /**
10081  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10082  * @irq:                IRQ number
10083  * @devp:               ioa config struct (the dev_id passed to request_irq())
10084  *
10085  * Description: Simply sets the msi_received flag to 1 indicating that
10086  * Message Signaled Interrupts are supported.
10087  *
10088  * Return value:
10089  *      IRQ_HANDLED
10090  **/
10091 static irqreturn_t ipr_test_intr(int irq, void *devp)
10092 {
10093         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10094         unsigned long lock_flags = 0;
10095
10096         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10097         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10098
10099         ioa_cfg->msi_received = 1;
10100         wake_up(&ioa_cfg->msi_wait_q);
10101
10102         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10103         return IRQ_HANDLED;
10104 }
10105
10106 /**
10107  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10108  * @ioa_cfg:            ioa config struct
10109  * @pdev:               PCI device struct
10110  *
10111  * Description: This routine sets up and initiates a test interrupt to determine
10112  * if the interrupt is received via the ipr_test_intr() service routine.
10113  * If the test fails, the driver will fall back to legacy (LSI) interrupts.
10114  *
10115  * Return value:
10116  *      0 on success / non-zero on failure
10117  **/
10118 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10119 {
10120         int rc;
10121         unsigned long lock_flags = 0;
10122         int irq = pci_irq_vector(pdev, 0);
10123
10124         ENTER;
10125
10126         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10127         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10128         ioa_cfg->msi_received = 0;
10129         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10130         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10131         readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10132         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10133
10134         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10135         if (rc) {
10136                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10137                 return rc;
10138         } else if (ipr_debug)
10139                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10140
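        /*
         * Trigger the test interrupt by setting the IO debug acknowledge
         * bit, then wait up to a second for ipr_test_intr() to run and
         * set msi_received.
         */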
10141         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10142         readl(ioa_cfg->regs.sense_interrupt_reg);
10143         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10144         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10145         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10146
10147         if (!ioa_cfg->msi_received) {
10148                 /* MSI test failed */
10149                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10150                 rc = -EOPNOTSUPP;
10151         } else if (ipr_debug)
10152                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10153
10154         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10155
10156         free_irq(irq, ioa_cfg);
10157
10158         LEAVE;
10159
10160         return rc;
10161 }
10162
10163 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10164  * @pdev:               PCI device struct
10165  * @dev_id:             PCI device id struct
10166  *
10167  * Return value:
10168  *      0 on success / non-zero on failure
10169  **/
10170 static int ipr_probe_ioa(struct pci_dev *pdev,
10171                          const struct pci_device_id *dev_id)
10172 {
10173         struct ipr_ioa_cfg *ioa_cfg;
10174         struct Scsi_Host *host;
10175         unsigned long ipr_regs_pci;
10176         void __iomem *ipr_regs;
10177         int rc = PCIBIOS_SUCCESSFUL;
10178         volatile u32 mask, uproc, interrupts;
10179         unsigned long lock_flags, driver_lock_flags;
10180         unsigned int irq_flag;
10181
10182         ENTER;
10183
10184         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10185         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10186
10187         if (!host) {
10188                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10189                 rc = -ENOMEM;
10190                 goto out;
10191         }
10192
10193         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10194         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10195         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10196
10197         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10198
10199         if (!ioa_cfg->ipr_chip) {
10200                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10201                         dev_id->vendor, dev_id->device);
10202                 goto out_scsi_host_put;
10203         }
10204
10205         /* set SIS 32 or SIS 64 */
10206         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10207         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10208         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10209         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10210
10211         if (ipr_transop_timeout)
10212                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10213         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10214                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10215         else
10216                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10217
10218         ioa_cfg->revid = pdev->revision;
10219
10220         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10221
10222         ipr_regs_pci = pci_resource_start(pdev, 0);
10223
10224         rc = pci_request_regions(pdev, IPR_NAME);
10225         if (rc < 0) {
10226                 dev_err(&pdev->dev,
10227                         "Couldn't register memory range of registers\n");
10228                 goto out_scsi_host_put;
10229         }
10230
10231         rc = pci_enable_device(pdev);
10232
10233         if (rc || pci_channel_offline(pdev)) {
10234                 if (pci_channel_offline(pdev)) {
10235                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10236                         rc = pci_enable_device(pdev);
10237                 }
10238
10239                 if (rc) {
10240                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10241                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10242                         goto out_release_regions;
10243                 }
10244         }
10245
10246         ipr_regs = pci_ioremap_bar(pdev, 0);
10247
10248         if (!ipr_regs) {
10249                 dev_err(&pdev->dev,
10250                         "Couldn't map memory range of registers\n");
10251                 rc = -ENOMEM;
10252                 goto out_disable;
10253         }
10254
10255         ioa_cfg->hdw_dma_regs = ipr_regs;
10256         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10257         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10258
10259         ipr_init_regs(ioa_cfg);
10260
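        /*
         * SIS-64 adapters prefer 64-bit DMA addressing but can fall
         * back to a 32-bit mask; SIS-32 adapters always use 32-bit DMA.
         */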
10261         if (ioa_cfg->sis64) {
10262                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10263                 if (rc < 0) {
10264                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10265                         rc = dma_set_mask_and_coherent(&pdev->dev,
10266                                                        DMA_BIT_MASK(32));
10267                 }
10268         } else
10269                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10270
10271         if (rc < 0) {
10272                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10273                 goto cleanup_nomem;
10274         }
10275
10276         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10277                                    ioa_cfg->chip_cfg->cache_line_size);
10278
10279         if (rc != PCIBIOS_SUCCESSFUL) {
10280                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10281                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10282                 rc = -EIO;
10283                 goto cleanup_nomem;
10284         }
10285
10286         /* Issue MMIO read to ensure card is not in EEH */
10287         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10288         ipr_wait_for_pci_err_recovery(ioa_cfg);
10289
10290         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10291                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10292                         IPR_MAX_MSIX_VECTORS);
10293                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10294         }
10295
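        /*
         * Prefer MSI-X/MSI when the chip supports it, while still
         * allowing a fallback to a single legacy INTx vector.
         */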
10296         irq_flag = PCI_IRQ_LEGACY;
10297         if (ioa_cfg->ipr_chip->has_msi)
10298                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10299         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10300         if (rc < 0) {
10301                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10302                 goto cleanup_nomem;
10303         }
10304         ioa_cfg->nvectors = rc;
10305
10306         if (!pdev->msi_enabled && !pdev->msix_enabled)
10307                 ioa_cfg->clear_isr = 1;
10308
10309         pci_set_master(pdev);
10310
10311         if (pci_channel_offline(pdev)) {
10312                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10313                 pci_set_master(pdev);
10314                 if (pci_channel_offline(pdev)) {
10315                         rc = -EIO;
10316                         goto out_msi_disable;
10317                 }
10318         }
10319
10320         if (pdev->msi_enabled || pdev->msix_enabled) {
10321                 rc = ipr_test_msi(ioa_cfg, pdev);
10322                 switch (rc) {
10323                 case 0:
10324                         dev_info(&pdev->dev,
10325                                 "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10326                                 pdev->msix_enabled ? "-X" : "");
10327                         break;
10328                 case -EOPNOTSUPP:
10329                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10330                         pci_free_irq_vectors(pdev);
10331
10332                         ioa_cfg->nvectors = 1;
10333                         ioa_cfg->clear_isr = 1;
10334                         break;
10335                 default:
10336                         goto out_msi_disable;
10337                 }
10338         }
10339
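        /*
         * Use no more HRRQs than there are interrupt vectors, online
         * CPUs, or the driver maximum.
         */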
10340         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10341                                 (unsigned int)num_online_cpus(),
10342                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10343
10344         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10345                 goto out_msi_disable;
10346
10347         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10348                 goto out_msi_disable;
10349
10350         rc = ipr_alloc_mem(ioa_cfg);
10351         if (rc < 0) {
10352                 dev_err(&pdev->dev,
10353                         "Couldn't allocate enough memory for device driver!\n");
10354                 goto out_msi_disable;
10355         }
10356
10357         /* Save away PCI config space for use following IOA reset */
10358         rc = pci_save_state(pdev);
10359
10360         if (rc != PCIBIOS_SUCCESSFUL) {
10361                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10362                 rc = -EIO;
10363                 goto cleanup_nolog;
10364         }
10365
10366         /*
10367          * If HRRQ updated interrupt is not masked, or reset alert is set,
10368          * the card is in an unknown state and needs a hard reset
10369          */
10370         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10371         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10372         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10373         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10374                 ioa_cfg->needs_hard_reset = 1;
10375         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10376                 ioa_cfg->needs_hard_reset = 1;
10377         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10378                 ioa_cfg->ioa_unit_checked = 1;
10379
10380         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10381         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10383
10384         if (pdev->msi_enabled || pdev->msix_enabled) {
10385                 name_msi_vectors(ioa_cfg);
10386                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10387                         ioa_cfg->vectors_info[0].desc,
10388                         &ioa_cfg->hrrq[0]);
10389                 if (!rc)
10390                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10391         } else {
10392                 rc = request_irq(pdev->irq, ipr_isr,
10393                          IRQF_SHARED,
10394                          IPR_NAME, &ioa_cfg->hrrq[0]);
10395         }
10396         if (rc) {
10397                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10398                         pdev->irq, rc);
10399                 goto cleanup_nolog;
10400         }
10401
10402         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10403             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10404                 ioa_cfg->needs_warm_reset = 1;
10405                 ioa_cfg->reset = ipr_reset_slot_reset;
10406
10407                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10408                                                                 WQ_MEM_RECLAIM, host->host_no);
10409
10410                 if (!ioa_cfg->reset_work_q) {
10411                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10412                         rc = -ENOMEM;
10413                         goto out_free_irq;
10414                 }
10415         } else
10416                 ioa_cfg->reset = ipr_reset_start_bist;
10417
10418         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10419         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10420         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10421
10422         LEAVE;
10423 out:
10424         return rc;
10425
10426 out_free_irq:
10427         ipr_free_irqs(ioa_cfg);
10428 cleanup_nolog:
10429         ipr_free_mem(ioa_cfg);
10430 out_msi_disable:
10431         ipr_wait_for_pci_err_recovery(ioa_cfg);
10432         pci_free_irq_vectors(pdev);
10433 cleanup_nomem:
10434         iounmap(ipr_regs);
10435 out_disable:
10436         pci_disable_device(pdev);
10437 out_release_regions:
10438         pci_release_regions(pdev);
10439 out_scsi_host_put:
10440         scsi_host_put(host);
10441         goto out;
10442 }
10443
10444 /**
10445  * ipr_initiate_ioa_bringdown - Bring down an adapter
10446  * @ioa_cfg:            ioa config struct
10447  * @shutdown_type:      shutdown type
10448  *
10449  * Description: This function will initiate bringing down the adapter.
10450  * This consists of issuing an IOA shutdown to the adapter
10451  * to flush the cache, and running BIST.
10452  * If the caller needs to wait on the completion of the reset,
10453  * the caller must sleep on the reset_wait_q.
10454  *
10455  * Return value:
10456  *      none
10457  **/
10458 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10459                                        enum ipr_shutdown_type shutdown_type)
10460 {
10461         ENTER;
10462         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10463                 ioa_cfg->sdt_state = ABORT_DUMP;
10464         ioa_cfg->reset_retries = 0;
10465         ioa_cfg->in_ioa_bringdown = 1;
10466         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10467         LEAVE;
10468 }
10469
10470 /**
10471  * __ipr_remove - Remove a single adapter
10472  * @pdev:       pci device struct
10473  *
10474  * Adapter hot plug remove entry point.
10475  *
10476  * Return value:
10477  *      none
10478  **/
10479 static void __ipr_remove(struct pci_dev *pdev)
10480 {
10481         unsigned long host_lock_flags = 0;
10482         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10483         int i;
10484         unsigned long driver_lock_flags;
10485         ENTER;
10486
10487         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10488         while (ioa_cfg->in_reset_reload) {
10489                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10490                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10491                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10492         }
10493
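        /*
         * Flag every HRRQ so command submission paths see the adapter
         * going away; the write barrier publishes the flags before the
         * bringdown is initiated.
         */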
10494         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10495                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10496                 ioa_cfg->hrrq[i].removing_ioa = 1;
10497                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10498         }
10499         wmb();
10500         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10501
10502         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10503         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10504         flush_work(&ioa_cfg->work_q);
10505         if (ioa_cfg->reset_work_q)
10506                 flush_workqueue(ioa_cfg->reset_work_q);
10507         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10508         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10509
10510         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10511         list_del(&ioa_cfg->queue);
10512         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10513
10514         if (ioa_cfg->sdt_state == ABORT_DUMP)
10515                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10516         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10517
10518         ipr_free_all_resources(ioa_cfg);
10519
10520         LEAVE;
10521 }
10522
10523 /**
10524  * ipr_remove - IOA hot plug remove entry point
10525  * @pdev:       pci device struct
10526  *
10527  * Adapter hot plug remove entry point.
10528  *
10529  * Return value:
10530  *      none
10531  **/
10532 static void ipr_remove(struct pci_dev *pdev)
10533 {
10534         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10535
10536         ENTER;
10537
10538         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10539                               &ipr_trace_attr);
10540         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10541                              &ipr_dump_attr);
10542         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10543                         &ipr_ioa_async_err_log);
10544         scsi_remove_host(ioa_cfg->host);
10545
10546         __ipr_remove(pdev);
10547
10548         LEAVE;
10549 }
10550
10551 /**
10552  * ipr_probe - Adapter hot plug add entry point
10553  * @pdev:       pci device struct
10554  * @dev_id:     pci device ID
10555  *
10556  * Return value:
10557  *      0 on success / non-zero on failure
10558  **/
10559 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10560 {
10561         struct ipr_ioa_cfg *ioa_cfg;
10562         unsigned long flags;
10563         int rc, i;
10564
10565         rc = ipr_probe_ioa(pdev, dev_id);
10566
10567         if (rc)
10568                 return rc;
10569
10570         ioa_cfg = pci_get_drvdata(pdev);
10571         rc = ipr_probe_ioa_part2(ioa_cfg);
10572
10573         if (rc) {
10574                 __ipr_remove(pdev);
10575                 return rc;
10576         }
10577
10578         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10579
10580         if (rc) {
10581                 __ipr_remove(pdev);
10582                 return rc;
10583         }
10584
10585         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10586                                    &ipr_trace_attr);
10587
10588         if (rc) {
10589                 scsi_remove_host(ioa_cfg->host);
10590                 __ipr_remove(pdev);
10591                 return rc;
10592         }
10593
10594         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10595                         &ipr_ioa_async_err_log);
10596
10597         if (rc) {
10598                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10599                                 &ipr_dump_attr);
10600                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10601                                 &ipr_trace_attr);
10602                 scsi_remove_host(ioa_cfg->host);
10603                 __ipr_remove(pdev);
10604                 return rc;
10605         }
10606
10607         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10608                                    &ipr_dump_attr);
10609
10610         if (rc) {
10611                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10612                                       &ipr_ioa_async_err_log);
10613                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10614                                       &ipr_trace_attr);
10615                 scsi_remove_host(ioa_cfg->host);
10616                 __ipr_remove(pdev);
10617                 return rc;
10618         }
10619         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10620         ioa_cfg->scan_enabled = 1;
10621         schedule_work(&ioa_cfg->work_q);
10622         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10623
10624         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10625
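        /*
         * On SIS-64 adapters with multiple vectors, service the
         * secondary HRRQs through irq_poll, throttled by iopoll_weight.
         */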
10626         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10627                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10628                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10629                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10630                 }
10631         }
10632
10633         scsi_scan_host(ioa_cfg->host);
10634
10635         return 0;
10636 }
10637
10638 /**
10639  * ipr_shutdown - Shutdown handler.
10640  * @pdev:       pci device struct
10641  *
10642  * This function is invoked upon system shutdown/reboot. It will issue
10643  * an adapter shutdown to the adapter to flush the write cache.
10644  *
10645  * Return value:
10646  *      none
10647  **/
10648 static void ipr_shutdown(struct pci_dev *pdev)
10649 {
10650         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10651         unsigned long lock_flags = 0;
10652         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10653         int i;
10654
10655         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10656         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10657                 ioa_cfg->iopoll_weight = 0;
10658                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10659                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10660         }
10661
10662         while (ioa_cfg->in_reset_reload) {
10663                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10664                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10665                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10666         }
10667
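        /*
         * With ipr_fast_reboot set, a restarting SIS-64 adapter is only
         * quiesced instead of fully shut down, and its IRQs and PCI
         * device are released below to shorten the reboot path.
         */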
10668         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10669                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10670
10671         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10672         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10673         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10674         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10675                 ipr_free_irqs(ioa_cfg);
10676                 pci_disable_device(ioa_cfg->pdev);
10677         }
10678 }
10679
10680 static const struct pci_device_id ipr_pci_table[] = {
10681         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10682                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10683         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10684                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10685         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10686                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10687         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10688                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10689         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10690                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10691         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10692                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10693         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10694                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10695         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10696                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10697                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10698         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10699                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10700         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10701                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10702                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10703         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10704                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10705                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10707                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10709                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10710                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10711         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10712                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10713                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10714         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10716                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10717         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10718                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10719         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10721         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10722                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10723                 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10726         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10728         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10730                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10731         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10733                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10734         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10735                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10736         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10737                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10738         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10739                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10740         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10741                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10742         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10743                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10744         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10745                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10746         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10747                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10748         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10749                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10750         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10751                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10752         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10753                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10754         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10755                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10756         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10757                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10758         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10759                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10760         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10761                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10762         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10763                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10764         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10765                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10766         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10767                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10768         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10769                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10770         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10771                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10772         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10773                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10774         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10775                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10776         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10777                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10778         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10779                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10780         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10781                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10782         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10783                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10784         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10785                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10786         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10787                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10788         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10789                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10790         { }
10791 };
10792 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10793
10794 static const struct pci_error_handlers ipr_err_handler = {
10795         .error_detected = ipr_pci_error_detected,
10796         .mmio_enabled = ipr_pci_mmio_enabled,
10797         .slot_reset = ipr_pci_slot_reset,
10798 };
10799
10800 static struct pci_driver ipr_driver = {
10801         .name = IPR_NAME,
10802         .id_table = ipr_pci_table,
10803         .probe = ipr_probe,
10804         .remove = ipr_remove,
10805         .shutdown = ipr_shutdown,
10806         .err_handler = &ipr_err_handler,
10807 };
10808
10809 /**
10810  * ipr_halt_done - Shutdown prepare completion handler
10811  * @ipr_cmd:   ipr command struct
10812  *
10813  * Return value:
10814  *      none
10815  **/
10816 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10817 {
10818         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10819 }
10820
10821 /**
10822  * ipr_halt - Issue shutdown prepare to all adapters
10823  * @nb: Notifier block
10824  * @event: Notifier event
10825  * @buf: Notifier data (unused)
10826  *
10827  * Return value:
10828  *      NOTIFY_OK on handled shutdown events / NOTIFY_DONE otherwise
10829  **/
10830 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10831 {
10832         struct ipr_cmnd *ipr_cmd;
10833         struct ipr_ioa_cfg *ioa_cfg;
10834         unsigned long flags = 0, driver_lock_flags;
10835
10836         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10837                 return NOTIFY_DONE;
10838
10839         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10840
10841         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10842                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10843                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10844                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10845                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10846                         continue;
10847                 }
10848
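                /*
                 * Issue SHUTDOWN PREPARE FOR NORMAL so the adapter can
                 * flush its write cache before the system goes down.
                 */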
10849                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10850                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10851                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10852                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10853                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10854
10855                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10856                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10857         }
10858         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10859
10860         return NOTIFY_OK;
10861 }
10862
10863 static struct notifier_block ipr_notifier = {
10864         .notifier_call = ipr_halt,
10865 };
10866
10867 /**
10868  * ipr_init - Module entry point
10869  *
10870  * Return value:
10871  *      0 on success / negative value on failure
10872  **/
10873 static int __init ipr_init(void)
10874 {
10875         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10876                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10877
10878         register_reboot_notifier(&ipr_notifier);
10879         return pci_register_driver(&ipr_driver);
10880 }
10881
10882 /**
10883  * ipr_exit - Module unload
10884  *
10885  * Module unload entry point.
10886  *
10887  * Return value:
10888  *      none
10889  **/
10890 static void __exit ipr_exit(void)
10891 {
10892         unregister_reboot_notifier(&ipr_notifier);
10893         pci_unregister_driver(&ipr_driver);
10894 }
10895
10896 module_init(ipr_init);
10897 module_exit(ipr_exit);