1 /*
2  *    Disk Array driver for HP Smart Array SAS controllers
3  *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
4  *    Copyright 2016 Microsemi Corporation
5  *    Copyright 2014-2015 PMC-Sierra, Inc.
6  *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
7  *
8  *    This program is free software; you can redistribute it and/or modify
9  *    it under the terms of the GNU General Public License as published by
10  *    the Free Software Foundation; version 2 of the License.
11  *
12  *    This program is distributed in the hope that it will be useful,
13  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
15  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
16  *
17  *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
18  *
19  */
20
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/fs.h>
29 #include <linux/timer.h>
30 #include <linux/init.h>
31 #include <linux/spinlock.h>
32 #include <linux/compat.h>
33 #include <linux/blktrace_api.h>
34 #include <linux/uaccess.h>
35 #include <linux/io.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/completion.h>
38 #include <linux/moduleparam.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_cmnd.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi_tcq.h>
44 #include <scsi/scsi_eh.h>
45 #include <scsi/scsi_transport_sas.h>
46 #include <scsi/scsi_dbg.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
50 #include <linux/atomic.h>
51 #include <linux/jiffies.h>
52 #include <linux/percpu-defs.h>
53 #include <linux/percpu.h>
54 #include <asm/unaligned.h>
55 #include <asm/div64.h>
56 #include "hpsa_cmd.h"
57 #include "hpsa.h"
58
59 /*
60  * HPSA_DRIVER_VERSION must be three byte values (0-255) separated by '.'
61  * with an optional trailing '-' followed by a byte value (0-255).
62  */
63 #define HPSA_DRIVER_VERSION "3.4.20-200"
64 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65 #define HPSA "hpsa"
66
67 /* How long to wait for CISS doorbell communication */
68 #define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
69 #define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
70 #define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
71 #define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
72 #define MAX_IOCTL_CONFIG_WAIT 1000
73
74 /* define how many times we will try a command because of bus resets */
75 #define MAX_CMD_RETRIES 3
76 /* How long to wait before giving up on a command */
77 #define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
78
79 /* Embedded module documentation macros - see modules.h */
80 MODULE_AUTHOR("Hewlett-Packard Company");
81 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
82         HPSA_DRIVER_VERSION);
83 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
84 MODULE_VERSION(HPSA_DRIVER_VERSION);
85 MODULE_LICENSE("GPL");
86 MODULE_ALIAS("cciss");
87
88 static int hpsa_simple_mode;
89 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
90 MODULE_PARM_DESC(hpsa_simple_mode,
91         "Use 'simple mode' rather than 'performant mode'");
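/* e.g. load with "modprobe hpsa hpsa_simple_mode=1"; the current value is also visible under /sys/module/hpsa/parameters/ */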
92
93 /* define the PCI info for the cards we can control */
94 static const struct pci_device_id hpsa_pci_device_id[] = {
95         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
96         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
97         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
98         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
99         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
100         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
101         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
102         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
103         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
104         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
105         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
106         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
107         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
108         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
109         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
110         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1920},
111         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
112         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
113         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
114         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
115         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1925},
116         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
117         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
118         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
119         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
120         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
121         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
122         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
123         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
124         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
125         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
126         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
127         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
128         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
129         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
130         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
131         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
132         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
133         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
134         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
135         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
136         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
137         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
138         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
139         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
140         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
141         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
142         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
143         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
144         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
145         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
146         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
147         {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
148         {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
149                 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
150         {PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,  PCI_ANY_ID, PCI_ANY_ID,
151                 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
152         {0,}
153 };
154
155 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
156
157 /*  board_id = Subsystem Device ID & Vendor ID
158  *  product = Marketing Name for the board
159  *  access = Address of the struct of function pointers
160  */
161 static struct board_type products[] = {
162         {0x40700E11, "Smart Array 5300", &SA5A_access},
163         {0x40800E11, "Smart Array 5i", &SA5B_access},
164         {0x40820E11, "Smart Array 532", &SA5B_access},
165         {0x40830E11, "Smart Array 5312", &SA5B_access},
166         {0x409A0E11, "Smart Array 641", &SA5A_access},
167         {0x409B0E11, "Smart Array 642", &SA5A_access},
168         {0x409C0E11, "Smart Array 6400", &SA5A_access},
169         {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
170         {0x40910E11, "Smart Array 6i", &SA5A_access},
171         {0x3225103C, "Smart Array P600", &SA5A_access},
172         {0x3223103C, "Smart Array P800", &SA5A_access},
173         {0x3234103C, "Smart Array P400", &SA5A_access},
174         {0x3235103C, "Smart Array P400i", &SA5A_access},
175         {0x3211103C, "Smart Array E200i", &SA5A_access},
176         {0x3212103C, "Smart Array E200", &SA5A_access},
177         {0x3213103C, "Smart Array E200i", &SA5A_access},
178         {0x3214103C, "Smart Array E200i", &SA5A_access},
179         {0x3215103C, "Smart Array E200i", &SA5A_access},
180         {0x3237103C, "Smart Array E500", &SA5A_access},
181         {0x323D103C, "Smart Array P700m", &SA5A_access},
182         {0x3241103C, "Smart Array P212", &SA5_access},
183         {0x3243103C, "Smart Array P410", &SA5_access},
184         {0x3245103C, "Smart Array P410i", &SA5_access},
185         {0x3247103C, "Smart Array P411", &SA5_access},
186         {0x3249103C, "Smart Array P812", &SA5_access},
187         {0x324A103C, "Smart Array P712m", &SA5_access},
188         {0x324B103C, "Smart Array P711m", &SA5_access},
189         {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
190         {0x3350103C, "Smart Array P222", &SA5_access},
191         {0x3351103C, "Smart Array P420", &SA5_access},
192         {0x3352103C, "Smart Array P421", &SA5_access},
193         {0x3353103C, "Smart Array P822", &SA5_access},
194         {0x3354103C, "Smart Array P420i", &SA5_access},
195         {0x3355103C, "Smart Array P220i", &SA5_access},
196         {0x3356103C, "Smart Array P721m", &SA5_access},
197         {0x1920103C, "Smart Array P430i", &SA5_access},
198         {0x1921103C, "Smart Array P830i", &SA5_access},
199         {0x1922103C, "Smart Array P430", &SA5_access},
200         {0x1923103C, "Smart Array P431", &SA5_access},
201         {0x1924103C, "Smart Array P830", &SA5_access},
202         {0x1925103C, "Smart Array P831", &SA5_access},
203         {0x1926103C, "Smart Array P731m", &SA5_access},
204         {0x1928103C, "Smart Array P230i", &SA5_access},
205         {0x1929103C, "Smart Array P530", &SA5_access},
206         {0x21BD103C, "Smart Array P244br", &SA5_access},
207         {0x21BE103C, "Smart Array P741m", &SA5_access},
208         {0x21BF103C, "Smart HBA H240ar", &SA5_access},
209         {0x21C0103C, "Smart Array P440ar", &SA5_access},
210         {0x21C1103C, "Smart Array P840ar", &SA5_access},
211         {0x21C2103C, "Smart Array P440", &SA5_access},
212         {0x21C3103C, "Smart Array P441", &SA5_access},
213         {0x21C4103C, "Smart Array", &SA5_access},
214         {0x21C5103C, "Smart Array P841", &SA5_access},
215         {0x21C6103C, "Smart HBA H244br", &SA5_access},
216         {0x21C7103C, "Smart HBA H240", &SA5_access},
217         {0x21C8103C, "Smart HBA H241", &SA5_access},
218         {0x21C9103C, "Smart Array", &SA5_access},
219         {0x21CA103C, "Smart Array P246br", &SA5_access},
220         {0x21CB103C, "Smart Array P840", &SA5_access},
221         {0x21CC103C, "Smart Array", &SA5_access},
222         {0x21CD103C, "Smart Array", &SA5_access},
223         {0x21CE103C, "Smart HBA", &SA5_access},
224         {0x05809005, "SmartHBA-SA", &SA5_access},
225         {0x05819005, "SmartHBA-SA 8i", &SA5_access},
226         {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
227         {0x05839005, "SmartHBA-SA 8e", &SA5_access},
228         {0x05849005, "SmartHBA-SA 16i", &SA5_access},
229         {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
230         {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
231         {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
232         {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
233         {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
234         {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
235         {0xFFFF103C, "Unknown Smart Array", &SA5_access},
236 };
237
238 static struct scsi_transport_template *hpsa_sas_transport_template;
239 static int hpsa_add_sas_host(struct ctlr_info *h);
240 static void hpsa_delete_sas_host(struct ctlr_info *h);
241 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
242                         struct hpsa_scsi_dev_t *device);
243 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
244 static struct hpsa_scsi_dev_t
245         *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
246                 struct sas_rphy *rphy);
247
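/*
 * Sentinel scsi_cmnd pointers: c->scsi_cmd is pointed at one of these to
 * mark a driver-internal command as busy or idle rather than at a real
 * SCSI command.
 */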
248 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
249 static const struct scsi_cmnd hpsa_cmd_busy;
250 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
251 static const struct scsi_cmnd hpsa_cmd_idle;
252 static int number_of_controllers;
253
254 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
255 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
256 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
257                       void __user *arg);
258 static int hpsa_passthru_ioctl(struct ctlr_info *h,
259                                IOCTL_Command_struct *iocommand);
260 static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
261                                    BIG_IOCTL_Command_struct *ioc);
262
263 #ifdef CONFIG_COMPAT
264 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
265         void __user *arg);
266 #endif
267
268 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
269 static struct CommandList *cmd_alloc(struct ctlr_info *h);
270 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
271 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
272                                             struct scsi_cmnd *scmd);
273 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
274         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
275         int cmd_type);
276 static void hpsa_free_cmd_pool(struct ctlr_info *h);
277 #define VPD_PAGE (1 << 8)
278 #define HPSA_SIMPLE_ERROR_BITS 0x03
279
280 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
281 static void hpsa_scan_start(struct Scsi_Host *);
282 static int hpsa_scan_finished(struct Scsi_Host *sh,
283         unsigned long elapsed_time);
284 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
285
286 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
287 static int hpsa_slave_alloc(struct scsi_device *sdev);
288 static int hpsa_slave_configure(struct scsi_device *sdev);
289 static void hpsa_slave_destroy(struct scsi_device *sdev);
290
291 static void hpsa_update_scsi_devices(struct ctlr_info *h);
292 static int check_for_unit_attention(struct ctlr_info *h,
293         struct CommandList *c);
294 static void check_ioctl_unit_attention(struct ctlr_info *h,
295         struct CommandList *c);
296 /* performant mode helper functions */
297 static void calc_bucket_map(int *bucket, int num_buckets,
298         int nsgs, int min_blocks, u32 *bucket_map);
299 static void hpsa_free_performant_mode(struct ctlr_info *h);
300 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
301 static inline u32 next_command(struct ctlr_info *h, u8 q);
302 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
303                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
304                                u64 *cfg_offset);
305 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
306                                     unsigned long *memory_bar);
307 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
308                                 bool *legacy_board);
309 static int wait_for_device_to_become_ready(struct ctlr_info *h,
310                                            unsigned char lunaddr[],
311                                            int reply_queue);
312 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
313                                      int wait_for_ready);
314 static inline void finish_cmd(struct CommandList *c);
315 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
316 #define BOARD_NOT_READY 0
317 #define BOARD_READY 1
318 static void hpsa_drain_accel_commands(struct ctlr_info *h);
319 static void hpsa_flush_cache(struct ctlr_info *h);
320 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
321         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
322         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
323 static void hpsa_command_resubmit_worker(struct work_struct *work);
324 static u32 lockup_detected(struct ctlr_info *h);
325 static int detect_controller_lockup(struct ctlr_info *h);
326 static void hpsa_disable_rld_caching(struct ctlr_info *h);
327 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
328         struct ReportExtendedLUNdata *buf, int bufsize);
329 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
330         unsigned char scsi3addr[], u8 page);
331 static int hpsa_luns_changed(struct ctlr_info *h);
332 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
333                                struct hpsa_scsi_dev_t *dev,
334                                unsigned char *scsi3addr);
335
336 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
337 {
338         unsigned long *priv = shost_priv(sdev->host);
339         return (struct ctlr_info *) *priv;
340 }
341
342 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
343 {
344         unsigned long *priv = shost_priv(sh);
345         return (struct ctlr_info *) *priv;
346 }
347
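/* A command is idle when its scsi_cmd pointer holds the SCSI_CMD_IDLE sentinel. */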
348 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
349 {
350         return c->scsi_cmd == SCSI_CMD_IDLE;
351 }
352
353 /* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
354 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
355                         u8 *sense_key, u8 *asc, u8 *ascq)
356 {
357         struct scsi_sense_hdr sshdr;
358         bool rc;
359
360         *sense_key = -1;
361         *asc = -1;
362         *ascq = -1;
363
364         if (sense_data_len < 1)
365                 return;
366
367         rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
368         if (rc) {
369                 *sense_key = sshdr.sense_key;
370                 *asc = sshdr.asc;
371                 *ascq = sshdr.ascq;
372         }
373 }
374
375 static int check_for_unit_attention(struct ctlr_info *h,
376         struct CommandList *c)
377 {
378         u8 sense_key, asc, ascq;
379         int sense_len;
380
381         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
382                 sense_len = sizeof(c->err_info->SenseInfo);
383         else
384                 sense_len = c->err_info->SenseLen;
385
386         decode_sense_data(c->err_info->SenseInfo, sense_len,
387                                 &sense_key, &asc, &ascq);
388         if (sense_key != UNIT_ATTENTION || asc == 0xff)
389                 return 0;
390
391         switch (asc) {
392         case STATE_CHANGED:
393                 dev_warn(&h->pdev->dev,
394                         "%s: a state change detected, command retried\n",
395                         h->devname);
396                 break;
397         case LUN_FAILED:
398                 dev_warn(&h->pdev->dev,
399                         "%s: LUN failure detected\n", h->devname);
400                 break;
401         case REPORT_LUNS_CHANGED:
402                 dev_warn(&h->pdev->dev,
403                         "%s: report LUN data changed\n", h->devname);
404         /*
405          * Note: this REPORT_LUNS_CHANGED condition only occurs on external
406          * target (array) devices.
407          */
408                 break;
409         case POWER_OR_RESET:
410                 dev_warn(&h->pdev->dev,
411                         "%s: a power on or device reset detected\n",
412                         h->devname);
413                 break;
414         case UNIT_ATTENTION_CLEARED:
415                 dev_warn(&h->pdev->dev,
416                         "%s: unit attention cleared by another initiator\n",
417                         h->devname);
418                 break;
419         default:
420                 dev_warn(&h->pdev->dev,
421                         "%s: unknown unit attention detected\n",
422                         h->devname);
423                 break;
424         }
425         return 1;
426 }
427
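/*
 * Return 1 if the command completed with a SAM BUSY or TASK SET FULL
 * SCSI status so the caller can retry it, 0 otherwise.
 */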
428 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
429 {
430         if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
431                 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
432                  c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
433                 return 0;
434         dev_warn(&h->pdev->dev, HPSA " device busy\n");
435         return 1;
436 }
437
438 static u32 lockup_detected(struct ctlr_info *h);
439 static ssize_t host_show_lockup_detected(struct device *dev,
440                 struct device_attribute *attr, char *buf)
441 {
442         int ld;
443         struct ctlr_info *h;
444         struct Scsi_Host *shost = class_to_shost(dev);
445
446         h = shost_to_hba(shost);
447         ld = lockup_detected(h);
448
449         return sprintf(buf, "ld=%d\n", ld);
450 }
451
452 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
453                                          struct device_attribute *attr,
454                                          const char *buf, size_t count)
455 {
456         int status, len;
457         struct ctlr_info *h;
458         struct Scsi_Host *shost = class_to_shost(dev);
459         char tmpbuf[10];
460
461         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
462                 return -EACCES;
463         len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
464         strncpy(tmpbuf, buf, len);
465         tmpbuf[len] = '\0';
466         if (sscanf(tmpbuf, "%d", &status) != 1)
467                 return -EINVAL;
468         h = shost_to_hba(shost);
469         h->acciopath_status = !!status;
470         dev_warn(&h->pdev->dev,
471                 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
472                 h->acciopath_status ? "enabled" : "disabled");
473         return count;
474 }
475
476 static ssize_t host_store_raid_offload_debug(struct device *dev,
477                                          struct device_attribute *attr,
478                                          const char *buf, size_t count)
479 {
480         int debug_level, len;
481         struct ctlr_info *h;
482         struct Scsi_Host *shost = class_to_shost(dev);
483         char tmpbuf[10];
484
485         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
486                 return -EACCES;
487         len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
488         strncpy(tmpbuf, buf, len);
489         tmpbuf[len] = '\0';
490         if (sscanf(tmpbuf, "%d", &debug_level) != 1)
491                 return -EINVAL;
492         if (debug_level < 0)
493                 debug_level = 0;
494         h = shost_to_hba(shost);
495         h->raid_offload_debug = debug_level;
496         dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
497                 h->raid_offload_debug);
498         return count;
499 }
500
501 static ssize_t host_store_rescan(struct device *dev,
502                                  struct device_attribute *attr,
503                                  const char *buf, size_t count)
504 {
505         struct ctlr_info *h;
506         struct Scsi_Host *shost = class_to_shost(dev);
507         h = shost_to_hba(shost);
508         hpsa_scan_start(h->scsi_host);
509         return count;
510 }
511
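/* Stop routing I/O for this device down the ioaccel (HP SSD Smart Path) path. */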
512 static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
513 {
514         device->offload_enabled = 0;
515         device->offload_to_be_enabled = 0;
516 }
517
518 static ssize_t host_show_firmware_revision(struct device *dev,
519              struct device_attribute *attr, char *buf)
520 {
521         struct ctlr_info *h;
522         struct Scsi_Host *shost = class_to_shost(dev);
523         unsigned char *fwrev;
524
525         h = shost_to_hba(shost);
526         if (!h->hba_inquiry_data)
527                 return 0;
528         fwrev = &h->hba_inquiry_data[32];
529         return snprintf(buf, 20, "%c%c%c%c\n",
530                 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
531 }
532
533 static ssize_t host_show_commands_outstanding(struct device *dev,
534              struct device_attribute *attr, char *buf)
535 {
536         struct Scsi_Host *shost = class_to_shost(dev);
537         struct ctlr_info *h = shost_to_hba(shost);
538
539         return snprintf(buf, 20, "%d\n",
540                         atomic_read(&h->commands_outstanding));
541 }
542
543 static ssize_t host_show_transport_mode(struct device *dev,
544         struct device_attribute *attr, char *buf)
545 {
546         struct ctlr_info *h;
547         struct Scsi_Host *shost = class_to_shost(dev);
548
549         h = shost_to_hba(shost);
550         return snprintf(buf, 20, "%s\n",
551                 h->transMethod & CFGTBL_Trans_Performant ?
552                         "performant" : "simple");
553 }
554
555 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
556         struct device_attribute *attr, char *buf)
557 {
558         struct ctlr_info *h;
559         struct Scsi_Host *shost = class_to_shost(dev);
560
561         h = shost_to_hba(shost);
562         return snprintf(buf, 30, "HP SSD Smart Path %s\n",
563                 (h->acciopath_status == 1) ?  "enabled" : "disabled");
564 }
565
566 /* List of controllers which cannot be hard reset on kexec with reset_devices */
567 static u32 unresettable_controller[] = {
568         0x324a103C, /* Smart Array P712m */
569         0x324b103C, /* Smart Array P711m */
570         0x3223103C, /* Smart Array P800 */
571         0x3234103C, /* Smart Array P400 */
572         0x3235103C, /* Smart Array P400i */
573         0x3211103C, /* Smart Array E200i */
574         0x3212103C, /* Smart Array E200 */
575         0x3213103C, /* Smart Array E200i */
576         0x3214103C, /* Smart Array E200i */
577         0x3215103C, /* Smart Array E200i */
578         0x3237103C, /* Smart Array E500 */
579         0x323D103C, /* Smart Array P700m */
580         0x40800E11, /* Smart Array 5i */
581         0x409C0E11, /* Smart Array 6400 */
582         0x409D0E11, /* Smart Array 6400 EM */
583         0x40700E11, /* Smart Array 5300 */
584         0x40820E11, /* Smart Array 532 */
585         0x40830E11, /* Smart Array 5312 */
586         0x409A0E11, /* Smart Array 641 */
587         0x409B0E11, /* Smart Array 642 */
588         0x40910E11, /* Smart Array 6i */
589 };
590
591 /* List of controllers which cannot even be soft reset */
592 static u32 soft_unresettable_controller[] = {
593         0x40800E11, /* Smart Array 5i */
594         0x40700E11, /* Smart Array 5300 */
595         0x40820E11, /* Smart Array 532 */
596         0x40830E11, /* Smart Array 5312 */
597         0x409A0E11, /* Smart Array 641 */
598         0x409B0E11, /* Smart Array 642 */
599         0x40910E11, /* Smart Array 6i */
600         /* Exclude 640x boards.  These are two pci devices in one slot
601          * which share a battery backed cache module.  One controls the
602          * cache, the other accesses the cache through the one that controls
603          * it.  If we reset the one controlling the cache, the other will
604          * likely not be happy.  Just forbid resetting this conjoined mess.
605          * The 640x isn't really supported by hpsa anyway.
606          */
607         0x409C0E11, /* Smart Array 6400 */
608         0x409D0E11, /* Smart Array 6400 EM */
609 };
610
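/* Return 1 if board_id appears in the nelems-entry array a[], else 0. */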
611 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
612 {
613         int i;
614
615         for (i = 0; i < nelems; i++)
616                 if (a[i] == board_id)
617                         return 1;
618         return 0;
619 }
620
621 static int ctlr_is_hard_resettable(u32 board_id)
622 {
623         return !board_id_in_array(unresettable_controller,
624                         ARRAY_SIZE(unresettable_controller), board_id);
625 }
626
627 static int ctlr_is_soft_resettable(u32 board_id)
628 {
629         return !board_id_in_array(soft_unresettable_controller,
630                         ARRAY_SIZE(soft_unresettable_controller), board_id);
631 }
632
633 static int ctlr_is_resettable(u32 board_id)
634 {
635         return ctlr_is_hard_resettable(board_id) ||
636                 ctlr_is_soft_resettable(board_id);
637 }
638
639 static ssize_t host_show_resettable(struct device *dev,
640         struct device_attribute *attr, char *buf)
641 {
642         struct ctlr_info *h;
643         struct Scsi_Host *shost = class_to_shost(dev);
644
645         h = shost_to_hba(shost);
646         return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
647 }
648
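/* CISS addressing: bits 7-6 of address byte 3 equal to 01 denote logical device addressing mode. */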
649 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
650 {
651         return (scsi3addr[3] & 0xC0) == 0x40;
652 }
653
654 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
655         "1(+0)ADM", "UNKNOWN", "PHYS DRV"
656 };
657 #define HPSA_RAID_0     0
658 #define HPSA_RAID_4     1
659 #define HPSA_RAID_1     2       /* also used for RAID 10 */
660 #define HPSA_RAID_5     3       /* also used for RAID 50 */
661 #define HPSA_RAID_51    4
662 #define HPSA_RAID_6     5       /* also used for RAID 60 */
663 #define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
664 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
665 #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
666
667 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
668 {
669         return !device->physical_device;
670 }
671
672 static ssize_t raid_level_show(struct device *dev,
673              struct device_attribute *attr, char *buf)
674 {
675         ssize_t l = 0;
676         unsigned char rlevel;
677         struct ctlr_info *h;
678         struct scsi_device *sdev;
679         struct hpsa_scsi_dev_t *hdev;
680         unsigned long flags;
681
682         sdev = to_scsi_device(dev);
683         h = sdev_to_hba(sdev);
684         spin_lock_irqsave(&h->lock, flags);
685         hdev = sdev->hostdata;
686         if (!hdev) {
687                 spin_unlock_irqrestore(&h->lock, flags);
688                 return -ENODEV;
689         }
690
691         /* Is this even a logical drive? */
692         if (!is_logical_device(hdev)) {
693                 spin_unlock_irqrestore(&h->lock, flags);
694                 l = snprintf(buf, PAGE_SIZE, "N/A\n");
695                 return l;
696         }
697
698         rlevel = hdev->raid_level;
699         spin_unlock_irqrestore(&h->lock, flags);
700         if (rlevel > RAID_UNKNOWN)
701                 rlevel = RAID_UNKNOWN;
702         l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
703         return l;
704 }
705
706 static ssize_t lunid_show(struct device *dev,
707              struct device_attribute *attr, char *buf)
708 {
709         struct ctlr_info *h;
710         struct scsi_device *sdev;
711         struct hpsa_scsi_dev_t *hdev;
712         unsigned long flags;
713         unsigned char lunid[8];
714
715         sdev = to_scsi_device(dev);
716         h = sdev_to_hba(sdev);
717         spin_lock_irqsave(&h->lock, flags);
718         hdev = sdev->hostdata;
719         if (!hdev) {
720                 spin_unlock_irqrestore(&h->lock, flags);
721                 return -ENODEV;
722         }
723         memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
724         spin_unlock_irqrestore(&h->lock, flags);
725         return snprintf(buf, 20, "0x%8phN\n", lunid);
726 }
727
728 static ssize_t unique_id_show(struct device *dev,
729              struct device_attribute *attr, char *buf)
730 {
731         struct ctlr_info *h;
732         struct scsi_device *sdev;
733         struct hpsa_scsi_dev_t *hdev;
734         unsigned long flags;
735         unsigned char sn[16];
736
737         sdev = to_scsi_device(dev);
738         h = sdev_to_hba(sdev);
739         spin_lock_irqsave(&h->lock, flags);
740         hdev = sdev->hostdata;
741         if (!hdev) {
742                 spin_unlock_irqrestore(&h->lock, flags);
743                 return -ENODEV;
744         }
745         memcpy(sn, hdev->device_id, sizeof(sn));
746         spin_unlock_irqrestore(&h->lock, flags);
747         return snprintf(buf, 16 * 2 + 2,
748                         "%02X%02X%02X%02X%02X%02X%02X%02X"
749                         "%02X%02X%02X%02X%02X%02X%02X%02X\n",
750                         sn[0], sn[1], sn[2], sn[3],
751                         sn[4], sn[5], sn[6], sn[7],
752                         sn[8], sn[9], sn[10], sn[11],
753                         sn[12], sn[13], sn[14], sn[15]);
754 }
755
756 static ssize_t sas_address_show(struct device *dev,
757               struct device_attribute *attr, char *buf)
758 {
759         struct ctlr_info *h;
760         struct scsi_device *sdev;
761         struct hpsa_scsi_dev_t *hdev;
762         unsigned long flags;
763         u64 sas_address;
764
765         sdev = to_scsi_device(dev);
766         h = sdev_to_hba(sdev);
767         spin_lock_irqsave(&h->lock, flags);
768         hdev = sdev->hostdata;
769         if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
770                 spin_unlock_irqrestore(&h->lock, flags);
771                 return -ENODEV;
772         }
773         sas_address = hdev->sas_address;
774         spin_unlock_irqrestore(&h->lock, flags);
775
776         return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
777 }
778
779 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
780              struct device_attribute *attr, char *buf)
781 {
782         struct ctlr_info *h;
783         struct scsi_device *sdev;
784         struct hpsa_scsi_dev_t *hdev;
785         unsigned long flags;
786         int offload_enabled;
787
788         sdev = to_scsi_device(dev);
789         h = sdev_to_hba(sdev);
790         spin_lock_irqsave(&h->lock, flags);
791         hdev = sdev->hostdata;
792         if (!hdev) {
793                 spin_unlock_irqrestore(&h->lock, flags);
794                 return -ENODEV;
795         }
796         offload_enabled = hdev->offload_enabled;
797         spin_unlock_irqrestore(&h->lock, flags);
798
799         if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
800                 return snprintf(buf, 20, "%d\n", offload_enabled);
801         else
802                 return snprintf(buf, 40, "%s\n",
803                                 "Not applicable for a controller");
804 }
805
806 #define MAX_PATHS 8
807 static ssize_t path_info_show(struct device *dev,
808              struct device_attribute *attr, char *buf)
809 {
810         struct ctlr_info *h;
811         struct scsi_device *sdev;
812         struct hpsa_scsi_dev_t *hdev;
813         unsigned long flags;
814         int i;
815         int output_len = 0;
816         u8 box;
817         u8 bay;
818         u8 path_map_index = 0;
819         char *active;
820         unsigned char phys_connector[2];
821
822         sdev = to_scsi_device(dev);
823         h = sdev_to_hba(sdev);
824         spin_lock_irqsave(&h->devlock, flags);
825         hdev = sdev->hostdata;
826         if (!hdev) {
827                 spin_unlock_irqrestore(&h->devlock, flags);
828                 return -ENODEV;
829         }
830
831         bay = hdev->bay;
832         for (i = 0; i < MAX_PATHS; i++) {
833                 path_map_index = 1<<i;
834                 if (i == hdev->active_path_index)
835                         active = "Active";
836                 else if (hdev->path_map & path_map_index)
837                         active = "Inactive";
838                 else
839                         continue;
840
841                 output_len += scnprintf(buf + output_len,
842                                 PAGE_SIZE - output_len,
843                                 "[%d:%d:%d:%d] %20.20s ",
844                                 h->scsi_host->host_no,
845                                 hdev->bus, hdev->target, hdev->lun,
846                                 scsi_device_type(hdev->devtype));
847
848                 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
849                         output_len += scnprintf(buf + output_len,
850                                                 PAGE_SIZE - output_len,
851                                                 "%s\n", active);
852                         continue;
853                 }
854
855                 box = hdev->box[i];
856                 memcpy(&phys_connector, &hdev->phys_connector[i],
857                         sizeof(phys_connector));
858                 if (phys_connector[0] < '0')
859                         phys_connector[0] = '0';
860                 if (phys_connector[1] < '0')
861                         phys_connector[1] = '0';
862                 output_len += scnprintf(buf + output_len,
863                                 PAGE_SIZE - output_len,
864                                 "PORT: %.2s ",
865                                 phys_connector);
866                 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
867                         hdev->expose_device) {
868                         if (box == 0 || box == 0xFF) {
869                                 output_len += scnprintf(buf + output_len,
870                                         PAGE_SIZE - output_len,
871                                         "BAY: %hhu %s\n",
872                                         bay, active);
873                         } else {
874                                 output_len += scnprintf(buf + output_len,
875                                         PAGE_SIZE - output_len,
876                                         "BOX: %hhu BAY: %hhu %s\n",
877                                         box, bay, active);
878                         }
879                 } else if (box != 0 && box != 0xFF) {
880                         output_len += scnprintf(buf + output_len,
881                                 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
882                                 box, active);
883                 } else
884                         output_len += scnprintf(buf + output_len,
885                                 PAGE_SIZE - output_len, "%s\n", active);
886         }
887
888         spin_unlock_irqrestore(&h->devlock, flags);
889         return output_len;
890 }
891
892 static ssize_t host_show_ctlr_num(struct device *dev,
893         struct device_attribute *attr, char *buf)
894 {
895         struct ctlr_info *h;
896         struct Scsi_Host *shost = class_to_shost(dev);
897
898         h = shost_to_hba(shost);
899         return snprintf(buf, 20, "%d\n", h->ctlr);
900 }
901
902 static ssize_t host_show_legacy_board(struct device *dev,
903         struct device_attribute *attr, char *buf)
904 {
905         struct ctlr_info *h;
906         struct Scsi_Host *shost = class_to_shost(dev);
907
908         h = shost_to_hba(shost);
909         return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
910 }
911
912 static DEVICE_ATTR_RO(raid_level);
913 static DEVICE_ATTR_RO(lunid);
914 static DEVICE_ATTR_RO(unique_id);
915 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
916 static DEVICE_ATTR_RO(sas_address);
917 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
918                         host_show_hp_ssd_smart_path_enabled, NULL);
919 static DEVICE_ATTR_RO(path_info);
920 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
921                 host_show_hp_ssd_smart_path_status,
922                 host_store_hp_ssd_smart_path_status);
923 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
924                         host_store_raid_offload_debug);
925 static DEVICE_ATTR(firmware_revision, S_IRUGO,
926         host_show_firmware_revision, NULL);
927 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
928         host_show_commands_outstanding, NULL);
929 static DEVICE_ATTR(transport_mode, S_IRUGO,
930         host_show_transport_mode, NULL);
931 static DEVICE_ATTR(resettable, S_IRUGO,
932         host_show_resettable, NULL);
933 static DEVICE_ATTR(lockup_detected, S_IRUGO,
934         host_show_lockup_detected, NULL);
935 static DEVICE_ATTR(ctlr_num, S_IRUGO,
936         host_show_ctlr_num, NULL);
937 static DEVICE_ATTR(legacy_board, S_IRUGO,
938         host_show_legacy_board, NULL);
939
940 static struct device_attribute *hpsa_sdev_attrs[] = {
941         &dev_attr_raid_level,
942         &dev_attr_lunid,
943         &dev_attr_unique_id,
944         &dev_attr_hp_ssd_smart_path_enabled,
945         &dev_attr_path_info,
946         &dev_attr_sas_address,
947         NULL,
948 };
949
950 static struct device_attribute *hpsa_shost_attrs[] = {
951         &dev_attr_rescan,
952         &dev_attr_firmware_revision,
953         &dev_attr_commands_outstanding,
954         &dev_attr_transport_mode,
955         &dev_attr_resettable,
956         &dev_attr_hp_ssd_smart_path_status,
957         &dev_attr_raid_offload_debug,
958         &dev_attr_lockup_detected,
959         &dev_attr_ctlr_num,
960         &dev_attr_legacy_board,
961         NULL,
962 };
963
964 #define HPSA_NRESERVED_CMDS     (HPSA_CMDS_RESERVED_FOR_DRIVER +\
965                                  HPSA_MAX_CONCURRENT_PASSTHRUS)
966
967 static struct scsi_host_template hpsa_driver_template = {
968         .module                 = THIS_MODULE,
969         .name                   = HPSA,
970         .proc_name              = HPSA,
971         .queuecommand           = hpsa_scsi_queue_command,
972         .scan_start             = hpsa_scan_start,
973         .scan_finished          = hpsa_scan_finished,
974         .change_queue_depth     = hpsa_change_queue_depth,
975         .this_id                = -1,
976         .eh_device_reset_handler = hpsa_eh_device_reset_handler,
977         .ioctl                  = hpsa_ioctl,
978         .slave_alloc            = hpsa_slave_alloc,
979         .slave_configure        = hpsa_slave_configure,
980         .slave_destroy          = hpsa_slave_destroy,
981 #ifdef CONFIG_COMPAT
982         .compat_ioctl           = hpsa_compat_ioctl,
983 #endif
984         .sdev_attrs = hpsa_sdev_attrs,
985         .shost_attrs = hpsa_shost_attrs,
986         .max_sectors = 2048,
987         .no_write_same = 1,
988 };
989
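/*
 * Pull the next completed command tag off reply queue q.  ioaccel1 and
 * simple mode ask the controller directly via command_completed(); in
 * performant mode the driver's reply ring (rq->head[]) is consumed,
 * toggling the wraparound bit each time the end of the ring is reached.
 */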
990 static inline u32 next_command(struct ctlr_info *h, u8 q)
991 {
992         u32 a;
993         struct reply_queue_buffer *rq = &h->reply_queue[q];
994
995         if (h->transMethod & CFGTBL_Trans_io_accel1)
996                 return h->access.command_completed(h, q);
997
998         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
999                 return h->access.command_completed(h, q);
1000
1001         if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
1002                 a = rq->head[rq->current_entry];
1003                 rq->current_entry++;
1004                 atomic_dec(&h->commands_outstanding);
1005         } else {
1006                 a = FIFO_EMPTY;
1007         }
1008         /* Check for wraparound */
1009         if (rq->current_entry == h->max_commands) {
1010                 rq->current_entry = 0;
1011                 rq->wraparound ^= 1;
1012         }
1013         return a;
1014 }
1015
1016 /*
1017  * There are some special bits in the bus address of the
1018  * command that we have to set for the controller to know
1019  * how to process the command:
1020  *
1021  * Normal performant mode:
1022  * bit 0: 1 means performant mode, 0 means simple mode.
1023  * bits 1-3 = block fetch table entry
1024  * bits 4-6 = command type (== 0)
1025  *
1026  * ioaccel1 mode:
1027  * bit 0 = "performant mode" bit.
1028  * bits 1-3 = block fetch table entry
1029  * bits 4-6 = command type (== 110)
1030  * (command type is needed because ioaccel1 mode
1031  * commands are submitted through the same register as normal
1032  * mode commands, so this is how the controller knows whether
1033  * the command is normal mode or ioaccel1 mode.)
1034  *
1035  * ioaccel2 mode:
1036  * bit 0 = "performant mode" bit.
1037  * bits 1-4 = block fetch table entry (note extra bit)
1038  * bits 4-6 = not needed, because ioaccel2 mode has
1039  * a separate special register for submitting commands.
1040  */
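/*
 * Worked example for normal performant mode (mirrors set_performant_mode()
 * below): a command whose block fetch table entry is 3 is tagged with
 * busaddr |= 1 | (3 << 1), i.e. bit 0 set for performant mode and 011 in
 * bits 1-3.
 */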
1041
1042 /*
1043  * set_performant_mode: Modify the tag for cciss performant mode:
1044  * set bit 0 for pull model, bits 3-1 for the block fetch
1045  * register number.
1046  */
1047 #define DEFAULT_REPLY_QUEUE (-1)
1048 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1049                                         int reply_queue)
1050 {
1051         if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1052                 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1053                 if (unlikely(!h->msix_vectors))
1054                         return;
1055                 c->Header.ReplyQueue = reply_queue;
1056         }
1057 }
1058
1059 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1060                                                 struct CommandList *c,
1061                                                 int reply_queue)
1062 {
1063         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1064
1065         /*
1066          * Tell the controller to post the reply to the queue for this
1067          * processor.  This seems to give the best I/O throughput.
1068          */
1069         cp->ReplyQueue = reply_queue;
1070         /*
1071          * Set the bits in the address sent down to include:
1072          *  - performant mode bit (bit 0)
1073          *  - pull count (bits 1-3)
1074          *  - command type (bits 4-6)
1075          */
1076         c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1077                                         IOACCEL1_BUSADDR_CMDTYPE;
1078 }
1079
1080 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1081                                                 struct CommandList *c,
1082                                                 int reply_queue)
1083 {
1084         struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1085                 &h->ioaccel2_cmd_pool[c->cmdindex];
1086
1087         /* Tell the controller to post the reply to the queue for this
1088          * processor.  This seems to give the best I/O throughput.
1089          */
1090         cp->reply_queue = reply_queue;
1091         /* Set the bits in the address sent down to include:
1092          *  - performant mode bit not used in ioaccel mode 2
1093          *  - pull count (bits 0-3)
1094          *  - command type isn't needed for ioaccel2
1095          */
1096         c->busaddr |= h->ioaccel2_blockFetchTable[0];
1097 }
1098
1099 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1100                                                 struct CommandList *c,
1101                                                 int reply_queue)
1102 {
1103         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1104
1105         /*
1106          * Tell the controller to post the reply to the queue for this
1107          * processor.  This seems to give the best I/O throughput.
1108          */
1109         cp->reply_queue = reply_queue;
1110         /*
1111          * Set the bits in the address sent down to include:
1112          *  - performant mode bit not used in ioaccel mode 2
1113          *  - pull count (bits 0-3)
1114          *  - command type isn't needed for ioaccel2
1115          */
1116         c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1117 }
1118
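/* Firmware flashes are BMIC writes with the flash-firmware subcommand in CDB byte 6. */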
1119 static int is_firmware_flash_cmd(u8 *cdb)
1120 {
1121         return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1122 }
1123
1124 /*
1125  * During firmware flash, the heartbeat register may not update as frequently
1126  * as it should.  So we dial down lockup detection during firmware flash, and
1127  * dial it back up when firmware flash completes.
1128  */
1129 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1130 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1131 #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
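/*
 * i.e. while a flash is in flight the lockup detector samples the heartbeat
 * every 240 s instead of the usual 30 s.
 */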
1132 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1133                 struct CommandList *c)
1134 {
1135         if (!is_firmware_flash_cmd(c->Request.CDB))
1136                 return;
1137         atomic_inc(&h->firmware_flash_in_progress);
1138         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1139 }
1140
1141 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1142                 struct CommandList *c)
1143 {
1144         if (is_firmware_flash_cmd(c->Request.CDB) &&
1145                 atomic_dec_and_test(&h->firmware_flash_in_progress))
1146                 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1147 }
1148
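/*
 * Tag the command for the reply queue mapped to the submitting CPU and
 * post it to the controller through the register appropriate for its
 * cmd_type (ioaccel1, ioaccel2, ioaccel2 TMF, or normal performant/simple
 * submission).
 */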
1149 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1150         struct CommandList *c, int reply_queue)
1151 {
1152         dial_down_lockup_detection_during_fw_flash(h, c);
1153         atomic_inc(&h->commands_outstanding);
1154         /*
1155          * Check to see if the command is being retried.
1156          */
1157         if (c->device && !c->retry_pending)
1158                 atomic_inc(&c->device->commands_outstanding);
1159
1160         reply_queue = h->reply_map[raw_smp_processor_id()];
1161         switch (c->cmd_type) {
1162         case CMD_IOACCEL1:
1163                 set_ioaccel1_performant_mode(h, c, reply_queue);
1164                 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1165                 break;
1166         case CMD_IOACCEL2:
1167                 set_ioaccel2_performant_mode(h, c, reply_queue);
1168                 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1169                 break;
1170         case IOACCEL2_TMF:
1171                 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1172                 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1173                 break;
1174         default:
1175                 set_performant_mode(h, c, reply_queue);
1176                 h->access.submit_command(h, c);
1177         }
1178 }
1179
1180 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1181 {
1182         __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1183 }
1184
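/* Return 1 if scsi3addr addresses the RAID controller itself. */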
1185 static inline int is_hba_lunid(unsigned char scsi3addr[])
1186 {
1187         return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1188 }
1189
1190 static inline int is_scsi_rev_5(struct ctlr_info *h)
1191 {
1192         if (!h->hba_inquiry_data)
1193                 return 0;
1194         if ((h->hba_inquiry_data[2] & 0x07) == 5)
1195                 return 1;
1196         return 0;
1197 }
1198
1199 static int hpsa_find_target_lun(struct ctlr_info *h,
1200         unsigned char scsi3addr[], int bus, int *target, int *lun)
1201 {
1202         /* finds an unused bus, target, lun for a new physical device
1203          * assumes h->devlock is held
1204          */
1205         int i, found = 0;
1206         DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1207
1208         bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1209
1210         for (i = 0; i < h->ndevices; i++) {
1211                 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1212                         __set_bit(h->dev[i]->target, lun_taken);
1213         }
1214
1215         i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1216         if (i < HPSA_MAX_DEVICES) {
1217                 /* *bus = 1; */
1218                 *target = i;
1219                 *lun = 0;
1220                 found = 1;
1221         }
1222         return !found;
1223 }
1224
1225 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1226         struct hpsa_scsi_dev_t *dev, char *description)
1227 {
1228 #define LABEL_SIZE 25
1229         char label[LABEL_SIZE];
1230
1231         if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1232                 return;
1233
1234         switch (dev->devtype) {
1235         case TYPE_RAID:
1236                 snprintf(label, LABEL_SIZE, "controller");
1237                 break;
1238         case TYPE_ENCLOSURE:
1239                 snprintf(label, LABEL_SIZE, "enclosure");
1240                 break;
1241         case TYPE_DISK:
1242         case TYPE_ZBC:
1243                 if (dev->external)
1244                         snprintf(label, LABEL_SIZE, "external");
1245                 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1246                         snprintf(label, LABEL_SIZE, "%s",
1247                                 raid_label[PHYSICAL_DRIVE]);
1248                 else
1249                         snprintf(label, LABEL_SIZE, "RAID-%s",
1250                                 dev->raid_level > RAID_UNKNOWN ? "?" :
1251                                 raid_label[dev->raid_level]);
1252                 break;
1253         case TYPE_ROM:
1254                 snprintf(label, LABEL_SIZE, "rom");
1255                 break;
1256         case TYPE_TAPE:
1257                 snprintf(label, LABEL_SIZE, "tape");
1258                 break;
1259         case TYPE_MEDIUM_CHANGER:
1260                 snprintf(label, LABEL_SIZE, "changer");
1261                 break;
1262         default:
1263                 snprintf(label, LABEL_SIZE, "UNKNOWN");
1264                 break;
1265         }
1266
1267         dev_printk(level, &h->pdev->dev,
1268                         "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1269                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1270                         description,
1271                         scsi_device_type(dev->devtype),
1272                         dev->vendor,
1273                         dev->model,
1274                         label,
1275                         dev->offload_config ? '+' : '-',
1276                         dev->offload_to_be_enabled ? '+' : '-',
1277                         dev->expose_device);
1278 }
1279
1280 /* Add an entry into h->dev[] array. */
1281 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1282                 struct hpsa_scsi_dev_t *device,
1283                 struct hpsa_scsi_dev_t *added[], int *nadded)
1284 {
1285         /* assumes h->devlock is held */
1286         int n = h->ndevices;
1287         int i;
1288         unsigned char addr1[8], addr2[8];
1289         struct hpsa_scsi_dev_t *sd;
1290
1291         if (n >= HPSA_MAX_DEVICES) {
1292                 dev_err(&h->pdev->dev, "too many devices, some will be "
1293                         "inaccessible.\n");
1294                 return -1;
1295         }
1296
1297         /* physical devices do not have lun or target assigned until now. */
1298         if (device->lun != -1)
1299                 /* Logical device, lun is already assigned. */
1300                 goto lun_assigned;
1301
1302         /* If this device is a non-zero lun of a multi-lun device,
1303          * byte 4 of the 8-byte LUN addr will contain the logical
1304          * unit number, zero otherwise.
1305          */
1306         if (device->scsi3addr[4] == 0) {
1307                 /* This is not a non-zero lun of a multi-lun device */
1308                 if (hpsa_find_target_lun(h, device->scsi3addr,
1309                         device->bus, &device->target, &device->lun) != 0)
1310                         return -1;
1311                 goto lun_assigned;
1312         }
1313
1314         /* This is a non-zero lun of a multi-lun device.
1315          * Search through our list and find the device which
1316          * has the same 8-byte LUN address, except for bytes 4 and 5.
1317          * Assign the same bus and target for this new LUN.
1318          * Use the logical unit number from the firmware.
1319          */
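        /*
         * For illustration (hypothetical addresses): an existing entry at
         * 01 40 00 00 00 00 00 00 and a new device reporting
         * 01 40 00 00 02 00 00 00 differ only in byte 4, so the new device
         * is treated as LUN 2 of the same multi-lun device and inherits
         * that entry's bus and target below.
         */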
1320         memcpy(addr1, device->scsi3addr, 8);
1321         addr1[4] = 0;
1322         addr1[5] = 0;
1323         for (i = 0; i < n; i++) {
1324                 sd = h->dev[i];
1325                 memcpy(addr2, sd->scsi3addr, 8);
1326                 addr2[4] = 0;
1327                 addr2[5] = 0;
1328                 /* differ only in bytes 4 and 5? */
1329                 if (memcmp(addr1, addr2, 8) == 0) {
1330                         device->bus = sd->bus;
1331                         device->target = sd->target;
1332                         device->lun = device->scsi3addr[4];
1333                         break;
1334                 }
1335         }
1336         if (device->lun == -1) {
1337                 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1338                         " suspect firmware bug or unsupported hardware "
1339                         "configuration.\n");
1340                 return -1;
1341         }
1342
1343 lun_assigned:
1344
1345         h->dev[n] = device;
1346         h->ndevices++;
1347         added[*nadded] = device;
1348         (*nadded)++;
1349         hpsa_show_dev_msg(KERN_INFO, h, device,
1350                 device->expose_device ? "added" : "masked");
1351         return 0;
1352 }
1353
1354 /*
1355  * Called during a scan operation.
1356  *
1357  * Update an entry in h->dev[] array.
1358  */
1359 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1360         int entry, struct hpsa_scsi_dev_t *new_entry)
1361 {
1362         /* assumes h->devlock is held */
1363         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1364
1365         /* Raid level changed. */
1366         h->dev[entry]->raid_level = new_entry->raid_level;
1367
1368         /*
1369          * ioaccel_handle may have changed for a dual domain disk
1370          */
1371         h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1372
1373         /* Raid offload parameters changed.  Careful about the ordering. */
1374         if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1375                 /*
1376                  * If the drive is newly offload_enabled, we want to copy the
1377                  * raid map data first.  If previously offload_enabled and
1378                  * offload_config were set, raid map data had better be
1379                  * the same as it was before. If raid map data has changed
1380                  * then it had better be the case that
1381                  * h->dev[entry]->offload_enabled is currently 0.
1382                  */
1383                 h->dev[entry]->raid_map = new_entry->raid_map;
1384                 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1385         }
1386         if (new_entry->offload_to_be_enabled) {
1387                 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1388                 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1389         }
1390         h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1391         h->dev[entry]->offload_config = new_entry->offload_config;
1392         h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1393         h->dev[entry]->queue_depth = new_entry->queue_depth;
1394
1395         /*
1396          * We can turn off ioaccel offload now, but we need to delay turning
1397          * ioaccel on until we can update h->dev[entry]->phys_disk[], and we
1398          * can't do that until all the devices are updated.
1399          */
1400         h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1401
1402         /*
1403          * turn ioaccel off immediately if told to do so.
1404          */
1405         if (!new_entry->offload_to_be_enabled)
1406                 h->dev[entry]->offload_enabled = 0;
1407
1408         hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1409 }
1410
1411 /* Replace an entry in the h->dev[] array. */
1412 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1413         int entry, struct hpsa_scsi_dev_t *new_entry,
1414         struct hpsa_scsi_dev_t *added[], int *nadded,
1415         struct hpsa_scsi_dev_t *removed[], int *nremoved)
1416 {
1417         /* assumes h->devlock is held */
1418         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1419         removed[*nremoved] = h->dev[entry];
1420         (*nremoved)++;
1421
1422         /*
1423          * New physical devices won't have target/lun assigned yet
1424          * so we need to preserve the values in the slot we are replacing.
1425          */
1426         if (new_entry->target == -1) {
1427                 new_entry->target = h->dev[entry]->target;
1428                 new_entry->lun = h->dev[entry]->lun;
1429         }
1430
1431         h->dev[entry] = new_entry;
1432         added[*nadded] = new_entry;
1433         (*nadded)++;
1434
1435         hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1436 }
1437
1438 /* Remove an entry from h->dev[] array. */
1439 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1440         struct hpsa_scsi_dev_t *removed[], int *nremoved)
1441 {
1442         /* assumes h->devlock is held */
1443         int i;
1444         struct hpsa_scsi_dev_t *sd;
1445
1446         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1447
1448         sd = h->dev[entry];
1449         removed[*nremoved] = h->dev[entry];
1450         (*nremoved)++;
1451
1452         for (i = entry; i < h->ndevices-1; i++)
1453                 h->dev[i] = h->dev[i+1];
1454         h->ndevices--;
1455         hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1456 }
1457
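/*
 * Byte-wise equality test for two 8-byte CISS LUN addresses; used below by
 * hpsa_scsi_find_entry() to match devices across scans.
 */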
1458 #define SCSI3ADDR_EQ(a, b) ( \
1459         (a)[7] == (b)[7] && \
1460         (a)[6] == (b)[6] && \
1461         (a)[5] == (b)[5] && \
1462         (a)[4] == (b)[4] && \
1463         (a)[3] == (b)[3] && \
1464         (a)[2] == (b)[2] && \
1465         (a)[1] == (b)[1] && \
1466         (a)[0] == (b)[0])
1467
1468 static void fixup_botched_add(struct ctlr_info *h,
1469         struct hpsa_scsi_dev_t *added)
1470 {
1471         /* called when scsi_add_device fails in order to re-adjust
1472          * h->dev[] to match the mid layer's view.
1473          */
1474         unsigned long flags;
1475         int i, j;
1476
1477         spin_lock_irqsave(&h->lock, flags);
1478         for (i = 0; i < h->ndevices; i++) {
1479                 if (h->dev[i] == added) {
1480                         for (j = i; j < h->ndevices-1; j++)
1481                                 h->dev[j] = h->dev[j+1];
1482                         h->ndevices--;
1483                         break;
1484                 }
1485         }
1486         spin_unlock_irqrestore(&h->lock, flags);
1487         kfree(added);
1488 }
1489
1490 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1491         struct hpsa_scsi_dev_t *dev2)
1492 {
1493         /* We compare everything except lun and target, as these
1494          * are not yet assigned.  Compare the parts likely
1495          * to differ first.
1496          */
1497         if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1498                 sizeof(dev1->scsi3addr)) != 0)
1499                 return 0;
1500         if (memcmp(dev1->device_id, dev2->device_id,
1501                 sizeof(dev1->device_id)) != 0)
1502                 return 0;
1503         if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1504                 return 0;
1505         if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1506                 return 0;
1507         if (dev1->devtype != dev2->devtype)
1508                 return 0;
1509         if (dev1->bus != dev2->bus)
1510                 return 0;
1511         return 1;
1512 }
1513
1514 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1515         struct hpsa_scsi_dev_t *dev2)
1516 {
1517         /* Device attributes that can change, but don't mean
1518          * that the device is a different device, nor that the OS
1519          * needs to be told anything about the change.
1520          */
1521         if (dev1->raid_level != dev2->raid_level)
1522                 return 1;
1523         if (dev1->offload_config != dev2->offload_config)
1524                 return 1;
1525         if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1526                 return 1;
1527         if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1528                 if (dev1->queue_depth != dev2->queue_depth)
1529                         return 1;
1530         /*
1531          * This can happen for dual domain devices. An active
1532          * path change causes the ioaccel handle to change
1533          *
1534          * for example note the handle differences between p0 and p1
1535          * Device                    WWN               ,WWN hash,Handle
1536          * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
1537          *      p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
1538          */
1539         if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1540                 return 1;
1541         return 0;
1542 }
1543
1544 /* Find needle in haystack.  If an exact match is found, return DEVICE_SAME,
1545  * and return the needle's location in *index.  If scsi3addr matches, but not
1546  * vendor, model, serial num, etc., return DEVICE_CHANGED, and return the
1547  * needle's location in *index.
1548  * In the case of a minor device attribute change, such as RAID level, just
1549  * return DEVICE_UPDATED, along with the updated device's location in *index.
1550  * If the needle is not found, return DEVICE_NOT_FOUND.
1551  */
1552 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1553         struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1554         int *index)
1555 {
1556         int i;
1557 #define DEVICE_NOT_FOUND 0
1558 #define DEVICE_CHANGED 1
1559 #define DEVICE_SAME 2
1560 #define DEVICE_UPDATED 3
1561         if (needle == NULL)
1562                 return DEVICE_NOT_FOUND;
1563
1564         for (i = 0; i < haystack_size; i++) {
1565                 if (haystack[i] == NULL) /* previously removed. */
1566                         continue;
1567                 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1568                         *index = i;
1569                         if (device_is_the_same(needle, haystack[i])) {
1570                                 if (device_updated(needle, haystack[i]))
1571                                         return DEVICE_UPDATED;
1572                                 return DEVICE_SAME;
1573                         } else {
1574                                 /* Keep offline devices offline */
1575                                 if (needle->volume_offline)
1576                                         return DEVICE_NOT_FOUND;
1577                                 return DEVICE_CHANGED;
1578                         }
1579                 }
1580         }
1581         *index = -1;
1582         return DEVICE_NOT_FOUND;
1583 }
1584
1585 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1586                                         unsigned char scsi3addr[])
1587 {
1588         struct offline_device_entry *device;
1589         unsigned long flags;
1590
1591         /* Check to see if device is already on the list */
1592         spin_lock_irqsave(&h->offline_device_lock, flags);
1593         list_for_each_entry(device, &h->offline_device_list, offline_list) {
1594                 if (memcmp(device->scsi3addr, scsi3addr,
1595                         sizeof(device->scsi3addr)) == 0) {
1596                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1597                         return;
1598                 }
1599         }
1600         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1601
1602         /* Device is not on the list, add it. */
1603         device = kmalloc(sizeof(*device), GFP_KERNEL);
1604         if (!device)
1605                 return;
1606
1607         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1608         spin_lock_irqsave(&h->offline_device_lock, flags);
1609         list_add_tail(&device->offline_list, &h->offline_device_list);
1610         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1611 }
1612
1613 /* Print a message explaining various offline volume states */
1614 static void hpsa_show_volume_status(struct ctlr_info *h,
1615         struct hpsa_scsi_dev_t *sd)
1616 {
1617         if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1618                 dev_info(&h->pdev->dev,
1619                         "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1620                         h->scsi_host->host_no,
1621                         sd->bus, sd->target, sd->lun);
1622         switch (sd->volume_offline) {
1623         case HPSA_LV_OK:
1624                 break;
1625         case HPSA_LV_UNDERGOING_ERASE:
1626                 dev_info(&h->pdev->dev,
1627                         "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1628                         h->scsi_host->host_no,
1629                         sd->bus, sd->target, sd->lun);
1630                 break;
1631         case HPSA_LV_NOT_AVAILABLE:
1632                 dev_info(&h->pdev->dev,
1633                         "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1634                         h->scsi_host->host_no,
1635                         sd->bus, sd->target, sd->lun);
1636                 break;
1637         case HPSA_LV_UNDERGOING_RPI:
1638                 dev_info(&h->pdev->dev,
1639                         "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1640                         h->scsi_host->host_no,
1641                         sd->bus, sd->target, sd->lun);
1642                 break;
1643         case HPSA_LV_PENDING_RPI:
1644                 dev_info(&h->pdev->dev,
1645                         "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1646                         h->scsi_host->host_no,
1647                         sd->bus, sd->target, sd->lun);
1648                 break;
1649         case HPSA_LV_ENCRYPTED_NO_KEY:
1650                 dev_info(&h->pdev->dev,
1651                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1652                         h->scsi_host->host_no,
1653                         sd->bus, sd->target, sd->lun);
1654                 break;
1655         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1656                 dev_info(&h->pdev->dev,
1657                         "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1658                         h->scsi_host->host_no,
1659                         sd->bus, sd->target, sd->lun);
1660                 break;
1661         case HPSA_LV_UNDERGOING_ENCRYPTION:
1662                 dev_info(&h->pdev->dev,
1663                         "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1664                         h->scsi_host->host_no,
1665                         sd->bus, sd->target, sd->lun);
1666                 break;
1667         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1668                 dev_info(&h->pdev->dev,
1669                         "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1670                         h->scsi_host->host_no,
1671                         sd->bus, sd->target, sd->lun);
1672                 break;
1673         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1674                 dev_info(&h->pdev->dev,
1675                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1676                         h->scsi_host->host_no,
1677                         sd->bus, sd->target, sd->lun);
1678                 break;
1679         case HPSA_LV_PENDING_ENCRYPTION:
1680                 dev_info(&h->pdev->dev,
1681                         "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1682                         h->scsi_host->host_no,
1683                         sd->bus, sd->target, sd->lun);
1684                 break;
1685         case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1686                 dev_info(&h->pdev->dev,
1687                         "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1688                         h->scsi_host->host_no,
1689                         sd->bus, sd->target, sd->lun);
1690                 break;
1691         }
1692 }
1693
1694 /*
1695  * Figure out the list of physical drive pointers for a logical drive with
1696  * raid offload configured.
1697  */
1698 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1699                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1700                                 struct hpsa_scsi_dev_t *logical_drive)
1701 {
1702         struct raid_map_data *map = &logical_drive->raid_map;
1703         struct raid_map_disk_data *dd = &map->data[0];
1704         int i, j;
1705         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1706                                 le16_to_cpu(map->metadata_disks_per_row);
1707         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1708                                 le16_to_cpu(map->layout_map_count) *
1709                                 total_disks_per_row;
1710         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1711                                 total_disks_per_row;
1712         int qdepth;
1713
1714         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1715                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1716
1717         logical_drive->nphysical_disks = nraid_map_entries;
1718
1719         qdepth = 0;
1720         for (i = 0; i < nraid_map_entries; i++) {
1721                 logical_drive->phys_disk[i] = NULL;
1722                 if (!logical_drive->offload_config)
1723                         continue;
1724                 for (j = 0; j < ndevices; j++) {
1725                         if (dev[j] == NULL)
1726                                 continue;
1727                         if (dev[j]->devtype != TYPE_DISK &&
1728                             dev[j]->devtype != TYPE_ZBC)
1729                                 continue;
1730                         if (is_logical_device(dev[j]))
1731                                 continue;
1732                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1733                                 continue;
1734
1735                         logical_drive->phys_disk[i] = dev[j];
1736                         if (i < nphys_disk)
1737                                 qdepth = min(h->nr_cmds, qdepth +
1738                                     logical_drive->phys_disk[i]->queue_depth);
1739                         break;
1740                 }
1741
1742                 /*
1743                  * This can happen if a physical drive is removed and
1744                  * the logical drive is degraded.  In that case, the RAID
1745                  * map data will refer to a physical disk which isn't actually
1746                  * present.  In that case, offload_enabled should already
1747                  * be 0, but we turn it off here just in case.
1748                  */
1749                 if (!logical_drive->phys_disk[i]) {
1750                         dev_warn(&h->pdev->dev,
1751                                 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1752                                 __func__,
1753                                 h->scsi_host->host_no, logical_drive->bus,
1754                                 logical_drive->target, logical_drive->lun);
1755                         hpsa_turn_off_ioaccel_for_device(logical_drive);
1756                         logical_drive->queue_depth = 8;
1757                 }
1758         }
1759         if (nraid_map_entries)
1760                 /*
1761                  * This is correct for reads, too high for full stripe writes,
1762                  * and way too high for partial stripe writes.
1763                  */
1764                 logical_drive->queue_depth = qdepth;
1765         else {
1766                 if (logical_drive->external)
1767                         logical_drive->queue_depth = EXTERNAL_QD;
1768                 else
1769                         logical_drive->queue_depth = h->nr_cmds;
1770         }
1771 }
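/*
 * Worked example for the queue depth heuristic above (hypothetical numbers):
 * a RAID volume mapped onto three physical disks that each report
 * queue_depth 31 ends up with qdepth = min(h->nr_cmds, 31 + 31 + 31) = 93
 * when h->nr_cmds is larger than 93; the min() clamp keeps the logical drive
 * from ever advertising more commands than the controller's command pool holds.
 */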
1772
1773 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1774                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1775 {
1776         int i;
1777
1778         for (i = 0; i < ndevices; i++) {
1779                 if (dev[i] == NULL)
1780                         continue;
1781                 if (dev[i]->devtype != TYPE_DISK &&
1782                     dev[i]->devtype != TYPE_ZBC)
1783                         continue;
1784                 if (!is_logical_device(dev[i]))
1785                         continue;
1786
1787                 /*
1788                  * If offload is currently enabled, the RAID map and
1789                  * phys_disk[] assignment *better* not be changing
1790                  * because we would be changing ioaccel phys_disk[] pointers
1791                  * on an ioaccel volume that is processing I/O requests.
1792                  *
1793                  * If an ioaccel volume status changed, initially because it was
1794                  * re-configured and thus underwent a transformation, or
1795                  * a drive failed, we would have received a state change
1796                  * request and ioaccel should have been turned off. When the
1797                  * transformation completes, we get another state change
1798                  * request to turn ioaccel back on. In this case, we need
1799                  * to update the ioaccel information.
1800                  *
1801                  * Thus: If it is not currently enabled, but will be after
1802                  * the scan completes, make sure the ioaccel pointers
1803                  * are up to date.
1804                  */
1805
1806                 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1807                         hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1808         }
1809 }
1810
1811 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1812 {
1813         int rc = 0;
1814
1815         if (!h->scsi_host)
1816                 return 1;
1817
1818         if (is_logical_device(device)) /* RAID */
1819                 rc = scsi_add_device(h->scsi_host, device->bus,
1820                                         device->target, device->lun);
1821         else /* HBA */
1822                 rc = hpsa_add_sas_device(h->sas_host, device);
1823
1824         return rc;
1825 }
1826
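/*
 * Count commands in h->cmd_pool that are still active for the given device.
 * Taking a temporary reference on each slot (refcount > 1 means the slot is
 * owned by an in-flight command) keeps the command from being recycled while
 * it is examined; the reference is dropped again via cmd_free().
 */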
1827 static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1828                                                 struct hpsa_scsi_dev_t *dev)
1829 {
1830         int i;
1831         int count = 0;
1832
1833         for (i = 0; i < h->nr_cmds; i++) {
1834                 struct CommandList *c = h->cmd_pool + i;
1835                 int refcount = atomic_inc_return(&c->refcount);
1836
1837                 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1838                                 dev->scsi3addr)) {
1839                         unsigned long flags;
1840
1841                         spin_lock_irqsave(&h->lock, flags);     /* Implied MB */
1842                         if (!hpsa_is_cmd_idle(c))
1843                                 ++count;
1844                         spin_unlock_irqrestore(&h->lock, flags);
1845                 }
1846
1847                 cmd_free(h, c);
1848         }
1849
1850         return count;
1851 }
1852
1853 #define NUM_WAIT 20
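/*
 * Poll roughly once per second until no commands remain outstanding for the
 * device; external devices are given a larger poll budget. A warning is
 * logged if commands are still outstanding when the wait is exhausted.
 */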
1854 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1855                                                 struct hpsa_scsi_dev_t *device)
1856 {
1857         int cmds = 0;
1858         int waits = 0;
1859         int num_wait = NUM_WAIT;
1860
1861         if (device->external)
1862                 num_wait = HPSA_EH_PTRAID_TIMEOUT;
1863
1864         while (1) {
1865                 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1866                 if (cmds == 0)
1867                         break;
1868                 if (++waits > num_wait)
1869                         break;
1870                 msleep(1000);
1871         }
1872
1873         if (waits > num_wait) {
1874                 dev_warn(&h->pdev->dev,
1875                         "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1876                         __func__,
1877                         h->scsi_host->host_no,
1878                         device->bus, device->target, device->lun, cmds);
1879         }
1880 }
1881
1882 static void hpsa_remove_device(struct ctlr_info *h,
1883                         struct hpsa_scsi_dev_t *device)
1884 {
1885         struct scsi_device *sdev = NULL;
1886
1887         if (!h->scsi_host)
1888                 return;
1889
1890         /*
1891          * Allow for commands to drain
1892          */
1893         device->removed = 1;
1894         hpsa_wait_for_outstanding_commands_for_dev(h, device);
1895
1896         if (is_logical_device(device)) { /* RAID */
1897                 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1898                                                 device->target, device->lun);
1899                 if (sdev) {
1900                         scsi_remove_device(sdev);
1901                         scsi_device_put(sdev);
1902                 } else {
1903                         /*
1904                          * We don't expect to get here.  Future commands
1905                          * to this device will get a selection timeout as
1906                          * if the device were gone.
1907                          */
1908                         hpsa_show_dev_msg(KERN_WARNING, h, device,
1909                                         "didn't find device for removal.");
1910                 }
1911         } else { /* HBA */
1912
1913                 hpsa_remove_sas_device(device);
1914         }
1915 }
1916
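/*
 * Reconcile the freshly scanned device list sd[] with the driver's h->dev[]:
 * added, changed, and removed devices are collected so the SCSI midlayer can
 * be notified after h->devlock is dropped.
 */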
1917 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1918         struct hpsa_scsi_dev_t *sd[], int nsds)
1919 {
1920         /* sd contains scsi3 addresses and devtypes, and inquiry
1921          * data.  This function takes what's in sd to be the current
1922          * reality and updates h->dev[] to reflect that reality.
1923          */
1924         int i, entry, device_change, changes = 0;
1925         struct hpsa_scsi_dev_t *csd;
1926         unsigned long flags;
1927         struct hpsa_scsi_dev_t **added, **removed;
1928         int nadded, nremoved;
1929
1930         /*
1931          * A reset can cause a device's status to change;
1932          * re-schedule the scan to see what happened.
1933          */
1934         spin_lock_irqsave(&h->reset_lock, flags);
1935         if (h->reset_in_progress) {
1936                 h->drv_req_rescan = 1;
1937                 spin_unlock_irqrestore(&h->reset_lock, flags);
1938                 return;
1939         }
1940         spin_unlock_irqrestore(&h->reset_lock, flags);
1941
1942         added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1943         removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1944
1945         if (!added || !removed) {
1946                 dev_warn(&h->pdev->dev, "out of memory in "
1947                         "adjust_hpsa_scsi_table\n");
1948                 goto free_and_out;
1949         }
1950
1951         spin_lock_irqsave(&h->devlock, flags);
1952
1953         /* find any devices in h->dev[] that are not in
1954          * sd[] and remove them from h->dev[], and for any
1955          * devices which have changed, remove the old device
1956          * info and add the new device info.
1957          * If minor device attributes change, just update
1958          * the existing device structure.
1959          */
1960         i = 0;
1961         nremoved = 0;
1962         nadded = 0;
1963         while (i < h->ndevices) {
1964                 csd = h->dev[i];
1965                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1966                 if (device_change == DEVICE_NOT_FOUND) {
1967                         changes++;
1968                         hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1969                         continue; /* remove ^^^, hence i not incremented */
1970                 } else if (device_change == DEVICE_CHANGED) {
1971                         changes++;
1972                         hpsa_scsi_replace_entry(h, i, sd[entry],
1973                                 added, &nadded, removed, &nremoved);
1974                         /* Set it to NULL to prevent it from being freed
1975                          * at the bottom of hpsa_update_scsi_devices()
1976                          */
1977                         sd[entry] = NULL;
1978                 } else if (device_change == DEVICE_UPDATED) {
1979                         hpsa_scsi_update_entry(h, i, sd[entry]);
1980                 }
1981                 i++;
1982         }
1983
1984         /* Now, make sure every device listed in sd[] is also
1985          * listed in h->dev[], adding them if they aren't found
1986          */
1987
1988         for (i = 0; i < nsds; i++) {
1989                 if (!sd[i]) /* if already added above. */
1990                         continue;
1991
1992                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1993                  * as the SCSI mid-layer does not handle such devices well.
1994                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1995                  * at 160Hz, and prevents the system from coming up.
1996                  */
1997                 if (sd[i]->volume_offline) {
1998                         hpsa_show_volume_status(h, sd[i]);
1999                         hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
2000                         continue;
2001                 }
2002
2003                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
2004                                         h->ndevices, &entry);
2005                 if (device_change == DEVICE_NOT_FOUND) {
2006                         changes++;
2007                         if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
2008                                 break;
2009                         sd[i] = NULL; /* prevent from being freed later. */
2010                 } else if (device_change == DEVICE_CHANGED) {
2011                         /* should never happen... */
2012                         changes++;
2013                         dev_warn(&h->pdev->dev,
2014                                 "device unexpectedly changed.\n");
2015                         /* but if it does happen, we just ignore that device */
2016                 }
2017         }
2018         hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2019
2020         /*
2021          * Now that h->dev[]->phys_disk[] is coherent, we can enable
2022          * any logical drives that need it enabled.
2023          *
2024          * The raid map should be current by now.
2025          *
2026          * We are updating the device list used for I/O requests.
2027          */
2028         for (i = 0; i < h->ndevices; i++) {
2029                 if (h->dev[i] == NULL)
2030                         continue;
2031                 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2032         }
2033
2034         spin_unlock_irqrestore(&h->devlock, flags);
2035
2036         /* Monitor devices which are in one of several NOT READY states to be
2037          * brought online later. This must be done without holding h->devlock,
2038          * so don't touch h->dev[].
2039          */
2040         for (i = 0; i < nsds; i++) {
2041                 if (!sd[i]) /* if already added above. */
2042                         continue;
2043                 if (sd[i]->volume_offline)
2044                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2045         }
2046
2047         /* Don't notify the scsi mid layer of any changes the first time
2048          * through (or if there are no changes); on the first pass,
2049          * scsi_scan_host will do it later.
2050          */
2051         if (!changes)
2052                 goto free_and_out;
2053
2054         /* Notify scsi mid layer of any removed devices */
2055         for (i = 0; i < nremoved; i++) {
2056                 if (removed[i] == NULL)
2057                         continue;
2058                 if (removed[i]->expose_device)
2059                         hpsa_remove_device(h, removed[i]);
2060                 kfree(removed[i]);
2061                 removed[i] = NULL;
2062         }
2063
2064         /* Notify scsi mid layer of any added devices */
2065         for (i = 0; i < nadded; i++) {
2066                 int rc = 0;
2067
2068                 if (added[i] == NULL)
2069                         continue;
2070                 if (!(added[i]->expose_device))
2071                         continue;
2072                 rc = hpsa_add_device(h, added[i]);
2073                 if (!rc)
2074                         continue;
2075                 dev_warn(&h->pdev->dev,
2076                         "addition failed %d, device not added.", rc);
2077                 /* now we have to remove it from h->dev,
2078                  * since it didn't get added to scsi mid layer
2079                  */
2080                 fixup_botched_add(h, added[i]);
2081                 h->drv_req_rescan = 1;
2082         }
2083
2084 free_and_out:
2085         kfree(added);
2086         kfree(removed);
2087 }
2088
2089 /*
2090  * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
2091  * Assumes h->devlock is held.
2092  */
2093 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2094         int bus, int target, int lun)
2095 {
2096         int i;
2097         struct hpsa_scsi_dev_t *sd;
2098
2099         for (i = 0; i < h->ndevices; i++) {
2100                 sd = h->dev[i];
2101                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2102                         return sd;
2103         }
2104         return NULL;
2105 }
2106
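/*
 * Midlayer device-allocation callback (wired up as the host template's
 * .slave_alloc): find the driver's device structure for a newly created
 * scsi_device (by SAS rphy for physical devices, by bus/target/lun otherwise)
 * and stash it in sdev->hostdata when the device is exposed to the OS.
 */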
2107 static int hpsa_slave_alloc(struct scsi_device *sdev)
2108 {
2109         struct hpsa_scsi_dev_t *sd = NULL;
2110         unsigned long flags;
2111         struct ctlr_info *h;
2112
2113         h = sdev_to_hba(sdev);
2114         spin_lock_irqsave(&h->devlock, flags);
2115         if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2116                 struct scsi_target *starget;
2117                 struct sas_rphy *rphy;
2118
2119                 starget = scsi_target(sdev);
2120                 rphy = target_to_rphy(starget);
2121                 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2122                 if (sd) {
2123                         sd->target = sdev_id(sdev);
2124                         sd->lun = sdev->lun;
2125                 }
2126         }
2127         if (!sd)
2128                 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2129                                         sdev_id(sdev), sdev->lun);
2130
2131         if (sd && sd->expose_device) {
2132                 atomic_set(&sd->ioaccel_cmds_out, 0);
2133                 sdev->hostdata = sd;
2134         } else
2135                 sdev->hostdata = NULL;
2136         spin_unlock_irqrestore(&h->devlock, flags);
2137         return 0;
2138 }
2139
2140 /* configure scsi device based on internal per-device structure */
2141 #define CTLR_TIMEOUT (120 * HZ)
2142 static int hpsa_slave_configure(struct scsi_device *sdev)
2143 {
2144         struct hpsa_scsi_dev_t *sd;
2145         int queue_depth;
2146
2147         sd = sdev->hostdata;
2148         sdev->no_uld_attach = !sd || !sd->expose_device;
2149
2150         if (sd) {
2151                 sd->was_removed = 0;
2152                 queue_depth = sd->queue_depth != 0 ?
2153                                 sd->queue_depth : sdev->host->can_queue;
2154                 if (sd->external) {
2155                         queue_depth = EXTERNAL_QD;
2156                         sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2157                         blk_queue_rq_timeout(sdev->request_queue,
2158                                                 HPSA_EH_PTRAID_TIMEOUT);
2159                 }
2160                 if (is_hba_lunid(sd->scsi3addr)) {
2161                         sdev->eh_timeout = CTLR_TIMEOUT;
2162                         blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
2163                 }
2164         } else {
2165                 queue_depth = sdev->host->can_queue;
2166         }
2167
2168         scsi_change_queue_depth(sdev, queue_depth);
2169
2170         return 0;
2171 }
2172
2173 static void hpsa_slave_destroy(struct scsi_device *sdev)
2174 {
2175         struct hpsa_scsi_dev_t *hdev = NULL;
2176
2177         hdev = sdev->hostdata;
2178
2179         if (hdev)
2180                 hdev->was_removed = 1;
2181 }
2182
2183 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2184 {
2185         int i;
2186
2187         if (!h->ioaccel2_cmd_sg_list)
2188                 return;
2189         for (i = 0; i < h->nr_cmds; i++) {
2190                 kfree(h->ioaccel2_cmd_sg_list[i]);
2191                 h->ioaccel2_cmd_sg_list[i] = NULL;
2192         }
2193         kfree(h->ioaccel2_cmd_sg_list);
2194         h->ioaccel2_cmd_sg_list = NULL;
2195 }
2196
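/*
 * Allocate one scatter-gather chain block per command for the ioaccel2 path,
 * each large enough for h->maxsgentries elements; freed again by
 * hpsa_free_ioaccel2_sg_chain_blocks() above.
 */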
2197 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2198 {
2199         int i;
2200
2201         if (h->chainsize <= 0)
2202                 return 0;
2203
2204         h->ioaccel2_cmd_sg_list =
2205                 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2206                                         GFP_KERNEL);
2207         if (!h->ioaccel2_cmd_sg_list)
2208                 return -ENOMEM;
2209         for (i = 0; i < h->nr_cmds; i++) {
2210                 h->ioaccel2_cmd_sg_list[i] =
2211                         kmalloc_array(h->maxsgentries,
2212                                       sizeof(*h->ioaccel2_cmd_sg_list[i]),
2213                                       GFP_KERNEL);
2214                 if (!h->ioaccel2_cmd_sg_list[i])
2215                         goto clean;
2216         }
2217         return 0;
2218
2219 clean:
2220         hpsa_free_ioaccel2_sg_chain_blocks(h);
2221         return -ENOMEM;
2222 }
2223
2224 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2225 {
2226         int i;
2227
2228         if (!h->cmd_sg_list)
2229                 return;
2230         for (i = 0; i < h->nr_cmds; i++) {
2231                 kfree(h->cmd_sg_list[i]);
2232                 h->cmd_sg_list[i] = NULL;
2233         }
2234         kfree(h->cmd_sg_list);
2235         h->cmd_sg_list = NULL;
2236 }
2237
2238 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2239 {
2240         int i;
2241
2242         if (h->chainsize <= 0)
2243                 return 0;
2244
2245         h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2246                                  GFP_KERNEL);
2247         if (!h->cmd_sg_list)
2248                 return -ENOMEM;
2249
2250         for (i = 0; i < h->nr_cmds; i++) {
2251                 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2252                                                   sizeof(*h->cmd_sg_list[i]),
2253                                                   GFP_KERNEL);
2254                 if (!h->cmd_sg_list[i])
2255                         goto clean;
2256
2257         }
2258         return 0;
2259
2260 clean:
2261         hpsa_free_sg_chain_blocks(h);
2262         return -ENOMEM;
2263 }
2264
2265 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2266         struct io_accel2_cmd *cp, struct CommandList *c)
2267 {
2268         struct ioaccel2_sg_element *chain_block;
2269         u64 temp64;
2270         u32 chain_size;
2271
2272         chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2273         chain_size = le32_to_cpu(cp->sg[0].length);
2274         temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2275                                 DMA_TO_DEVICE);
2276         if (dma_mapping_error(&h->pdev->dev, temp64)) {
2277                 /* prevent subsequent unmapping */
2278                 cp->sg->address = 0;
2279                 return -1;
2280         }
2281         cp->sg->address = cpu_to_le64(temp64);
2282         return 0;
2283 }
2284
2285 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2286         struct io_accel2_cmd *cp)
2287 {
2288         struct ioaccel2_sg_element *chain_sg;
2289         u64 temp64;
2290         u32 chain_size;
2291
2292         chain_sg = cp->sg;
2293         temp64 = le64_to_cpu(chain_sg->address);
2294         chain_size = le32_to_cpu(cp->sg[0].length);
2295         dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2296 }
2297
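/*
 * When a request needs more SG entries than fit in the command itself, the
 * last embedded descriptor is turned into a chain pointer (HPSA_SG_CHAIN)
 * referencing a separately DMA-mapped block that holds the remaining entries;
 * hpsa_unmap_sg_chain_block() below undoes the mapping.
 */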
2298 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2299         struct CommandList *c)
2300 {
2301         struct SGDescriptor *chain_sg, *chain_block;
2302         u64 temp64;
2303         u32 chain_len;
2304
2305         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2306         chain_block = h->cmd_sg_list[c->cmdindex];
2307         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2308         chain_len = sizeof(*chain_sg) *
2309                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2310         chain_sg->Len = cpu_to_le32(chain_len);
2311         temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2312                                 DMA_TO_DEVICE);
2313         if (dma_mapping_error(&h->pdev->dev, temp64)) {
2314                 /* prevent subsequent unmapping */
2315                 chain_sg->Addr = cpu_to_le64(0);
2316                 return -1;
2317         }
2318         chain_sg->Addr = cpu_to_le64(temp64);
2319         return 0;
2320 }
2321
2322 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2323         struct CommandList *c)
2324 {
2325         struct SGDescriptor *chain_sg;
2326
2327         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2328                 return;
2329
2330         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2331         dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2332                         le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2333 }
2334
2335
2336 /* Decode the various types of errors on the ioaccel2 path.
2337  * Return 1 for any error that should generate a RAID path retry.
2338  * Return 0 for errors that don't require a RAID path retry.
2339  */
2340 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2341                                         struct CommandList *c,
2342                                         struct scsi_cmnd *cmd,
2343                                         struct io_accel2_cmd *c2,
2344                                         struct hpsa_scsi_dev_t *dev)
2345 {
2346         int data_len;
2347         int retry = 0;
2348         u32 ioaccel2_resid = 0;
2349
2350         switch (c2->error_data.serv_response) {
2351         case IOACCEL2_SERV_RESPONSE_COMPLETE:
2352                 switch (c2->error_data.status) {
2353                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2354                         if (cmd)
2355                                 cmd->result = 0;
2356                         break;
2357                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2358                         cmd->result |= SAM_STAT_CHECK_CONDITION;
2359                         if (c2->error_data.data_present !=
2360                                         IOACCEL2_SENSE_DATA_PRESENT) {
2361                                 memset(cmd->sense_buffer, 0,
2362                                         SCSI_SENSE_BUFFERSIZE);
2363                                 break;
2364                         }
2365                         /* copy the sense data */
2366                         data_len = c2->error_data.sense_data_len;
2367                         if (data_len > SCSI_SENSE_BUFFERSIZE)
2368                                 data_len = SCSI_SENSE_BUFFERSIZE;
2369                         if (data_len > sizeof(c2->error_data.sense_data_buff))
2370                                 data_len =
2371                                         sizeof(c2->error_data.sense_data_buff);
2372                         memcpy(cmd->sense_buffer,
2373                                 c2->error_data.sense_data_buff, data_len);
2374                         retry = 1;
2375                         break;
2376                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2377                         retry = 1;
2378                         break;
2379                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2380                         retry = 1;
2381                         break;
2382                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2383                         retry = 1;
2384                         break;
2385                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2386                         retry = 1;
2387                         break;
2388                 default:
2389                         retry = 1;
2390                         break;
2391                 }
2392                 break;
2393         case IOACCEL2_SERV_RESPONSE_FAILURE:
2394                 switch (c2->error_data.status) {
2395                 case IOACCEL2_STATUS_SR_IO_ERROR:
2396                 case IOACCEL2_STATUS_SR_IO_ABORTED:
2397                 case IOACCEL2_STATUS_SR_OVERRUN:
2398                         retry = 1;
2399                         break;
2400                 case IOACCEL2_STATUS_SR_UNDERRUN:
2401                         cmd->result = (DID_OK << 16);           /* host byte */
2402                         ioaccel2_resid = get_unaligned_le32(
2403                                                 &c2->error_data.resid_cnt[0]);
2404                         scsi_set_resid(cmd, ioaccel2_resid);
2405                         break;
2406                 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2407                 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2408                 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2409                         /*
2410                          * Did an HBA disk disappear? We will eventually
2411                          * get a state change event from the controller but
2412                          * in the meantime, we need to tell the OS that the
2413                          * HBA disk is no longer there and stop I/O
2414                          * from going down. This allows the potential re-insert
2415                          * of the disk to get the same device node.
2416                          */
2417                         if (dev->physical_device && dev->expose_device) {
2418                                 cmd->result = DID_NO_CONNECT << 16;
2419                                 dev->removed = 1;
2420                                 h->drv_req_rescan = 1;
2421                                 dev_warn(&h->pdev->dev,
2422                                         "%s: device is gone!\n", __func__);
2423                         } else
2424                                 /*
2425                                  * Retry by sending down the RAID path.
2426                                  * We will get an event from ctlr to
2427                                  * trigger rescan regardless.
2428                                  */
2429                                 retry = 1;
2430                         break;
2431                 default:
2432                         retry = 1;
2433                 }
2434                 break;
2435         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2436                 break;
2437         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2438                 break;
2439         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2440                 retry = 1;
2441                 break;
2442         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2443                 break;
2444         default:
2445                 retry = 1;
2446                 break;
2447         }
2448
2449         if (dev->in_reset)
2450                 retry = 0;
2451
2452         return retry;   /* retry on raid path? */
2453 }
2454
2455 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2456                 struct CommandList *c)
2457 {
2458         struct hpsa_scsi_dev_t *dev = c->device;
2459
2460         /*
2461          * Reset c->scsi_cmd here so that the reset handler will know
2462          * this command has completed.  Then, check to see if the handler is
2463          * waiting for this command, and, if so, wake it.
2464          */
2465         c->scsi_cmd = SCSI_CMD_IDLE;
2466         mb();   /* Declare command idle before checking for pending events. */
2467         if (dev) {
2468                 atomic_dec(&dev->commands_outstanding);
2469                 if (dev->in_reset &&
2470                         atomic_read(&dev->commands_outstanding) <= 0)
2471                         wake_up_all(&h->event_sync_wait_queue);
2472         }
2473 }
2474
2475 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2476                                       struct CommandList *c)
2477 {
2478         hpsa_cmd_resolve_events(h, c);
2479         cmd_tagged_free(h, c);
2480 }
2481
2482 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2483                 struct CommandList *c, struct scsi_cmnd *cmd)
2484 {
2485         hpsa_cmd_resolve_and_free(h, c);
2486         if (cmd && cmd->scsi_done)
2487                 cmd->scsi_done(cmd);
2488 }
2489
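/*
 * Re-queue a command for resubmission down the RAID path; the actual resubmit
 * happens from hpsa_command_resubmit_worker() on the driver's workqueue.
 */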
2490 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2491 {
2492         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2493         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2494 }
2495
2496 static void process_ioaccel2_completion(struct ctlr_info *h,
2497                 struct CommandList *c, struct scsi_cmnd *cmd,
2498                 struct hpsa_scsi_dev_t *dev)
2499 {
2500         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2501
2502         /* check for good status */
2503         if (likely(c2->error_data.serv_response == 0 &&
2504                         c2->error_data.status == 0)) {
2505                 cmd->result = 0;
2506                 return hpsa_cmd_free_and_done(h, c, cmd);
2507         }
2508
2509         /*
2510          * Any RAID offload error results in retry which will use
2511          * the normal I/O path so the controller can handle whatever is
2512          * wrong.
2513          */
2514         if (is_logical_device(dev) &&
2515                 c2->error_data.serv_response ==
2516                         IOACCEL2_SERV_RESPONSE_FAILURE) {
2517                 if (c2->error_data.status ==
2518                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2519                         hpsa_turn_off_ioaccel_for_device(dev);
2520                 }
2521
2522                 if (dev->in_reset) {
2523                         cmd->result = DID_RESET << 16;
2524                         return hpsa_cmd_free_and_done(h, c, cmd);
2525                 }
2526
2527                 return hpsa_retry_cmd(h, c);
2528         }
2529
2530         if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2531                 return hpsa_retry_cmd(h, c);
2532
2533         return hpsa_cmd_free_and_done(h, c, cmd);
2534 }
2535
2536 /* Returns 0 on success, < 0 otherwise. */
2537 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2538                                         struct CommandList *cp)
2539 {
2540         u8 tmf_status = cp->err_info->ScsiStatus;
2541
2542         switch (tmf_status) {
2543         case CISS_TMF_COMPLETE:
2544                 /*
2545                  * CISS_TMF_COMPLETE never happens; instead,
2546                  * ei->CommandStatus == 0 for this case.
2547                  */
2548         case CISS_TMF_SUCCESS:
2549                 return 0;
2550         case CISS_TMF_INVALID_FRAME:
2551         case CISS_TMF_NOT_SUPPORTED:
2552         case CISS_TMF_FAILED:
2553         case CISS_TMF_WRONG_LUN:
2554         case CISS_TMF_OVERLAPPED_TAG:
2555                 break;
2556         default:
2557                 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2558                                 tmf_status);
2559                 break;
2560         }
2561         return -tmf_status;
2562 }
2563
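/*
 * Main completion handler for SCSI commands: unmap DMA, translate the
 * controller's error info into a SCSI result and sense data, and either
 * finish the command or retry it down the RAID path.
 */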
2564 static void complete_scsi_command(struct CommandList *cp)
2565 {
2566         struct scsi_cmnd *cmd;
2567         struct ctlr_info *h;
2568         struct ErrorInfo *ei;
2569         struct hpsa_scsi_dev_t *dev;
2570         struct io_accel2_cmd *c2;
2571
2572         u8 sense_key;
2573         u8 asc;      /* additional sense code */
2574         u8 ascq;     /* additional sense code qualifier */
2575         unsigned long sense_data_size;
2576
2577         ei = cp->err_info;
2578         cmd = cp->scsi_cmd;
2579         h = cp->h;
2580
2581         if (!cmd->device) {
2582                 cmd->result = DID_NO_CONNECT << 16;
2583                 return hpsa_cmd_free_and_done(h, cp, cmd);
2584         }
2585
2586         dev = cmd->device->hostdata;
2587         if (!dev) {
2588                 cmd->result = DID_NO_CONNECT << 16;
2589                 return hpsa_cmd_free_and_done(h, cp, cmd);
2590         }
2591         c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2592
2593         scsi_dma_unmap(cmd); /* undo the DMA mappings */
2594         if ((cp->cmd_type == CMD_SCSI) &&
2595                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2596                 hpsa_unmap_sg_chain_block(h, cp);
2597
2598         if ((cp->cmd_type == CMD_IOACCEL2) &&
2599                 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2600                 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2601
2602         cmd->result = (DID_OK << 16);           /* host byte */
2603
2604         /* SCSI command has already been cleaned up in SML */
2605         if (dev->was_removed) {
2606                 hpsa_cmd_resolve_and_free(h, cp);
2607                 return;
2608         }
2609
2610         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2611                 if (dev->physical_device && dev->expose_device &&
2612                         dev->removed) {
2613                         cmd->result = DID_NO_CONNECT << 16;
2614                         return hpsa_cmd_free_and_done(h, cp, cmd);
2615                 }
2616                 if (likely(cp->phys_disk != NULL))
2617                         atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2618         }
2619
2620         /*
2621          * We check for lockup status here as it may be set for
2622          * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2623          * fail_all_outstanding_cmds()
2624          */
2625         if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2626                 /* DID_NO_CONNECT will prevent a retry */
2627                 cmd->result = DID_NO_CONNECT << 16;
2628                 return hpsa_cmd_free_and_done(h, cp, cmd);
2629         }
2630
2631         if (cp->cmd_type == CMD_IOACCEL2)
2632                 return process_ioaccel2_completion(h, cp, cmd, dev);
2633
2634         scsi_set_resid(cmd, ei->ResidualCnt);
2635         if (ei->CommandStatus == 0)
2636                 return hpsa_cmd_free_and_done(h, cp, cmd);
2637
2638         /* For I/O accelerator commands, copy over some fields to the normal
2639          * CISS header used below for error handling.
2640          */
2641         if (cp->cmd_type == CMD_IOACCEL1) {
2642                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2643                 cp->Header.SGList = scsi_sg_count(cmd);
2644                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2645                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2646                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
2647                 cp->Header.tag = c->tag;
2648                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2649                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2650
2651                 /* Any RAID offload error results in retry which will use
2652                  * the normal I/O path so the controller can handle whatever's
2653                  * wrong.
2654                  */
2655                 if (is_logical_device(dev)) {
2656                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2657                                 dev->offload_enabled = 0;
2658                         return hpsa_retry_cmd(h, cp);
2659                 }
2660         }
2661
2662         /* an error has occurred */
2663         switch (ei->CommandStatus) {
2664
2665         case CMD_TARGET_STATUS:
2666                 cmd->result |= ei->ScsiStatus;
2667                 /* copy the sense data */
2668                 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2669                         sense_data_size = SCSI_SENSE_BUFFERSIZE;
2670                 else
2671                         sense_data_size = sizeof(ei->SenseInfo);
2672                 if (ei->SenseLen < sense_data_size)
2673                         sense_data_size = ei->SenseLen;
2674                 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2675                 if (ei->ScsiStatus)
2676                         decode_sense_data(ei->SenseInfo, sense_data_size,
2677                                 &sense_key, &asc, &ascq);
2678                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2679                         switch (sense_key) {
2680                         case ABORTED_COMMAND:
2681                                 cmd->result |= DID_SOFT_ERROR << 16;
2682                                 break;
2683                         case UNIT_ATTENTION:
2684                                 if (asc == 0x3F && ascq == 0x0E)
2685                                         h->drv_req_rescan = 1;
2686                                 break;
2687                         case ILLEGAL_REQUEST:
2688                                 if (asc == 0x25 && ascq == 0x00) {
2689                                         dev->removed = 1;
2690                                         cmd->result = DID_NO_CONNECT << 16;
2691                                 }
2692                                 break;
2693                         }
2694                         break;
2695                 }
2696                 /* Problem was not a check condition
2697                  * Pass it up to the upper layers...
2698                  */
2699                 if (ei->ScsiStatus) {
2700                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2701                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2702                                 "Returning result: 0x%x\n",
2703                                 cp, ei->ScsiStatus,
2704                                 sense_key, asc, ascq,
2705                                 cmd->result);
2706                 } else {  /* scsi status is zero??? How??? */
2707                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2708                         "Returning no connection.\n", cp);
2709
2710                         /* Ordinarily, this case should never happen,
2711                          * but there is a bug in some released firmware
2712                          * revisions that allows it to happen if, for
2713                          * example, a 4100 backplane loses power and
2714                          * the tape drive is in it.  We assume that
2715                          * it's a fatal error of some kind because we
2716                          * can't show that it wasn't. We will make it
2717                          * look like selection timeout since that is
2718                          * the most common reason for this to occur,
2719                          * and it's severe enough.
2720                          */
2721
2722                         cmd->result = DID_NO_CONNECT << 16;
2723                 }
2724                 break;
2725
2726         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2727                 break;
2728         case CMD_DATA_OVERRUN:
2729                 dev_warn(&h->pdev->dev,
2730                         "CDB %16phN data overrun\n", cp->Request.CDB);
2731                 break;
2732         case CMD_INVALID: {
2733                 /* print_bytes(cp, sizeof(*cp), 1, 0);
2734                 print_cmd(cp); */
2735                 /* We get CMD_INVALID if you address a non-existent device
2736                  * instead of a selection timeout (no response).  You will
2737                  * see this if you yank out a drive, then try to access it.
2738                  * This is kind of a shame because it means that any other
2739                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
2740                  * missing target. */
2741                 cmd->result = DID_NO_CONNECT << 16;
2742         }
2743                 break;
2744         case CMD_PROTOCOL_ERR:
2745                 cmd->result = DID_ERROR << 16;
2746                 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2747                                 cp->Request.CDB);
2748                 break;
2749         case CMD_HARDWARE_ERR:
2750                 cmd->result = DID_ERROR << 16;
2751                 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2752                         cp->Request.CDB);
2753                 break;
2754         case CMD_CONNECTION_LOST:
2755                 cmd->result = DID_ERROR << 16;
2756                 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2757                         cp->Request.CDB);
2758                 break;
2759         case CMD_ABORTED:
2760                 cmd->result = DID_ABORT << 16;
2761                 break;
2762         case CMD_ABORT_FAILED:
2763                 cmd->result = DID_ERROR << 16;
2764                 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2765                         cp->Request.CDB);
2766                 break;
2767         case CMD_UNSOLICITED_ABORT:
2768                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2769                 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2770                         cp->Request.CDB);
2771                 break;
2772         case CMD_TIMEOUT:
2773                 cmd->result = DID_TIME_OUT << 16;
2774                 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2775                         cp->Request.CDB);
2776                 break;
2777         case CMD_UNABORTABLE:
2778                 cmd->result = DID_ERROR << 16;
2779                 dev_warn(&h->pdev->dev, "Command unabortable\n");
2780                 break;
2781         case CMD_TMF_STATUS:
2782                 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2783                         cmd->result = DID_ERROR << 16;
2784                 break;
2785         case CMD_IOACCEL_DISABLED:
2786                 /* This only handles the direct pass-through case since RAID
2787                  * offload is handled above.  Just attempt a retry.
2788                  */
2789                 cmd->result = DID_SOFT_ERROR << 16;
2790                 dev_warn(&h->pdev->dev,
2791                                 "cp %p had HP SSD Smart Path error\n", cp);
2792                 break;
2793         default:
2794                 cmd->result = DID_ERROR << 16;
2795                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2796                                 cp, ei->CommandStatus);
2797         }
2798
2799         return hpsa_cmd_free_and_done(h, cp, cmd);
2800 }
2801
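/*
 * Undo the dma_map_single() mappings described by the command's first
 * sg_used scatter-gather entries.
 */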
2802 static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2803                 int sg_used, enum dma_data_direction data_direction)
2804 {
2805         int i;
2806
2807         for (i = 0; i < sg_used; i++)
2808                 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2809                                 le32_to_cpu(c->SG[i].Len),
2810                                 data_direction);
2811 }
2812
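/*
 * Map a single contiguous buffer for DMA and describe it in the command's
 * one and only scatter-gather entry.  Returns 0 on success, -1 if the
 * mapping failed; zero-length or DMA_NONE requests get an empty SG list.
 */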
2813 static int hpsa_map_one(struct pci_dev *pdev,
2814                 struct CommandList *cp,
2815                 unsigned char *buf,
2816                 size_t buflen,
2817                 enum dma_data_direction data_direction)
2818 {
2819         u64 addr64;
2820
2821         if (buflen == 0 || data_direction == DMA_NONE) {
2822                 cp->Header.SGList = 0;
2823                 cp->Header.SGTotal = cpu_to_le16(0);
2824                 return 0;
2825         }
2826
2827         addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2828         if (dma_mapping_error(&pdev->dev, addr64)) {
2829                 /* Prevent subsequent unmap of something never mapped */
2830                 cp->Header.SGList = 0;
2831                 cp->Header.SGTotal = cpu_to_le16(0);
2832                 return -1;
2833         }
2834         cp->SG[0].Addr = cpu_to_le64(addr64);
2835         cp->SG[0].Len = cpu_to_le32(buflen);
2836         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2837         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2838         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2839         return 0;
2840 }
2841
2842 #define NO_TIMEOUT ((unsigned long) -1)
2843 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
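/*
 * Submit a driver-initiated command on the chosen reply queue and wait
 * (uninterruptibly) for its completion, bounded by timeout_msecs unless
 * NO_TIMEOUT is passed.  Returns IO_OK or -ETIMEDOUT.
 */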
2844 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2845         struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2846 {
2847         DECLARE_COMPLETION_ONSTACK(wait);
2848
2849         c->waiting = &wait;
2850         __enqueue_cmd_and_start_io(h, c, reply_queue);
2851         if (timeout_msecs == NO_TIMEOUT) {
2852                 /* TODO: get rid of this no-timeout thing */
2853                 wait_for_completion_io(&wait);
2854                 return IO_OK;
2855         }
2856         if (!wait_for_completion_io_timeout(&wait,
2857                                         msecs_to_jiffies(timeout_msecs))) {
2858                 dev_warn(&h->pdev->dev, "Command timed out.\n");
2859                 return -ETIMEDOUT;
2860         }
2861         return IO_OK;
2862 }
2863
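/*
 * As hpsa_scsi_do_simple_cmd_core(), but fail fast with CMD_CTLR_LOCKUP
 * status if the controller is already known to be locked up, so callers
 * never wait on a dead controller.
 */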
2864 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2865                                    int reply_queue, unsigned long timeout_msecs)
2866 {
2867         if (unlikely(lockup_detected(h))) {
2868                 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2869                 return IO_OK;
2870         }
2871         return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2872 }
2873
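/* Read this CPU's copy of the controller's lockup-detected flag. */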
2874 static u32 lockup_detected(struct ctlr_info *h)
2875 {
2876         int cpu;
2877         u32 rc, *lockup_detected;
2878
2879         cpu = get_cpu();
2880         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2881         rc = *lockup_detected;
2882         put_cpu();
2883         return rc;
2884 }
2885
2886 #define MAX_DRIVER_CMD_RETRIES 25
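/*
 * Issue a driver command, retrying while the target reports unit attention
 * or busy status, up to MAX_DRIVER_CMD_RETRIES attempts with an increasing
 * backoff delay between later attempts.  The single mapped data buffer is
 * unmapped before returning.
 */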
2887 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2888                 struct CommandList *c, enum dma_data_direction data_direction,
2889                 unsigned long timeout_msecs)
2890 {
2891         int backoff_time = 10, retry_count = 0;
2892         int rc;
2893
2894         do {
2895                 memset(c->err_info, 0, sizeof(*c->err_info));
2896                 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2897                                                   timeout_msecs);
2898                 if (rc)
2899                         break;
2900                 retry_count++;
2901                 if (retry_count > 3) {
2902                         msleep(backoff_time);
2903                         if (backoff_time < 1000)
2904                                 backoff_time *= 2;
2905                 }
2906         } while ((check_for_unit_attention(h, c) ||
2907                         check_for_busy(h, c)) &&
2908                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2909         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2910         if (retry_count > MAX_DRIVER_CMD_RETRIES)
2911                 rc = -EIO;
2912         return rc;
2913 }
2914
2915 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2916                                 struct CommandList *c)
2917 {
2918         const u8 *cdb = c->Request.CDB;
2919         const u8 *lun = c->Header.LUN.LunAddrBytes;
2920
2921         dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2922                  txt, lun, cdb);
2923 }
2924
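/*
 * Log a human-readable description of a failed driver-issued command,
 * decoding the sense data when the failure is a target status.
 */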
2925 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2926                         struct CommandList *cp)
2927 {
2928         const struct ErrorInfo *ei = cp->err_info;
2929         struct device *d = &cp->h->pdev->dev;
2930         u8 sense_key, asc, ascq;
2931         int sense_len;
2932
2933         switch (ei->CommandStatus) {
2934         case CMD_TARGET_STATUS:
2935                 if (ei->SenseLen > sizeof(ei->SenseInfo))
2936                         sense_len = sizeof(ei->SenseInfo);
2937                 else
2938                         sense_len = ei->SenseLen;
2939                 decode_sense_data(ei->SenseInfo, sense_len,
2940                                         &sense_key, &asc, &ascq);
2941                 hpsa_print_cmd(h, "SCSI status", cp);
2942                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2943                         dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2944                                 sense_key, asc, ascq);
2945                 else
2946                         dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2947                 if (ei->ScsiStatus == 0)
2948                         dev_warn(d, "SCSI status is abnormally zero.  "
2949                         "(probably indicates selection timeout "
2950                         "reported incorrectly due to a known "
2951                         "firmware bug, circa July, 2001.)\n");
2952                 break;
2953         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2954                 break;
2955         case CMD_DATA_OVERRUN:
2956                 hpsa_print_cmd(h, "overrun condition", cp);
2957                 break;
2958         case CMD_INVALID: {
2959                 /* controller unfortunately reports SCSI passthru's
2960                  * to non-existent targets as invalid commands.
2961                  */
2962                 hpsa_print_cmd(h, "invalid command", cp);
2963                 dev_warn(d, "probably means device no longer present\n");
2964                 }
2965                 break;
2966         case CMD_PROTOCOL_ERR:
2967                 hpsa_print_cmd(h, "protocol error", cp);
2968                 break;
2969         case CMD_HARDWARE_ERR:
2970                 hpsa_print_cmd(h, "hardware error", cp);
2971                 break;
2972         case CMD_CONNECTION_LOST:
2973                 hpsa_print_cmd(h, "connection lost", cp);
2974                 break;
2975         case CMD_ABORTED:
2976                 hpsa_print_cmd(h, "aborted", cp);
2977                 break;
2978         case CMD_ABORT_FAILED:
2979                 hpsa_print_cmd(h, "abort failed", cp);
2980                 break;
2981         case CMD_UNSOLICITED_ABORT:
2982                 hpsa_print_cmd(h, "unsolicited abort", cp);
2983                 break;
2984         case CMD_TIMEOUT:
2985                 hpsa_print_cmd(h, "timed out", cp);
2986                 break;
2987         case CMD_UNABORTABLE:
2988                 hpsa_print_cmd(h, "unabortable", cp);
2989                 break;
2990         case CMD_CTLR_LOCKUP:
2991                 hpsa_print_cmd(h, "controller lockup detected", cp);
2992                 break;
2993         default:
2994                 hpsa_print_cmd(h, "unknown status", cp);
2995                 dev_warn(d, "Unknown command status %x\n",
2996                                 ei->CommandStatus);
2997         }
2998 }
2999
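/*
 * Issue RECEIVE DIAGNOSTIC RESULTS for the given page and copy the
 * response into buf.  Returns 0 on success, -1 on any failure.
 */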
3000 static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
3001                                         u8 page, u8 *buf, size_t bufsize)
3002 {
3003         int rc = IO_OK;
3004         struct CommandList *c;
3005         struct ErrorInfo *ei;
3006
3007         c = cmd_alloc(h);
3008         if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
3009                         page, scsi3addr, TYPE_CMD)) {
3010                 rc = -1;
3011                 goto out;
3012         }
3013         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3014                         NO_TIMEOUT);
3015         if (rc)
3016                 goto out;
3017         ei = c->err_info;
3018         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3019                 hpsa_scsi_interpret_error(h, c);
3020                 rc = -1;
3021         }
3022 out:
3023         cmd_free(h, c);
3024         return rc;
3025 }
3026
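/*
 * Return the enclosure logical identifier, read as a big-endian 64-bit
 * value at offset 12 of the receive-diagnostic response, or 0 on failure.
 */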
3027 static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3028                                                 u8 *scsi3addr)
3029 {
3030         u8 *buf;
3031         u64 sa = 0;
3032         int rc = 0;
3033
3034         buf = kzalloc(1024, GFP_KERNEL);
3035         if (!buf)
3036                 return 0;
3037
3038         rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3039                                         buf, 1024);
3040
3041         if (rc)
3042                 goto out;
3043
3044         sa = get_unaligned_be64(buf+12);
3045
3046 out:
3047         kfree(buf);
3048         return sa;
3049 }
3050
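/*
 * Issue a (possibly VPD) INQUIRY to the addressed device and copy the
 * response into buf.  Returns 0 on success, -1 on any failure.
 */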
3051 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3052                         u16 page, unsigned char *buf,
3053                         unsigned char bufsize)
3054 {
3055         int rc = IO_OK;
3056         struct CommandList *c;
3057         struct ErrorInfo *ei;
3058
3059         c = cmd_alloc(h);
3060
3061         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3062                         page, scsi3addr, TYPE_CMD)) {
3063                 rc = -1;
3064                 goto out;
3065         }
3066         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3067                         NO_TIMEOUT);
3068         if (rc)
3069                 goto out;
3070         ei = c->err_info;
3071         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3072                 hpsa_scsi_interpret_error(h, c);
3073                 rc = -1;
3074         }
3075 out:
3076         cmd_free(h, c);
3077         return rc;
3078 }
3079
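/*
 * Send a reset message of the given type to the device and check the
 * resulting CommandStatus.  Returns IO_OK on success, non-zero on failure.
 */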
3080 static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3081         u8 reset_type, int reply_queue)
3082 {
3083         int rc = IO_OK;
3084         struct CommandList *c;
3085         struct ErrorInfo *ei;
3086
3087         c = cmd_alloc(h);
3088         c->device = dev;
3089
3090         /* fill_cmd can't fail here, no data buffer to map. */
3091         (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3092         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3093         if (rc) {
3094                 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3095                 goto out;
3096         }
3097         /* no unmap needed here because no data xfer. */
3098
3099         ei = c->err_info;
3100         if (ei->CommandStatus != 0) {
3101                 hpsa_scsi_interpret_error(h, c);
3102                 rc = -1;
3103         }
3104 out:
3105         cmd_free(h, c);
3106         return rc;
3107 }
3108
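/*
 * Return true if an in-flight command is addressed to the given device.
 * The test depends on the command type: CISS LUN address bytes, the
 * ioaccel phys_disk pointer, or the TMF it_nexus handle.  Idle or
 * not-yet-initialized commands never match.
 */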
3109 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3110                                struct hpsa_scsi_dev_t *dev,
3111                                unsigned char *scsi3addr)
3112 {
3113         int i;
3114         bool match = false;
3115         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3116         struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3117
3118         if (hpsa_is_cmd_idle(c))
3119                 return false;
3120
3121         switch (c->cmd_type) {
3122         case CMD_SCSI:
3123         case CMD_IOCTL_PEND:
3124                 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3125                                 sizeof(c->Header.LUN.LunAddrBytes));
3126                 break;
3127
3128         case CMD_IOACCEL1:
3129         case CMD_IOACCEL2:
3130                 if (c->phys_disk == dev) {
3131                         /* HBA mode match */
3132                         match = true;
3133                 } else {
3134                         /* Possible RAID mode -- check each phys dev. */
3135                         /* FIXME:  Do we need to take out a lock here?  If
3136                          * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3137                          * instead. */
3138                         for (i = 0; i < dev->nphysical_disks && !match; i++) {
3139                                 /* FIXME: an alternate test might be
3140                                  *
3141                                  * match = dev->phys_disk[i]->ioaccel_handle
3142                                  *              == c2->scsi_nexus;      */
3143                                 match = dev->phys_disk[i] == c->phys_disk;
3144                         }
3145                 }
3146                 break;
3147
3148         case IOACCEL2_TMF:
3149                 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3150                         match = dev->phys_disk[i]->ioaccel_handle ==
3151                                         le32_to_cpu(ac->it_nexus);
3152                 }
3153                 break;
3154
3155         case 0:         /* The command is in the middle of being initialized. */
3156                 match = false;
3157                 break;
3158
3159         default:
3160                 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3161                         c->cmd_type);
3162                 BUG();
3163         }
3164
3165         return match;
3166 }
3167
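/*
 * Serialize resets via h->reset_mutex, send the reset, then wait for the
 * device's outstanding commands to drain (or a controller lockup) before
 * verifying that the device has become ready again.
 */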
3168 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3169         u8 reset_type, int reply_queue)
3170 {
3171         int rc = 0;
3172
3173         /* We can really only handle one reset at a time */
3174         if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3175                 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3176                 return -EINTR;
3177         }
3178
3179         rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3180         if (!rc) {
3181                 /* incremented by sending the reset request */
3182                 atomic_dec(&dev->commands_outstanding);
3183                 wait_event(h->event_sync_wait_queue,
3184                         atomic_read(&dev->commands_outstanding) <= 0 ||
3185                         lockup_detected(h));
3186         }
3187
3188         if (unlikely(lockup_detected(h))) {
3189                 dev_warn(&h->pdev->dev,
3190                          "Controller lockup detected during reset wait\n");
3191                 rc = -ENODEV;
3192         }
3193
3194         if (!rc)
3195                 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3196
3197         mutex_unlock(&h->reset_mutex);
3198         return rc;
3199 }
3200
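/*
 * Read the RAID level from the logical-volume device-geometry VPD page;
 * *raid_level is left as RAID_UNKNOWN if the page is unsupported or the
 * inquiry fails.
 */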
3201 static void hpsa_get_raid_level(struct ctlr_info *h,
3202         unsigned char *scsi3addr, unsigned char *raid_level)
3203 {
3204         int rc;
3205         unsigned char *buf;
3206
3207         *raid_level = RAID_UNKNOWN;
3208         buf = kzalloc(64, GFP_KERNEL);
3209         if (!buf)
3210                 return;
3211
3212         if (!hpsa_vpd_page_supported(h, scsi3addr,
3213                 HPSA_VPD_LV_DEVICE_GEOMETRY))
3214                 goto exit;
3215
3216         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3217                 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3218
3219         if (rc == 0)
3220                 *raid_level = buf[8];
3221         if (*raid_level > RAID_UNKNOWN)
3222                 *raid_level = RAID_UNKNOWN;
3223 exit:
3224         kfree(buf);
3225         return;
3226 }
3227
3228 #define HPSA_MAP_DEBUG
3229 #ifdef HPSA_MAP_DEBUG
3230 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3231                                 struct raid_map_data *map_buff)
3232 {
3233         struct raid_map_disk_data *dd = &map_buff->data[0];
3234         int map, row, col;
3235         u16 map_cnt, row_cnt, disks_per_row;
3236
3237         if (rc != 0)
3238                 return;
3239
3240         /* Show details only if debugging has been activated. */
3241         if (h->raid_offload_debug < 2)
3242                 return;
3243
3244         dev_info(&h->pdev->dev, "structure_size = %u\n",
3245                                 le32_to_cpu(map_buff->structure_size));
3246         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3247                         le32_to_cpu(map_buff->volume_blk_size));
3248         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3249                         le64_to_cpu(map_buff->volume_blk_cnt));
3250         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3251                         map_buff->phys_blk_shift);
3252         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3253                         map_buff->parity_rotation_shift);
3254         dev_info(&h->pdev->dev, "strip_size = %u\n",
3255                         le16_to_cpu(map_buff->strip_size));
3256         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3257                         le64_to_cpu(map_buff->disk_starting_blk));
3258         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3259                         le64_to_cpu(map_buff->disk_blk_cnt));
3260         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3261                         le16_to_cpu(map_buff->data_disks_per_row));
3262         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3263                         le16_to_cpu(map_buff->metadata_disks_per_row));
3264         dev_info(&h->pdev->dev, "row_cnt = %u\n",
3265                         le16_to_cpu(map_buff->row_cnt));
3266         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3267                         le16_to_cpu(map_buff->layout_map_count));
3268         dev_info(&h->pdev->dev, "flags = 0x%x\n",
3269                         le16_to_cpu(map_buff->flags));
3270         dev_info(&h->pdev->dev, "encryption = %s\n",
3271                         le16_to_cpu(map_buff->flags) &
3272                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
3273         dev_info(&h->pdev->dev, "dekindex = %u\n",
3274                         le16_to_cpu(map_buff->dekindex));
3275         map_cnt = le16_to_cpu(map_buff->layout_map_count);
3276         for (map = 0; map < map_cnt; map++) {
3277                 dev_info(&h->pdev->dev, "Map%u:\n", map);
3278                 row_cnt = le16_to_cpu(map_buff->row_cnt);
3279                 for (row = 0; row < row_cnt; row++) {
3280                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
3281                         disks_per_row =
3282                                 le16_to_cpu(map_buff->data_disks_per_row);
3283                         for (col = 0; col < disks_per_row; col++, dd++)
3284                                 dev_info(&h->pdev->dev,
3285                                         "    D%02u: h=0x%04x xor=%u,%u\n",
3286                                         col, dd->ioaccel_handle,
3287                                         dd->xor_mult[0], dd->xor_mult[1]);
3288                         disks_per_row =
3289                                 le16_to_cpu(map_buff->metadata_disks_per_row);
3290                         for (col = 0; col < disks_per_row; col++, dd++)
3291                                 dev_info(&h->pdev->dev,
3292                                         "    M%02u: h=0x%04x xor=%u,%u\n",
3293                                         col, dd->ioaccel_handle,
3294                                         dd->xor_mult[0], dd->xor_mult[1]);
3295                 }
3296         }
3297 }
3298 #else
3299 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3300                         __attribute__((unused)) int rc,
3301                         __attribute__((unused)) struct raid_map_data *map_buff)
3302 {
3303 }
3304 #endif
3305
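/*
 * Fetch the RAID map for a logical volume into this_device->raid_map,
 * verify the reported structure_size fits the preallocated buffer, and
 * hand the result to hpsa_debug_map_buff() for optional debug logging.
 */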
3306 static int hpsa_get_raid_map(struct ctlr_info *h,
3307         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3308 {
3309         int rc = 0;
3310         struct CommandList *c;
3311         struct ErrorInfo *ei;
3312
3313         c = cmd_alloc(h);
3314
3315         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3316                         sizeof(this_device->raid_map), 0,
3317                         scsi3addr, TYPE_CMD)) {
3318                 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3319                 cmd_free(h, c);
3320                 return -1;
3321         }
3322         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3323                         NO_TIMEOUT);
3324         if (rc)
3325                 goto out;
3326         ei = c->err_info;
3327         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3328                 hpsa_scsi_interpret_error(h, c);
3329                 rc = -1;
3330                 goto out;
3331         }
3332         cmd_free(h, c);
3333
3334         /* @todo in the future, dynamically allocate RAID map memory */
3335         if (le32_to_cpu(this_device->raid_map.structure_size) >
3336                                 sizeof(this_device->raid_map)) {
3337                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3338                 rc = -1;
3339         }
3340         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3341         return rc;
3342 out:
3343         cmd_free(h, c);
3344         return rc;
3345 }
3346
3347 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3348                 unsigned char scsi3addr[], u16 bmic_device_index,
3349                 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3350 {
3351         int rc = IO_OK;
3352         struct CommandList *c;
3353         struct ErrorInfo *ei;
3354
3355         c = cmd_alloc(h);
3356
3357         rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3358                 0, RAID_CTLR_LUNID, TYPE_CMD);
3359         if (rc)
3360                 goto out;
3361
3362         c->Request.CDB[2] = bmic_device_index & 0xff;
3363         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3364
3365         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3366                         NO_TIMEOUT);
3367         if (rc)
3368                 goto out;
3369         ei = c->err_info;
3370         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3371                 hpsa_scsi_interpret_error(h, c);
3372                 rc = -1;
3373         }
3374 out:
3375         cmd_free(h, c);
3376         return rc;
3377 }
3378
3379 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3380         struct bmic_identify_controller *buf, size_t bufsize)
3381 {
3382         int rc = IO_OK;
3383         struct CommandList *c;
3384         struct ErrorInfo *ei;
3385
3386         c = cmd_alloc(h);
3387
3388         rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3389                 0, RAID_CTLR_LUNID, TYPE_CMD);
3390         if (rc)
3391                 goto out;
3392
3393         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3394                         NO_TIMEOUT);
3395         if (rc)
3396                 goto out;
3397         ei = c->err_info;
3398         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3399                 hpsa_scsi_interpret_error(h, c);
3400                 rc = -1;
3401         }
3402 out:
3403         cmd_free(h, c);
3404         return rc;
3405 }
3406
3407 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3408                 unsigned char scsi3addr[], u16 bmic_device_index,
3409                 struct bmic_identify_physical_device *buf, size_t bufsize)
3410 {
3411         int rc = IO_OK;
3412         struct CommandList *c;
3413         struct ErrorInfo *ei;
3414
3415         c = cmd_alloc(h);
3416         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3417                 0, RAID_CTLR_LUNID, TYPE_CMD);
3418         if (rc)
3419                 goto out;
3420
3421         c->Request.CDB[2] = bmic_device_index & 0xff;
3422         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3423
3424         hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3425                                                 NO_TIMEOUT);
3426         ei = c->err_info;
3427         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3428                 hpsa_scsi_interpret_error(h, c);
3429                 rc = -1;
3430         }
3431 out:
3432         cmd_free(h, c);
3433
3434         return rc;
3435 }
3436
3437 /*
3438  * get enclosure information
3439  * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3440  * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3441  * Uses id_physical_device to determine the box_index.
3442  */
3443 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3444                         unsigned char *scsi3addr,
3445                         struct ReportExtendedLUNdata *rlep, int rle_index,
3446                         struct hpsa_scsi_dev_t *encl_dev)
3447 {
3448         int rc = -1;
3449         struct CommandList *c = NULL;
3450         struct ErrorInfo *ei = NULL;
3451         struct bmic_sense_storage_box_params *bssbp = NULL;
3452         struct bmic_identify_physical_device *id_phys = NULL;
3453         struct ext_report_lun_entry *rle;
3454         u16 bmic_device_index = 0;
3455
3456         if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
3457                 return;
3458
3459         rle = &rlep->LUN[rle_index];
3460
3461         encl_dev->eli =
3462                 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3463
3464         bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3465
3466         if (encl_dev->target == -1 || encl_dev->lun == -1) {
3467                 rc = IO_OK;
3468                 goto out;
3469         }
3470
3471         if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3472                 rc = IO_OK;
3473                 goto out;
3474         }
3475
3476         bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3477         if (!bssbp)
3478                 goto out;
3479
3480         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3481         if (!id_phys)
3482                 goto out;
3483
3484         rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3485                                                 id_phys, sizeof(*id_phys));
3486         if (rc) {
3487                 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3488                         __func__, encl_dev->external, bmic_device_index);
3489                 goto out;
3490         }
3491
3492         c = cmd_alloc(h);
3493
3494         rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3495                         sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3496
3497         if (rc)
3498                 goto out;
3499
3500         if (id_phys->phys_connector[1] == 'E')
3501                 c->Request.CDB[5] = id_phys->box_index;
3502         else
3503                 c->Request.CDB[5] = 0;
3504
3505         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3506                                                 NO_TIMEOUT);
3507         if (rc)
3508                 goto out;
3509
3510         ei = c->err_info;
3511         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3512                 rc = -1;
3513                 goto out;
3514         }
3515
3516         encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3517         memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3518                 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3519
3520         rc = IO_OK;
3521 out:
3522         kfree(bssbp);
3523         kfree(id_phys);
3524
3525         if (c)
3526                 cmd_free(h, c);
3527
3528         if (rc != IO_OK)
3529                 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3530                         "Error, could not get enclosure information");
3531 }
3532
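/*
 * Scan the extended report-physical-LUNs response for the given LUN ID
 * and return its WWID as the SAS address, or 0 if it is not found.
 */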
3533 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3534                                                 unsigned char *scsi3addr)
3535 {
3536         struct ReportExtendedLUNdata *physdev;
3537         u32 nphysicals;
3538         u64 sa = 0;
3539         int i;
3540
3541         physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3542         if (!physdev)
3543                 return 0;
3544
3545         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3546                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3547                 kfree(physdev);
3548                 return 0;
3549         }
3550         nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3551
3552         for (i = 0; i < nphysicals; i++)
3553                 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3554                         sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3555                         break;
3556                 }
3557
3558         kfree(physdev);
3559
3560         return sa;
3561 }
3562
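/*
 * Determine a device's SAS address: for the controller LUN use BMIC
 * sense-subsystem-information (caching the value in h->sas_address),
 * otherwise look it up in the report-physical-LUNs data.
 */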
3563 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3564                                         struct hpsa_scsi_dev_t *dev)
3565 {
3566         int rc;
3567         u64 sa = 0;
3568
3569         if (is_hba_lunid(scsi3addr)) {
3570                 struct bmic_sense_subsystem_info *ssi;
3571
3572                 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3573                 if (!ssi)
3574                         return;
3575
3576                 rc = hpsa_bmic_sense_subsystem_information(h,
3577                                         scsi3addr, 0, ssi, sizeof(*ssi));
3578                 if (rc == 0) {
3579                         sa = get_unaligned_be64(ssi->primary_world_wide_id);
3580                         h->sas_address = sa;
3581                 }
3582
3583                 kfree(ssi);
3584         } else
3585                 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3586
3587         dev->sas_address = sa;
3588 }
3589
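/*
 * If report-physical-LUNs shows an external array controller behind this
 * adapter, disable report-LUN-data caching and enable discovery polling.
 */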
3590 static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3591         struct ReportExtendedLUNdata *physdev)
3592 {
3593         u32 nphysicals;
3594         int i;
3595
3596         if (h->discovery_polling)
3597                 return;
3598
3599         nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3600
3601         for (i = 0; i < nphysicals; i++) {
3602                 if (physdev->LUN[i].device_type ==
3603                         BMIC_DEVICE_TYPE_CONTROLLER
3604                         && !is_hba_lunid(physdev->LUN[i].lunid)) {
3605                         dev_info(&h->pdev->dev,
3606                                 "External controller present, activate discovery polling and disable rld caching\n");
3607                         hpsa_disable_rld_caching(h);
3608                         h->discovery_polling = 1;
3609                         break;
3610                 }
3611         }
3612 }
3613
3614 /* Determine whether the device supports a given SCSI VPD page */
3615 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3616         unsigned char scsi3addr[], u8 page)
3617 {
3618         int rc;
3619         int i;
3620         int pages;
3621         unsigned char *buf, bufsize;
3622
3623         buf = kzalloc(256, GFP_KERNEL);
3624         if (!buf)
3625                 return false;
3626
3627         /* Get the size of the page list first */
3628         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3629                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3630                                 buf, HPSA_VPD_HEADER_SZ);
3631         if (rc != 0)
3632                 goto exit_unsupported;
3633         pages = buf[3];
3634         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3635                 bufsize = pages + HPSA_VPD_HEADER_SZ;
3636         else
3637                 bufsize = 255;
3638
3639         /* Get the whole VPD page list */
3640         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3641                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3642                                 buf, bufsize);
3643         if (rc != 0)
3644                 goto exit_unsupported;
3645
3646         pages = buf[3];
3647         for (i = 1; i <= pages; i++)
3648                 if (buf[3 + i] == page)
3649                         goto exit_supported;
3650 exit_unsupported:
3651         kfree(buf);
3652         return false;
3653 exit_supported:
3654         kfree(buf);
3655         return true;
3656 }
3657
3658 /*
3659  * Called during a scan operation.
3660  * Sets ioaccel status on the new device list, not the existing device list
3661  *
3662  * The device list used during I/O will be updated later in
3663  * adjust_hpsa_scsi_table.
3664  */
3665 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3666         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3667 {
3668         int rc;
3669         unsigned char *buf;
3670         u8 ioaccel_status;
3671
3672         this_device->offload_config = 0;
3673         this_device->offload_enabled = 0;
3674         this_device->offload_to_be_enabled = 0;
3675
3676         buf = kzalloc(64, GFP_KERNEL);
3677         if (!buf)
3678                 return;
3679         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3680                 goto out;
3681         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3682                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3683         if (rc != 0)
3684                 goto out;
3685
3686 #define IOACCEL_STATUS_BYTE 4
3687 #define OFFLOAD_CONFIGURED_BIT 0x01
3688 #define OFFLOAD_ENABLED_BIT 0x02
3689         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3690         this_device->offload_config =
3691                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3692         if (this_device->offload_config) {
3693                 bool offload_enabled =
3694                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3695                 /*
3696                  * Check to see if offload can be enabled.
3697                  */
3698                 if (offload_enabled) {
3699                         rc = hpsa_get_raid_map(h, scsi3addr, this_device);
3700                         if (rc) /* could not load raid_map */
3701                                 goto out;
3702                         this_device->offload_to_be_enabled = 1;
3703                 }
3704         }
3705
3706 out:
3707         kfree(buf);
3708         return;
3709 }
3710
3711 /* Get the device id from inquiry page 0x83 */
3712 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3713         unsigned char *device_id, int index, int buflen)
3714 {
3715         int rc;
3716         unsigned char *buf;
3717
3718         /* Does controller have VPD for device id? */
3719         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3720                 return 1; /* not supported */
3721
3722         buf = kzalloc(64, GFP_KERNEL);
3723         if (!buf)
3724                 return -ENOMEM;
3725
3726         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3727                                         HPSA_VPD_LV_DEVICE_ID, buf, 64);
3728         if (rc == 0) {
3729                 if (buflen > 16)
3730                         buflen = 16;
3731                 memcpy(device_id, &buf[8], buflen);
3732         }
3733
3734         kfree(buf);
3735
3736         return rc; /* 0 - got id, otherwise didn't */
3737 }
3738
3739 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3740                 void *buf, int bufsize,
3741                 int extended_response)
3742 {
3743         int rc = IO_OK;
3744         struct CommandList *c;
3745         unsigned char scsi3addr[8];
3746         struct ErrorInfo *ei;
3747
3748         c = cmd_alloc(h);
3749
3750         /* address the controller */
3751         memset(scsi3addr, 0, sizeof(scsi3addr));
3752         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3753                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3754                 rc = -EAGAIN;
3755                 goto out;
3756         }
3757         if (extended_response)
3758                 c->Request.CDB[1] = extended_response;
3759         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3760                         NO_TIMEOUT);
3761         if (rc)
3762                 goto out;
3763         ei = c->err_info;
3764         if (ei->CommandStatus != 0 &&
3765             ei->CommandStatus != CMD_DATA_UNDERRUN) {
3766                 hpsa_scsi_interpret_error(h, c);
3767                 rc = -EIO;
3768         } else {
3769                 struct ReportLUNdata *rld = buf;
3770
3771                 if (rld->extended_response_flag != extended_response) {
3772                         if (!h->legacy_board) {
3773                                 dev_err(&h->pdev->dev,
3774                                         "report luns requested format %u, got %u\n",
3775                                         extended_response,
3776                                         rld->extended_response_flag);
3777                                 rc = -EINVAL;
3778                         } else
3779                                 rc = -EOPNOTSUPP;
3780                 }
3781         }
3782 out:
3783         cmd_free(h, c);
3784         return rc;
3785 }
3786
3787 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3788                 struct ReportExtendedLUNdata *buf, int bufsize)
3789 {
3790         int rc;
3791         struct ReportLUNdata *lbuf;
3792
3793         rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3794                                       HPSA_REPORT_PHYS_EXTENDED);
3795         if (!rc || rc != -EOPNOTSUPP)
3796                 return rc;
3797
3798         /* REPORT PHYS EXTENDED is not supported */
3799         lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3800         if (!lbuf)
3801                 return -ENOMEM;
3802
3803         rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3804         if (!rc) {
3805                 int i;
3806                 u32 nphys;
3807
3808                 /* Copy ReportLUNdata header */
3809                 memcpy(buf, lbuf, 8);
3810                 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3811                 for (i = 0; i < nphys; i++)
3812                         memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3813         }
3814         kfree(lbuf);
3815         return rc;
3816 }
3817
3818 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3819                 struct ReportLUNdata *buf, int bufsize)
3820 {
3821         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3822 }
3823
3824 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3825         int bus, int target, int lun)
3826 {
3827         device->bus = bus;
3828         device->target = target;
3829         device->lun = lun;
3830 }
3831
3832 /* Use VPD inquiry to get details of volume status */
3833 static int hpsa_get_volume_status(struct ctlr_info *h,
3834                                         unsigned char scsi3addr[])
3835 {
3836         int rc;
3837         int status;
3838         int size;
3839         unsigned char *buf;
3840
3841         buf = kzalloc(64, GFP_KERNEL);
3842         if (!buf)
3843                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3844
3845         /* Does controller have VPD for logical volume status? */
3846         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3847                 goto exit_failed;
3848
3849         /* Get the size of the VPD return buffer */
3850         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3851                                         buf, HPSA_VPD_HEADER_SZ);
3852         if (rc != 0)
3853                 goto exit_failed;
3854         size = buf[3];
3855
3856         /* Now get the whole VPD buffer */
3857         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3858                                         buf, size + HPSA_VPD_HEADER_SZ);
3859         if (rc != 0)
3860                 goto exit_failed;
3861         status = buf[4]; /* status byte */
3862
3863         kfree(buf);
3864         return status;
3865 exit_failed:
3866         kfree(buf);
3867         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3868 }
3869
3870 /* Determine offline status of a volume.
3871  * Return either:
3872  *  0 (not offline)
3873  *  0xff (offline for unknown reasons)
3874  *  # (integer code indicating one of several NOT READY states
3875  *     describing why a volume is to be kept offline)
3876  */
3877 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3878                                         unsigned char scsi3addr[])
3879 {
3880         struct CommandList *c;
3881         unsigned char *sense;
3882         u8 sense_key, asc, ascq;
3883         int sense_len;
3884         int rc, ldstat = 0;
3885 #define ASC_LUN_NOT_READY 0x04
3886 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3887 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3888
3889         c = cmd_alloc(h);
3890
3891         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3892         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3893                                         NO_TIMEOUT);
3894         if (rc) {
3895                 cmd_free(h, c);
3896                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3897         }
3898         sense = c->err_info->SenseInfo;
3899         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3900                 sense_len = sizeof(c->err_info->SenseInfo);
3901         else
3902                 sense_len = c->err_info->SenseLen;
3903         decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3904         cmd_free(h, c);
3905
3906         /* Determine the reason for not ready state */
3907         ldstat = hpsa_get_volume_status(h, scsi3addr);
3908
3909         /* Keep volume offline in certain cases: */
3910         switch (ldstat) {
3911         case HPSA_LV_FAILED:
3912         case HPSA_LV_UNDERGOING_ERASE:
3913         case HPSA_LV_NOT_AVAILABLE:
3914         case HPSA_LV_UNDERGOING_RPI:
3915         case HPSA_LV_PENDING_RPI:
3916         case HPSA_LV_ENCRYPTED_NO_KEY:
3917         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3918         case HPSA_LV_UNDERGOING_ENCRYPTION:
3919         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3920         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3921                 return ldstat;
3922         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3923                 /* If VPD status page isn't available,
3924                  * use ASC/ASCQ to determine state
3925                  */
3926                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3927                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3928                         return ldstat;
3929                 break;
3930         default:
3931                 break;
3932         }
3933         return HPSA_LV_OK;
3934 }
3935
3936 static int hpsa_update_device_info(struct ctlr_info *h,
3937         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3938         unsigned char *is_OBDR_device)
3939 {
3940
3941 #define OBDR_SIG_OFFSET 43
3942 #define OBDR_TAPE_SIG "$DR-10"
3943 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3944 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3945
3946         unsigned char *inq_buff;
3947         unsigned char *obdr_sig;
3948         int rc = 0;
3949
3950         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3951         if (!inq_buff) {
3952                 rc = -ENOMEM;
3953                 goto bail_out;
3954         }
3955
3956         /* Do an inquiry to the device to see what it is. */
3957         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3958                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3959                 dev_err(&h->pdev->dev,
3960                         "%s: inquiry failed, device will be skipped.\n",
3961                         __func__);
3962                 rc = HPSA_INQUIRY_FAILED;
3963                 goto bail_out;
3964         }
3965
3966         scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3967         scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3968
3969         this_device->devtype = (inq_buff[0] & 0x1f);
3970         memcpy(this_device->scsi3addr, scsi3addr, 8);
3971         memcpy(this_device->vendor, &inq_buff[8],
3972                 sizeof(this_device->vendor));
3973         memcpy(this_device->model, &inq_buff[16],
3974                 sizeof(this_device->model));
3975         this_device->rev = inq_buff[2];
3976         memset(this_device->device_id, 0,
3977                 sizeof(this_device->device_id));
3978         if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3979                 sizeof(this_device->device_id)) < 0) {
3980                 dev_err(&h->pdev->dev,
3981                         "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3982                         h->ctlr, __func__,
3983                         h->scsi_host->host_no,
3984                         this_device->bus, this_device->target,
3985                         this_device->lun,
3986                         scsi_device_type(this_device->devtype),
3987                         this_device->model);
3988                 rc = HPSA_LV_FAILED;
3989                 goto bail_out;
3990         }
3991
3992         if ((this_device->devtype == TYPE_DISK ||
3993                 this_device->devtype == TYPE_ZBC) &&
3994                 is_logical_dev_addr_mode(scsi3addr)) {
3995                 unsigned char volume_offline;
3996
3997                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3998                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3999                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
4000                 volume_offline = hpsa_volume_offline(h, scsi3addr);
4001                 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
4002                     h->legacy_board) {
4003                         /*
4004                          * Legacy boards might not support volume status
4005                          */
4006                         dev_info(&h->pdev->dev,
4007                                  "C0:T%d:L%d Volume status not available, assuming online.\n",
4008                                  this_device->target, this_device->lun);
4009                         volume_offline = 0;
4010                 }
4011                 this_device->volume_offline = volume_offline;
4012                 if (volume_offline == HPSA_LV_FAILED) {
4013                         rc = HPSA_LV_FAILED;
4014                         dev_err(&h->pdev->dev,
4015                                 "%s: LV failed, device will be skipped.\n",
4016                                 __func__);
4017                         goto bail_out;
4018                 }
4019         } else {
4020                 this_device->raid_level = RAID_UNKNOWN;
4021                 this_device->offload_config = 0;
4022                 hpsa_turn_off_ioaccel_for_device(this_device);
4023                 this_device->hba_ioaccel_enabled = 0;
4024                 this_device->volume_offline = 0;
4025                 this_device->queue_depth = h->nr_cmds;
4026         }
4027
4028         if (this_device->external)
4029                 this_device->queue_depth = EXTERNAL_QD;
4030
4031         if (is_OBDR_device) {
4032                 /* See if this is a One-Button-Disaster-Recovery device
4033                  * by looking for "$DR-10" at offset 43 in inquiry data.
4034                  */
4035                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4036                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4037                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
4038                                                 OBDR_SIG_LEN) == 0);
4039         }
4040         kfree(inq_buff);
4041         return 0;
4042
4043 bail_out:
4044         kfree(inq_buff);
4045         return rc;
4046 }
4047
4048 /*
4049  * Helper function to assign bus, target, lun mapping of devices.
4050  * Logical drive target and lun are assigned at this time, but
4051  * physical device lun and target assignment are deferred (assigned
4052  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
4053  */
4054 static void figure_bus_target_lun(struct ctlr_info *h,
4055         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4056 {
4057         u32 lunid = get_unaligned_le32(lunaddrbytes);
4058
4059         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4060                 /* physical device, target and lun filled in later */
4061                 if (is_hba_lunid(lunaddrbytes)) {
4062                         int bus = HPSA_HBA_BUS;
4063
4064                         if (!device->rev)
4065                                 bus = HPSA_LEGACY_HBA_BUS;
4066                         hpsa_set_bus_target_lun(device,
4067                                         bus, 0, lunid & 0x3fff);
4068                 } else
4069                         /* defer target, lun assignment for physical devices */
4070                         hpsa_set_bus_target_lun(device,
4071                                         HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4072                 return;
4073         }
4074         /* It's a logical device */
4075         if (device->external) {
4076                 hpsa_set_bus_target_lun(device,
4077                         HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4078                         lunid & 0x00ff);
4079                 return;
4080         }
4081         hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4082                                 0, lunid & 0x3fff);
4083 }
4084
4085 static int  figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4086         int i, int nphysicals, int nlocal_logicals)
4087 {
4088         /* In report logicals, local logicals are listed first,
4089          * then any externals.
4090          */
4091         int logicals_start = nphysicals + (raid_ctlr_position == 0);
4092
4093         if (i == raid_ctlr_position)
4094                 return 0;
4095
4096         if (i < logicals_start)
4097                 return 0;
4098
4099         /* i is in logicals range, but still within local logicals */
4100         if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4101                 return 0;
4102
4103         return 1; /* it's an external lun */
4104 }
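
/*
 * Worked example (illustrative values): with raid_ctlr_position == 0,
 * nphysicals == 3 and nlocal_logicals == 2, report-logical entries start
 * at index 4 (logicals_start == 4).  For i == 6 the local-logical test
 * computes 6 - 3 - 1 == 2, which is not less than nlocal_logicals, so
 * the LUN is treated as external and 1 is returned.
 */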
4105
4106 /*
4107  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
4108  * logdev.  The numbers of LUNs in physdev and logdev are returned in
4109  * *nphysicals and *nlogicals, respectively.
4110  * Returns 0 on success, -1 otherwise.
4111  */
4112 static int hpsa_gather_lun_info(struct ctlr_info *h,
4113         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4114         struct ReportLUNdata *logdev, u32 *nlogicals)
4115 {
4116         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4117                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4118                 return -1;
4119         }
4120         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4121         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4122                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4123                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4124                 *nphysicals = HPSA_MAX_PHYS_LUN;
4125         }
4126         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4127                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4128                 return -1;
4129         }
4130         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4131         /* Reject Logicals in excess of our max capability. */
4132         if (*nlogicals > HPSA_MAX_LUN) {
4133                 dev_warn(&h->pdev->dev,
4134                         "maximum logical LUNs (%d) exceeded.  "
4135                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
4136                         *nlogicals - HPSA_MAX_LUN);
4137                 *nlogicals = HPSA_MAX_LUN;
4138         }
4139         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4140                 dev_warn(&h->pdev->dev,
4141                         "maximum logical + physical LUNs (%d) exceeded. "
4142                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4143                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4144                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4145         }
4146         return 0;
4147 }
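
/*
 * Worked example (illustrative values): each extended physical report
 * entry is 24 bytes and each logical entry is 8 bytes, so a LUNListLength
 * of 72 from CISS_REPORT_PHYS gives *nphysicals == 72 / 24 == 3, and a
 * LUNListLength of 16 from CISS_REPORT_LOG gives *nlogicals == 16 / 8 == 2.
 */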
4148
4149 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4150         int i, int nphysicals, int nlogicals,
4151         struct ReportExtendedLUNdata *physdev_list,
4152         struct ReportLUNdata *logdev_list)
4153 {
4154         /* Helper function to figure out where the LUN ID info comes from,
4155          * given index i, the lists of physical and logical devices, and where
4156          * in the list the RAID controller is supposed to appear (first or last).
4157          */
4158
4159         int logicals_start = nphysicals + (raid_ctlr_position == 0);
4160         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4161
4162         if (i == raid_ctlr_position)
4163                 return RAID_CTLR_LUNID;
4164
4165         if (i < logicals_start)
4166                 return &physdev_list->LUN[i -
4167                                 (raid_ctlr_position == 0)].lunid[0];
4168
4169         if (i < last_device)
4170                 return &logdev_list->LUN[i - nphysicals -
4171                         (raid_ctlr_position == 0)][0];
4172         BUG();
4173         return NULL;
4174 }
4175
4176 /* get physical drive ioaccel handle and queue depth */
4177 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4178                 struct hpsa_scsi_dev_t *dev,
4179                 struct ReportExtendedLUNdata *rlep, int rle_index,
4180                 struct bmic_identify_physical_device *id_phys)
4181 {
4182         int rc;
4183         struct ext_report_lun_entry *rle;
4184
4185         if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4186                 return;
4187
4188         rle = &rlep->LUN[rle_index];
4189
4190         dev->ioaccel_handle = rle->ioaccel_handle;
4191         if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4192                 dev->hba_ioaccel_enabled = 1;
4193         memset(id_phys, 0, sizeof(*id_phys));
4194         rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4195                         GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4196                         sizeof(*id_phys));
4197         if (!rc)
4198                 /* Reserve space for FW operations */
4199 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4200 #define DRIVE_QUEUE_DEPTH 7
4201                 dev->queue_depth =
4202                         le16_to_cpu(id_phys->current_queue_depth_limit) -
4203                                 DRIVE_CMDS_RESERVED_FOR_FW;
4204         else
4205                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
4206 }
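
/*
 * Worked example (illustrative values): if BMIC identify-physical-device
 * reports current_queue_depth_limit == 30, the drive's queue depth becomes
 * 30 - DRIVE_CMDS_RESERVED_FOR_FW == 28; if the BMIC command fails, the
 * conservative DRIVE_QUEUE_DEPTH of 7 is used instead.
 */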
4207
4208 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4209         struct ReportExtendedLUNdata *rlep, int rle_index,
4210         struct bmic_identify_physical_device *id_phys)
4211 {
4212         struct ext_report_lun_entry *rle;
4213
4214         if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4215                 return;
4216
4217         rle = &rlep->LUN[rle_index];
4218
4219         if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4220                 this_device->hba_ioaccel_enabled = 1;
4221
4222         memcpy(&this_device->active_path_index,
4223                 &id_phys->active_path_number,
4224                 sizeof(this_device->active_path_index));
4225         memcpy(&this_device->path_map,
4226                 &id_phys->redundant_path_present_map,
4227                 sizeof(this_device->path_map));
4228         memcpy(&this_device->box,
4229                 &id_phys->alternate_paths_phys_box_on_port,
4230                 sizeof(this_device->box));
4231         memcpy(&this_device->phys_connector,
4232                 &id_phys->alternate_paths_phys_connector,
4233                 sizeof(this_device->phys_connector));
4234         memcpy(&this_device->bay,
4235                 &id_phys->phys_bay_in_box,
4236                 sizeof(this_device->bay));
4237 }
4238
4239 /* get number of local logical disks. */
4240 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4241         struct bmic_identify_controller *id_ctlr,
4242         u32 *nlocals)
4243 {
4244         int rc;
4245
4246         if (!id_ctlr) {
4247                 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4248                         __func__);
4249                 return -ENOMEM;
4250         }
4251         memset(id_ctlr, 0, sizeof(*id_ctlr));
4252         rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4253         if (!rc)
4254                 if (id_ctlr->configured_logical_drive_count < 255)
4255                         *nlocals = id_ctlr->configured_logical_drive_count;
4256                 else
4257                         *nlocals = le16_to_cpu(
4258                                         id_ctlr->extended_logical_unit_count);
4259         else
4260                 *nlocals = -1;
4261         return rc;
4262 }
4263
4264 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4265 {
4266         struct bmic_identify_physical_device *id_phys;
4267         bool is_spare = false;
4268         int rc;
4269
4270         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4271         if (!id_phys)
4272                 return false;
4273
4274         rc = hpsa_bmic_id_physical_device(h,
4275                                         lunaddrbytes,
4276                                         GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4277                                         id_phys, sizeof(*id_phys));
4278         if (rc == 0)
4279                 is_spare = (id_phys->more_flags >> 6) & 0x01;
4280
4281         kfree(id_phys);
4282         return is_spare;
4283 }
4284
4285 #define RPL_DEV_FLAG_NON_DISK                           0x1
4286 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
4287 #define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4
4288
4289 #define BMIC_DEVICE_TYPE_ENCLOSURE  6
4290
4291 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4292                                 struct ext_report_lun_entry *rle)
4293 {
4294         u8 device_flags;
4295         u8 device_type;
4296
4297         if (!MASKED_DEVICE(lunaddrbytes))
4298                 return false;
4299
4300         device_flags = rle->device_flags;
4301         device_type = rle->device_type;
4302
4303         if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4304                 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4305                         return false;
4306                 return true;
4307         }
4308
4309         if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4310                 return false;
4311
4312         if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4313                 return false;
4314
4315         /*
4316          * Spares may be spun down; we do not want to
4317          * send an Inquiry to a RAID set spare drive, as
4318          * that would spin it up.  That is a performance
4319          * hit, because I/O to the RAID device stops while
4320          * the spin-up occurs, which can take over
4321          * 50 seconds.
4322          */
4323         if (hpsa_is_disk_spare(h, lunaddrbytes))
4324                 return true;
4325
4326         return false;
4327 }
4328
4329 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4330 {
4331         /* The idea here is that we could get notified
4332          * that some devices have changed, so we issue report
4333          * physical LUNs and report logical LUNs commands and
4334          * adjust our list of devices accordingly.
4335          *
4336          * The scsi3addr's of devices won't change so long as the
4337          * adapter is not reset.  That means we can rescan and
4338          * tell which devices we already know about, vs. new
4339          * devices, vs. disappearing devices.
4340          */
4341         struct ReportExtendedLUNdata *physdev_list = NULL;
4342         struct ReportLUNdata *logdev_list = NULL;
4343         struct bmic_identify_physical_device *id_phys = NULL;
4344         struct bmic_identify_controller *id_ctlr = NULL;
4345         u32 nphysicals = 0;
4346         u32 nlogicals = 0;
4347         u32 nlocal_logicals = 0;
4348         u32 ndev_allocated = 0;
4349         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4350         int ncurrent = 0;
4351         int i, ndevs_to_allocate;
4352         int raid_ctlr_position;
4353         bool physical_device;
4354         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4355
4356         currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4357         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4358         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4359         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4360         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4361         id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4362
4363         if (!currentsd || !physdev_list || !logdev_list ||
4364                 !tmpdevice || !id_phys || !id_ctlr) {
4365                 dev_err(&h->pdev->dev, "out of memory\n");
4366                 goto out;
4367         }
4368         memset(lunzerobits, 0, sizeof(lunzerobits));
4369
4370         h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4371
4372         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4373                         logdev_list, &nlogicals)) {
4374                 h->drv_req_rescan = 1;
4375                 goto out;
4376         }
4377
4378         /* Set number of local logicals (non PTRAID) */
4379         if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4380                 dev_warn(&h->pdev->dev,
4381                         "%s: Can't determine number of local logical devices.\n",
4382                         __func__);
4383         }
4384
4385         /* We might see up to the maximum number of logical and physical disks
4386          * plus external target devices, and a device for the local RAID
4387          * controller.
4388          */
4389         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4390
4391         hpsa_ext_ctrl_present(h, physdev_list);
4392
4393         /* Allocate the per device structures */
4394         for (i = 0; i < ndevs_to_allocate; i++) {
4395                 if (i >= HPSA_MAX_DEVICES) {
4396                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4397                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
4398                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
4399                         break;
4400                 }
4401
4402                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4403                 if (!currentsd[i]) {
4404                         h->drv_req_rescan = 1;
4405                         goto out;
4406                 }
4407                 ndev_allocated++;
4408         }
4409
4410         if (is_scsi_rev_5(h))
4411                 raid_ctlr_position = 0;
4412         else
4413                 raid_ctlr_position = nphysicals + nlogicals;
4414
4415         /* adjust our table of devices */
4416         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4417                 u8 *lunaddrbytes, is_OBDR = 0;
4418                 int rc = 0;
4419                 int phys_dev_index = i - (raid_ctlr_position == 0);
4420                 bool skip_device = false;
4421
4422                 memset(tmpdevice, 0, sizeof(*tmpdevice));
4423
4424                 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4425
4426                 /* Figure out where the LUN ID info is coming from */
4427                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4428                         i, nphysicals, nlogicals, physdev_list, logdev_list);
4429
4430                 /* Determine if this is a lun from an external target array */
4431                 tmpdevice->external =
4432                         figure_external_status(h, raid_ctlr_position, i,
4433                                                 nphysicals, nlocal_logicals);
4434
4435                 /*
4436                  * Skip over some devices such as a spare.
4437                  */
4438                 if (phys_dev_index >= 0 && !tmpdevice->external &&
4439                         physical_device) {
4440                         skip_device = hpsa_skip_device(h, lunaddrbytes,
4441                                         &physdev_list->LUN[phys_dev_index]);
4442                         if (skip_device)
4443                                 continue;
4444                 }
4445
4446                 /* Get device type, vendor, model, device id, raid_map */
4447                 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4448                                                         &is_OBDR);
4449                 if (rc == -ENOMEM) {
4450                         dev_warn(&h->pdev->dev,
4451                                 "Out of memory, rescan deferred.\n");
4452                         h->drv_req_rescan = 1;
4453                         goto out;
4454                 }
4455                 if (rc) {
4456                         h->drv_req_rescan = 1;
4457                         continue;
4458                 }
4459
4460                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4461                 this_device = currentsd[ncurrent];
4462
4463                 *this_device = *tmpdevice;
4464                 this_device->physical_device = physical_device;
4465
4466                 /*
4467                  * Expose all devices except for physical devices that
4468                  * are masked.
4469                  */
4470                 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4471                         this_device->expose_device = 0;
4472                 else
4473                         this_device->expose_device = 1;
4474
4475
4476                 /*
4477                  * Get the SAS address for physical devices that are exposed.
4478                  */
4479                 if (this_device->physical_device && this_device->expose_device)
4480                         hpsa_get_sas_address(h, lunaddrbytes, this_device);
4481
4482                 switch (this_device->devtype) {
4483                 case TYPE_ROM:
4484                         /* We don't *really* support actual CD-ROM devices,
4485                          * just the "One Button Disaster Recovery" tape drive,
4486                          * which temporarily pretends to be a CD-ROM drive.
4487                          * So we check that the device is really an OBDR tape
4488                          * device by checking for "$DR-10" in bytes 43-48 of
4489                          * the inquiry data.
4490                          */
4491                         if (is_OBDR)
4492                                 ncurrent++;
4493                         break;
4494                 case TYPE_DISK:
4495                 case TYPE_ZBC:
4496                         if (this_device->physical_device) {
4497                                 /* The disk is in HBA mode. */
4498                                 /* Never use RAID mapper in HBA mode. */
4499                                 this_device->offload_enabled = 0;
4500                                 hpsa_get_ioaccel_drive_info(h, this_device,
4501                                         physdev_list, phys_dev_index, id_phys);
4502                                 hpsa_get_path_info(this_device,
4503                                         physdev_list, phys_dev_index, id_phys);
4504                         }
4505                         ncurrent++;
4506                         break;
4507                 case TYPE_TAPE:
4508                 case TYPE_MEDIUM_CHANGER:
4509                         ncurrent++;
4510                         break;
4511                 case TYPE_ENCLOSURE:
4512                         if (!this_device->external)
4513                                 hpsa_get_enclosure_info(h, lunaddrbytes,
4514                                                 physdev_list, phys_dev_index,
4515                                                 this_device);
4516                         ncurrent++;
4517                         break;
4518                 case TYPE_RAID:
4519                         /* Only present the Smartarray HBA as a RAID controller.
4520                          * If it's a RAID controller other than the HBA itself
4521                          * (an external RAID controller, MSA500 or similar),
4522                          * don't present it.
4523                          */
4524                         if (!is_hba_lunid(lunaddrbytes))
4525                                 break;
4526                         ncurrent++;
4527                         break;
4528                 default:
4529                         break;
4530                 }
4531                 if (ncurrent >= HPSA_MAX_DEVICES)
4532                         break;
4533         }
4534
4535         if (h->sas_host == NULL) {
4536                 int rc = 0;
4537
4538                 rc = hpsa_add_sas_host(h);
4539                 if (rc) {
4540                         dev_warn(&h->pdev->dev,
4541                                 "Could not add sas host %d\n", rc);
4542                         goto out;
4543                 }
4544         }
4545
4546         adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4547 out:
4548         kfree(tmpdevice);
4549         for (i = 0; i < ndev_allocated; i++)
4550                 kfree(currentsd[i]);
4551         kfree(currentsd);
4552         kfree(physdev_list);
4553         kfree(logdev_list);
4554         kfree(id_ctlr);
4555         kfree(id_phys);
4556 }
4557
4558 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4559                                    struct scatterlist *sg)
4560 {
4561         u64 addr64 = (u64) sg_dma_address(sg);
4562         unsigned int len = sg_dma_len(sg);
4563
4564         desc->Addr = cpu_to_le64(addr64);
4565         desc->Len = cpu_to_le32(len);
4566         desc->Ext = 0;
4567 }
4568
4569 /*
4570  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), does the PCI DMA
4571  * mapping, and fills in the scatter-gather entries of the
4572  * hpsa command, cp.
4573  */
4574 static int hpsa_scatter_gather(struct ctlr_info *h,
4575                 struct CommandList *cp,
4576                 struct scsi_cmnd *cmd)
4577 {
4578         struct scatterlist *sg;
4579         int use_sg, i, sg_limit, chained;
4580         struct SGDescriptor *curr_sg;
4581
4582         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4583
4584         use_sg = scsi_dma_map(cmd);
4585         if (use_sg < 0)
4586                 return use_sg;
4587
4588         if (!use_sg)
4589                 goto sglist_finished;
4590
4591         /*
4592          * If the number of entries is greater than the max for a single list,
4593          * then we have a chained list; we will set up all but one entry in the
4594          * first list (the last entry is saved for link information);
4595          * otherwise, we don't have a chained list and we'll set up each of
4596          * the entries in the single list.
4597          */
4598         curr_sg = cp->SG;
4599         chained = use_sg > h->max_cmd_sg_entries;
4600         sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4601         scsi_for_each_sg(cmd, sg, sg_limit, i) {
4602                 hpsa_set_sg_descriptor(curr_sg, sg);
4603                 curr_sg++;
4604         }
4605
4606         if (chained) {
4607                 /*
4608                  * Continue with the chained list.  Set curr_sg to the chained
4609                  * list.  Modify the limit to the total count less the entries
4610                  * we've already set up.  Resume the scan at the list entry
4611                  * where the previous loop left off.
4612                  */
4613                 curr_sg = h->cmd_sg_list[cp->cmdindex];
4614                 sg_limit = use_sg - sg_limit;
4615                 for_each_sg(sg, sg, sg_limit, i) {
4616                         hpsa_set_sg_descriptor(curr_sg, sg);
4617                         curr_sg++;
4618                 }
4619         }
4620
4621         /* Back the pointer up to the last entry and mark it as "last". */
4622         (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4623
4624         if (use_sg + chained > h->maxSG)
4625                 h->maxSG = use_sg + chained;
4626
4627         if (chained) {
4628                 cp->Header.SGList = h->max_cmd_sg_entries;
4629                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4630                 if (hpsa_map_sg_chain_block(h, cp)) {
4631                         scsi_dma_unmap(cmd);
4632                         return -1;
4633                 }
4634                 return 0;
4635         }
4636
4637 sglist_finished:
4638
4639         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
4640         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4641         return 0;
4642 }
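
/*
 * Worked example (illustrative values): assuming max_cmd_sg_entries == 32
 * and scsi_dma_map() returns use_sg == 40, the list is chained: the first
 * 31 descriptors go into cp->SG, the last slot carries the chain link,
 * the remaining 9 descriptors land in h->cmd_sg_list[cp->cmdindex], and
 * SGTotal is reported as 40 + 1 == 41.
 */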
4643
4644 static inline void warn_zero_length_transfer(struct ctlr_info *h,
4645                                                 u8 *cdb, int cdb_len,
4646                                                 const char *func)
4647 {
4648         dev_warn(&h->pdev->dev,
4649                  "%s: Blocking zero-length request: CDB:%*phN\n",
4650                  func, cdb_len, cdb);
4651 }
4652
4653 #define IO_ACCEL_INELIGIBLE 1
4654 /* zero-length transfers trigger hardware errors. */
4655 static bool is_zero_length_transfer(u8 *cdb)
4656 {
4657         u32 block_cnt;
4658
4659         /* Block zero-length transfer sizes on certain commands. */
4660         switch (cdb[0]) {
4661         case READ_10:
4662         case WRITE_10:
4663         case VERIFY:            /* 0x2F */
4664         case WRITE_VERIFY:      /* 0x2E */
4665                 block_cnt = get_unaligned_be16(&cdb[7]);
4666                 break;
4667         case READ_12:
4668         case WRITE_12:
4669         case VERIFY_12: /* 0xAF */
4670         case WRITE_VERIFY_12:   /* 0xAE */
4671                 block_cnt = get_unaligned_be32(&cdb[6]);
4672                 break;
4673         case READ_16:
4674         case WRITE_16:
4675         case VERIFY_16:         /* 0x8F */
4676                 block_cnt = get_unaligned_be32(&cdb[10]);
4677                 break;
4678         default:
4679                 return false;
4680         }
4681
4682         return block_cnt == 0;
4683 }
4684
4685 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4686 {
4687         int is_write = 0;
4688         u32 block;
4689         u32 block_cnt;
4690
4691         /* Perform some CDB fixups if needed using 10 byte reads/writes only */
4692         switch (cdb[0]) {
4693         case WRITE_6:
4694         case WRITE_12:
4695                 is_write = 1;
4696                 fallthrough;
4697         case READ_6:
4698         case READ_12:
4699                 if (*cdb_len == 6) {
4700                         block = (((cdb[1] & 0x1F) << 16) |
4701                                 (cdb[2] << 8) |
4702                                 cdb[3]);
4703                         block_cnt = cdb[4];
4704                         if (block_cnt == 0)
4705                                 block_cnt = 256;
4706                 } else {
4707                         BUG_ON(*cdb_len != 12);
4708                         block = get_unaligned_be32(&cdb[2]);
4709                         block_cnt = get_unaligned_be32(&cdb[6]);
4710                 }
4711                 if (block_cnt > 0xffff)
4712                         return IO_ACCEL_INELIGIBLE;
4713
4714                 cdb[0] = is_write ? WRITE_10 : READ_10;
4715                 cdb[1] = 0;
4716                 cdb[2] = (u8) (block >> 24);
4717                 cdb[3] = (u8) (block >> 16);
4718                 cdb[4] = (u8) (block >> 8);
4719                 cdb[5] = (u8) (block);
4720                 cdb[6] = 0;
4721                 cdb[7] = (u8) (block_cnt >> 8);
4722                 cdb[8] = (u8) (block_cnt);
4723                 cdb[9] = 0;
4724                 *cdb_len = 10;
4725                 break;
4726         }
4727         return 0;
4728 }
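
/*
 * Worked example (illustrative values): a READ_6 CDB addressing block
 * 0x12345 with a transfer-length byte of 0 (meaning 256 blocks) is
 * rewritten as a READ_10 CDB with cdb[2..5] == 00 01 23 45,
 * cdb[7..8] == 01 00, and *cdb_len == 10.
 */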
4729
4730 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4731         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4732         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4733 {
4734         struct scsi_cmnd *cmd = c->scsi_cmd;
4735         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4736         unsigned int len;
4737         unsigned int total_len = 0;
4738         struct scatterlist *sg;
4739         u64 addr64;
4740         int use_sg, i;
4741         struct SGDescriptor *curr_sg;
4742         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4743
4744         /* TODO: implement chaining support */
4745         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4746                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4747                 return IO_ACCEL_INELIGIBLE;
4748         }
4749
4750         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4751
4752         if (is_zero_length_transfer(cdb)) {
4753                 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4754                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4755                 return IO_ACCEL_INELIGIBLE;
4756         }
4757
4758         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4759                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4760                 return IO_ACCEL_INELIGIBLE;
4761         }
4762
4763         c->cmd_type = CMD_IOACCEL1;
4764
4765         /* Adjust the DMA address to point to the accelerated command buffer */
4766         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4767                                 (c->cmdindex * sizeof(*cp));
4768         BUG_ON(c->busaddr & 0x0000007F);
4769
4770         use_sg = scsi_dma_map(cmd);
4771         if (use_sg < 0) {
4772                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4773                 return use_sg;
4774         }
4775
4776         if (use_sg) {
4777                 curr_sg = cp->SG;
4778                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4779                         addr64 = (u64) sg_dma_address(sg);
4780                         len  = sg_dma_len(sg);
4781                         total_len += len;
4782                         curr_sg->Addr = cpu_to_le64(addr64);
4783                         curr_sg->Len = cpu_to_le32(len);
4784                         curr_sg->Ext = cpu_to_le32(0);
4785                         curr_sg++;
4786                 }
4787                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4788
4789                 switch (cmd->sc_data_direction) {
4790                 case DMA_TO_DEVICE:
4791                         control |= IOACCEL1_CONTROL_DATA_OUT;
4792                         break;
4793                 case DMA_FROM_DEVICE:
4794                         control |= IOACCEL1_CONTROL_DATA_IN;
4795                         break;
4796                 case DMA_NONE:
4797                         control |= IOACCEL1_CONTROL_NODATAXFER;
4798                         break;
4799                 default:
4800                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4801                                 cmd->sc_data_direction);
4802                         BUG();
4803                         break;
4804                 }
4805         } else {
4806                 control |= IOACCEL1_CONTROL_NODATAXFER;
4807         }
4808
4809         c->Header.SGList = use_sg;
4810         /* Fill out the command structure to submit */
4811         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4812         cp->transfer_len = cpu_to_le32(total_len);
4813         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4814                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4815         cp->control = cpu_to_le32(control);
4816         memcpy(cp->CDB, cdb, cdb_len);
4817         memcpy(cp->CISS_LUN, scsi3addr, 8);
4818         /* Tag was already set at init time. */
4819         enqueue_cmd_and_start_io(h, c);
4820         return 0;
4821 }
4822
4823 /*
4824  * Queue a command directly to a device behind the controller using the
4825  * I/O accelerator path.
4826  */
4827 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4828         struct CommandList *c)
4829 {
4830         struct scsi_cmnd *cmd = c->scsi_cmd;
4831         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4832
4833         if (!dev)
4834                 return -1;
4835
4836         c->phys_disk = dev;
4837
4838         if (dev->in_reset)
4839                 return -1;
4840
4841         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4842                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4843 }
4844
4845 /*
4846  * Set encryption parameters for the ioaccel2 request
4847  */
4848 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4849         struct CommandList *c, struct io_accel2_cmd *cp)
4850 {
4851         struct scsi_cmnd *cmd = c->scsi_cmd;
4852         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4853         struct raid_map_data *map = &dev->raid_map;
4854         u64 first_block;
4855
4856         /* Are we doing encryption on this device? */
4857         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4858                 return;
4859         /* Set the data encryption key index. */
4860         cp->dekindex = map->dekindex;
4861
4862         /* Set the encryption enable flag, encoded into direction field. */
4863         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4864
4865         /* Set encryption tweak values based on logical block address.
4866          * If the block size is 512, the tweak value is the LBA.
4867          * For other block sizes, the tweak is (LBA * block size) / 512.
4868          */
4869         switch (cmd->cmnd[0]) {
4870         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4871         case READ_6:
4872         case WRITE_6:
4873                 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4874                                 (cmd->cmnd[2] << 8) |
4875                                 cmd->cmnd[3]);
4876                 break;
4877         case WRITE_10:
4878         case READ_10:
4879         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4880         case WRITE_12:
4881         case READ_12:
4882                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4883                 break;
4884         case WRITE_16:
4885         case READ_16:
4886                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4887                 break;
4888         default:
4889                 dev_err(&h->pdev->dev,
4890                         "ERROR: %s: size (0x%x) not supported for encryption\n",
4891                         __func__, cmd->cmnd[0]);
4892                 BUG();
4893                 break;
4894         }
4895
4896         if (le32_to_cpu(map->volume_blk_size) != 512)
4897                 first_block = first_block *
4898                                 le32_to_cpu(map->volume_blk_size)/512;
4899
4900         cp->tweak_lower = cpu_to_le32(first_block);
4901         cp->tweak_upper = cpu_to_le32(first_block >> 32);
4902 }
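
/*
 * Worked example (illustrative values): for a READ_10 starting at LBA 100
 * on a volume with a 4096-byte block size, first_block is scaled to
 * 100 * 4096 / 512 == 800, so tweak_lower becomes 800 and tweak_upper
 * becomes 0.  With a 512-byte block size the tweak is simply the LBA.
 */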
4903
4904 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4905         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4906         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4907 {
4908         struct scsi_cmnd *cmd = c->scsi_cmd;
4909         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4910         struct ioaccel2_sg_element *curr_sg;
4911         int use_sg, i;
4912         struct scatterlist *sg;
4913         u64 addr64;
4914         u32 len;
4915         u32 total_len = 0;
4916
4917         if (!cmd->device)
4918                 return -1;
4919
4920         if (!cmd->device->hostdata)
4921                 return -1;
4922
4923         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4924
4925         if (is_zero_length_transfer(cdb)) {
4926                 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4927                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4928                 return IO_ACCEL_INELIGIBLE;
4929         }
4930
4931         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4932                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4933                 return IO_ACCEL_INELIGIBLE;
4934         }
4935
4936         c->cmd_type = CMD_IOACCEL2;
4937         /* Adjust the DMA address to point to the accelerated command buffer */
4938         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4939                                 (c->cmdindex * sizeof(*cp));
4940         BUG_ON(c->busaddr & 0x0000007F);
4941
4942         memset(cp, 0, sizeof(*cp));
4943         cp->IU_type = IOACCEL2_IU_TYPE;
4944
4945         use_sg = scsi_dma_map(cmd);
4946         if (use_sg < 0) {
4947                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4948                 return use_sg;
4949         }
4950
4951         if (use_sg) {
4952                 curr_sg = cp->sg;
4953                 if (use_sg > h->ioaccel_maxsg) {
4954                         addr64 = le64_to_cpu(
4955                                 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4956                         curr_sg->address = cpu_to_le64(addr64);
4957                         curr_sg->length = 0;
4958                         curr_sg->reserved[0] = 0;
4959                         curr_sg->reserved[1] = 0;
4960                         curr_sg->reserved[2] = 0;
4961                         curr_sg->chain_indicator = IOACCEL2_CHAIN;
4962
4963                         curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4964                 }
4965                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4966                         addr64 = (u64) sg_dma_address(sg);
4967                         len  = sg_dma_len(sg);
4968                         total_len += len;
4969                         curr_sg->address = cpu_to_le64(addr64);
4970                         curr_sg->length = cpu_to_le32(len);
4971                         curr_sg->reserved[0] = 0;
4972                         curr_sg->reserved[1] = 0;
4973                         curr_sg->reserved[2] = 0;
4974                         curr_sg->chain_indicator = 0;
4975                         curr_sg++;
4976                 }
4977
4978                 /*
4979                  * Set the last s/g element bit
4980                  */
4981                 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4982
4983                 switch (cmd->sc_data_direction) {
4984                 case DMA_TO_DEVICE:
4985                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4986                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
4987                         break;
4988                 case DMA_FROM_DEVICE:
4989                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4990                         cp->direction |= IOACCEL2_DIR_DATA_IN;
4991                         break;
4992                 case DMA_NONE:
4993                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4994                         cp->direction |= IOACCEL2_DIR_NO_DATA;
4995                         break;
4996                 default:
4997                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4998                                 cmd->sc_data_direction);
4999                         BUG();
5000                         break;
5001                 }
5002         } else {
5003                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
5004                 cp->direction |= IOACCEL2_DIR_NO_DATA;
5005         }
5006
5007         /* Set encryption parameters, if necessary */
5008         set_encrypt_ioaccel2(h, c, cp);
5009
5010         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
5011         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5012         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
5013
5014         cp->data_len = cpu_to_le32(total_len);
5015         cp->err_ptr = cpu_to_le64(c->busaddr +
5016                         offsetof(struct io_accel2_cmd, error_data));
5017         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
5018
5019         /* fill in sg elements */
5020         if (use_sg > h->ioaccel_maxsg) {
5021                 cp->sg_count = 1;
5022                 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
5023                 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
5024                         atomic_dec(&phys_disk->ioaccel_cmds_out);
5025                         scsi_dma_unmap(cmd);
5026                         return -1;
5027                 }
5028         } else
5029                 cp->sg_count = (u8) use_sg;
5030
5031         if (phys_disk->in_reset) {
5032                 cmd->result = DID_RESET << 16;
5033                 return -1;
5034         }
5035
5036         enqueue_cmd_and_start_io(h, c);
5037         return 0;
5038 }
5039
5040 /*
5041  * Queue a command to the correct I/O accelerator path.
5042  */
5043 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5044         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5045         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5046 {
5047         if (!c->scsi_cmd->device)
5048                 return -1;
5049
5050         if (!c->scsi_cmd->device->hostdata)
5051                 return -1;
5052
5053         if (phys_disk->in_reset)
5054                 return -1;
5055
5056         /* Try to honor the device's queue depth */
5057         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5058                                         phys_disk->queue_depth) {
5059                 atomic_dec(&phys_disk->ioaccel_cmds_out);
5060                 return IO_ACCEL_INELIGIBLE;
5061         }
5062         if (h->transMethod & CFGTBL_Trans_io_accel1)
5063                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5064                                                 cdb, cdb_len, scsi3addr,
5065                                                 phys_disk);
5066         else
5067                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5068                                                 cdb, cdb_len, scsi3addr,
5069                                                 phys_disk);
5070 }
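
/*
 * Worked example (illustrative values): with phys_disk->queue_depth == 7
 * and seven ioaccel commands already outstanding, atomic_inc_return()
 * yields 8, which exceeds the queue depth; the counter is decremented
 * again and IO_ACCEL_INELIGIBLE is returned, so the request is submitted
 * down the normal (non-accelerated) path instead.
 */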
5071
5072 static void raid_map_helper(struct raid_map_data *map,
5073                 int offload_to_mirror, u32 *map_index, u32 *current_group)
5074 {
5075         if (offload_to_mirror == 0)  {
5076                 /* use physical disk in the first mirrored group. */
5077                 *map_index %= le16_to_cpu(map->data_disks_per_row);
5078                 return;
5079         }
5080         do {
5081                 /* determine mirror group that *map_index indicates */
5082                 *current_group = *map_index /
5083                         le16_to_cpu(map->data_disks_per_row);
5084                 if (offload_to_mirror == *current_group)
5085                         continue;
5086                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5087                         /* select map index from next group */
5088                         *map_index += le16_to_cpu(map->data_disks_per_row);
5089                         (*current_group)++;
5090                 } else {
5091                         /* select map index from first group */
5092                         *map_index %= le16_to_cpu(map->data_disks_per_row);
5093                         *current_group = 0;
5094                 }
5095         } while (offload_to_mirror != *current_group);
5096 }
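
/*
 * Worked example (illustrative values): assuming data_disks_per_row == 3,
 * layout_map_count == 3 (a three-way mirror) and an incoming *map_index
 * of 1: with offload_to_mirror == 0 the index stays 1 (first mirror
 * group); with offload_to_mirror == 2 the loop advances the index by 3
 * twice, to 7, i.e. the same column in the third mirror group.
 */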
5097
5098 /*
5099  * Attempt to perform offload RAID mapping for a logical volume I/O.
5100  */
5101 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5102         struct CommandList *c)
5103 {
5104         struct scsi_cmnd *cmd = c->scsi_cmd;
5105         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5106         struct raid_map_data *map = &dev->raid_map;
5107         struct raid_map_disk_data *dd = &map->data[0];
5108         int is_write = 0;
5109         u32 map_index;
5110         u64 first_block, last_block;
5111         u32 block_cnt;
5112         u32 blocks_per_row;
5113         u64 first_row, last_row;
5114         u32 first_row_offset, last_row_offset;
5115         u32 first_column, last_column;
5116         u64 r0_first_row, r0_last_row;
5117         u32 r5or6_blocks_per_row;
5118         u64 r5or6_first_row, r5or6_last_row;
5119         u32 r5or6_first_row_offset, r5or6_last_row_offset;
5120         u32 r5or6_first_column, r5or6_last_column;
5121         u32 total_disks_per_row;
5122         u32 stripesize;
5123         u32 first_group, last_group, current_group;
5124         u32 map_row;
5125         u32 disk_handle;
5126         u64 disk_block;
5127         u32 disk_block_cnt;
5128         u8 cdb[16];
5129         u8 cdb_len;
5130         u16 strip_size;
5131 #if BITS_PER_LONG == 32
5132         u64 tmpdiv;
5133 #endif
5134         int offload_to_mirror;
5135
5136         if (!dev)
5137                 return -1;
5138
5139         if (dev->in_reset)
5140                 return -1;
5141
5142         /* check for valid opcode, get LBA and block count */
5143         switch (cmd->cmnd[0]) {
5144         case WRITE_6:
5145                 is_write = 1;
5146                 fallthrough;
5147         case READ_6:
5148                 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5149                                 (cmd->cmnd[2] << 8) |
5150                                 cmd->cmnd[3]);
5151                 block_cnt = cmd->cmnd[4];
5152                 if (block_cnt == 0)
5153                         block_cnt = 256;
5154                 break;
5155         case WRITE_10:
5156                 is_write = 1;
5157                 fallthrough;
5158         case READ_10:
5159                 first_block =
5160                         (((u64) cmd->cmnd[2]) << 24) |
5161                         (((u64) cmd->cmnd[3]) << 16) |
5162                         (((u64) cmd->cmnd[4]) << 8) |
5163                         cmd->cmnd[5];
5164                 block_cnt =
5165                         (((u32) cmd->cmnd[7]) << 8) |
5166                         cmd->cmnd[8];
5167                 break;
5168         case WRITE_12:
5169                 is_write = 1;
5170                 fallthrough;
5171         case READ_12:
5172                 first_block =
5173                         (((u64) cmd->cmnd[2]) << 24) |
5174                         (((u64) cmd->cmnd[3]) << 16) |
5175                         (((u64) cmd->cmnd[4]) << 8) |
5176                         cmd->cmnd[5];
5177                 block_cnt =
5178                         (((u32) cmd->cmnd[6]) << 24) |
5179                         (((u32) cmd->cmnd[7]) << 16) |
5180                         (((u32) cmd->cmnd[8]) << 8) |
5181                         cmd->cmnd[9];
5182                 break;
5183         case WRITE_16:
5184                 is_write = 1;
5185                 fallthrough;
5186         case READ_16:
5187                 first_block =
5188                         (((u64) cmd->cmnd[2]) << 56) |
5189                         (((u64) cmd->cmnd[3]) << 48) |
5190                         (((u64) cmd->cmnd[4]) << 40) |
5191                         (((u64) cmd->cmnd[5]) << 32) |
5192                         (((u64) cmd->cmnd[6]) << 24) |
5193                         (((u64) cmd->cmnd[7]) << 16) |
5194                         (((u64) cmd->cmnd[8]) << 8) |
5195                         cmd->cmnd[9];
5196                 block_cnt =
5197                         (((u32) cmd->cmnd[10]) << 24) |
5198                         (((u32) cmd->cmnd[11]) << 16) |
5199                         (((u32) cmd->cmnd[12]) << 8) |
5200                         cmd->cmnd[13];
5201                 break;
5202         default:
5203                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5204         }
5205         last_block = first_block + block_cnt - 1;
5206
5207         /* check for write to non-RAID-0 */
5208         if (is_write && dev->raid_level != 0)
5209                 return IO_ACCEL_INELIGIBLE;
5210
5211         /* check for invalid block or wraparound */
5212         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5213                 last_block < first_block)
5214                 return IO_ACCEL_INELIGIBLE;
5215
5216         /* calculate stripe information for the request */
5217         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5218                                 le16_to_cpu(map->strip_size);
5219         strip_size = le16_to_cpu(map->strip_size);
5220 #if BITS_PER_LONG == 32
5221         tmpdiv = first_block;
5222         (void) do_div(tmpdiv, blocks_per_row);
5223         first_row = tmpdiv;
5224         tmpdiv = last_block;
5225         (void) do_div(tmpdiv, blocks_per_row);
5226         last_row = tmpdiv;
5227         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5228         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5229         tmpdiv = first_row_offset;
5230         (void) do_div(tmpdiv, strip_size);
5231         first_column = tmpdiv;
5232         tmpdiv = last_row_offset;
5233         (void) do_div(tmpdiv, strip_size);
5234         last_column = tmpdiv;
5235 #else
5236         first_row = first_block / blocks_per_row;
5237         last_row = last_block / blocks_per_row;
5238         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5239         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5240         first_column = first_row_offset / strip_size;
5241         last_column = last_row_offset / strip_size;
5242 #endif
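
        /*
         * Worked example (illustrative values): with data_disks_per_row == 4
         * and strip_size == 128, blocks_per_row is 512; a request with
         * first_block == 1000 and block_cnt == 16 gives
         * first_row == last_row == 1, first_row_offset == 488 and
         * first_column == last_column == 3, so the single row/column
         * check below passes.
         */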
5243
5244         /* if this isn't a single row/column then give to the controller */
5245         if ((first_row != last_row) || (first_column != last_column))
5246                 return IO_ACCEL_INELIGIBLE;
5247
5248         /* proceeding with driver mapping */
5249         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5250                                 le16_to_cpu(map->metadata_disks_per_row);
5251         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5252                                 le16_to_cpu(map->row_cnt);
5253         map_index = (map_row * total_disks_per_row) + first_column;
5254
5255         switch (dev->raid_level) {
5256         case HPSA_RAID_0:
5257                 break; /* nothing special to do */
5258         case HPSA_RAID_1:
5259                 /* Handles load balance across RAID 1 members.
5260                  * (2-drive R1 and R10 with even # of drives.)
5261                  * Appropriate for SSDs, not optimal for HDDs.
5262                  * Ensure we have the correct raid_map.
5263                  */
5264                 if (le16_to_cpu(map->layout_map_count) != 2) {
5265                         hpsa_turn_off_ioaccel_for_device(dev);
5266                         return IO_ACCEL_INELIGIBLE;
5267                 }
5268                 if (dev->offload_to_mirror)
5269                         map_index += le16_to_cpu(map->data_disks_per_row);
5270                 dev->offload_to_mirror = !dev->offload_to_mirror;
5271                 break;
5272         case HPSA_RAID_ADM:
5273                 /* Handles N-way mirrors (R1-ADM)
5274                  * and R10 with # of drives divisible by 3.
5275                  * Ensure we have the correct raid_map.
5276                  */
5277                 if (le16_to_cpu(map->layout_map_count) != 3) {
5278                         hpsa_turn_off_ioaccel_for_device(dev);
5279                         return IO_ACCEL_INELIGIBLE;
5280                 }
5281
5282                 offload_to_mirror = dev->offload_to_mirror;
5283                 raid_map_helper(map, offload_to_mirror,
5284                                 &map_index, &current_group);
5285                 /* set mirror group to use next time */
5286                 offload_to_mirror =
5287                         (offload_to_mirror >=
5288                         le16_to_cpu(map->layout_map_count) - 1)
5289                         ? 0 : offload_to_mirror + 1;
5290                 dev->offload_to_mirror = offload_to_mirror;
5291                 /* Avoid direct use of dev->offload_to_mirror within this
5292                  * function since multiple threads might simultaneously
5293                  * increment it beyond the range of map->layout_map_count - 1.
5294                  */
5295                 break;
5296         case HPSA_RAID_5:
5297         case HPSA_RAID_6:
5298                 if (le16_to_cpu(map->layout_map_count) <= 1)
5299                         break;
5300
5301                 /* Verify first and last block are in same RAID group */
5302                 r5or6_blocks_per_row =
5303                         le16_to_cpu(map->strip_size) *
5304                         le16_to_cpu(map->data_disks_per_row);
5305                 if (r5or6_blocks_per_row == 0) {
5306                         hpsa_turn_off_ioaccel_for_device(dev);
5307                         return IO_ACCEL_INELIGIBLE;
5308                 }
5309                 stripesize = r5or6_blocks_per_row *
5310                         le16_to_cpu(map->layout_map_count);
5311 #if BITS_PER_LONG == 32
5312                 tmpdiv = first_block;
5313                 first_group = do_div(tmpdiv, stripesize);
5314                 tmpdiv = first_group;
5315                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5316                 first_group = tmpdiv;
5317                 tmpdiv = last_block;
5318                 last_group = do_div(tmpdiv, stripesize);
5319                 tmpdiv = last_group;
5320                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5321                 last_group = tmpdiv;
5322 #else
5323                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5324                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5325 #endif
5326                 if (first_group != last_group)
5327                         return IO_ACCEL_INELIGIBLE;
5328
5329                 /* Verify request is in a single row of RAID 5/6 */
5330 #if BITS_PER_LONG == 32
5331                 tmpdiv = first_block;
5332                 (void) do_div(tmpdiv, stripesize);
5333                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5334                 tmpdiv = last_block;
5335                 (void) do_div(tmpdiv, stripesize);
5336                 r5or6_last_row = r0_last_row = tmpdiv;
5337 #else
5338                 first_row = r5or6_first_row = r0_first_row =
5339                                                 first_block / stripesize;
5340                 r5or6_last_row = r0_last_row = last_block / stripesize;
5341 #endif
5342                 if (r5or6_first_row != r5or6_last_row)
5343                         return IO_ACCEL_INELIGIBLE;
5344
5345
5346                 /* Verify request is in a single column */
5347 #if BITS_PER_LONG == 32
5348                 tmpdiv = first_block;
5349                 first_row_offset = do_div(tmpdiv, stripesize);
5350                 tmpdiv = first_row_offset;
5351                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5352                 r5or6_first_row_offset = first_row_offset;
5353                 tmpdiv = last_block;
5354                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5355                 tmpdiv = r5or6_last_row_offset;
5356                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5357                 tmpdiv = r5or6_first_row_offset;
5358                 (void) do_div(tmpdiv, map->strip_size);
5359                 first_column = r5or6_first_column = tmpdiv;
5360                 tmpdiv = r5or6_last_row_offset;
5361                 (void) do_div(tmpdiv, map->strip_size);
5362                 r5or6_last_column = tmpdiv;
5363 #else
5364                 first_row_offset = r5or6_first_row_offset =
5365                         (u32)((first_block % stripesize) %
5366                                                 r5or6_blocks_per_row);
5367
5368                 r5or6_last_row_offset =
5369                         (u32)((last_block % stripesize) %
5370                                                 r5or6_blocks_per_row);
5371
5372                 first_column = r5or6_first_column =
5373                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5374                 r5or6_last_column =
5375                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5376 #endif
5377                 if (r5or6_first_column != r5or6_last_column)
5378                         return IO_ACCEL_INELIGIBLE;
5379
5380                 /* Request is eligible */
5381                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5382                         le16_to_cpu(map->row_cnt);
5383
5384                 map_index = (first_group *
5385                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5386                         (map_row * total_disks_per_row) + first_column;
5387                 break;
5388         default:
5389                 return IO_ACCEL_INELIGIBLE;
5390         }
5391
5392         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5393                 return IO_ACCEL_INELIGIBLE;
5394
5395         c->phys_disk = dev->phys_disk[map_index];
5396         if (!c->phys_disk)
5397                 return IO_ACCEL_INELIGIBLE;
5398
5399         disk_handle = dd[map_index].ioaccel_handle;
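             /*
              * Translate the logical start block to a block on the selected
              * physical disk: the disk's starting block, plus the full rows
              * above this one, plus the offset within this row minus the
              * strips belonging to earlier columns.
              */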
5400         disk_block = le64_to_cpu(map->disk_starting_blk) +
5401                         first_row * le16_to_cpu(map->strip_size) +
5402                         (first_row_offset - first_column *
5403                         le16_to_cpu(map->strip_size));
5404         disk_block_cnt = block_cnt;
5405
5406         /* handle differing logical/physical block sizes */
5407         if (map->phys_blk_shift) {
5408                 disk_block <<= map->phys_blk_shift;
5409                 disk_block_cnt <<= map->phys_blk_shift;
5410         }
5411         BUG_ON(disk_block_cnt > 0xffff);
5412
5413         /* build the new CDB for the physical disk I/O */
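             /*
              * A 10-byte CDB can only carry a 32-bit LBA, so fall back to the
              * 16-byte READ/WRITE variants for larger disk block addresses.
              */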
5414         if (disk_block > 0xffffffff) {
5415                 cdb[0] = is_write ? WRITE_16 : READ_16;
5416                 cdb[1] = 0;
5417                 cdb[2] = (u8) (disk_block >> 56);
5418                 cdb[3] = (u8) (disk_block >> 48);
5419                 cdb[4] = (u8) (disk_block >> 40);
5420                 cdb[5] = (u8) (disk_block >> 32);
5421                 cdb[6] = (u8) (disk_block >> 24);
5422                 cdb[7] = (u8) (disk_block >> 16);
5423                 cdb[8] = (u8) (disk_block >> 8);
5424                 cdb[9] = (u8) (disk_block);
5425                 cdb[10] = (u8) (disk_block_cnt >> 24);
5426                 cdb[11] = (u8) (disk_block_cnt >> 16);
5427                 cdb[12] = (u8) (disk_block_cnt >> 8);
5428                 cdb[13] = (u8) (disk_block_cnt);
5429                 cdb[14] = 0;
5430                 cdb[15] = 0;
5431                 cdb_len = 16;
5432         } else {
5433                 cdb[0] = is_write ? WRITE_10 : READ_10;
5434                 cdb[1] = 0;
5435                 cdb[2] = (u8) (disk_block >> 24);
5436                 cdb[3] = (u8) (disk_block >> 16);
5437                 cdb[4] = (u8) (disk_block >> 8);
5438                 cdb[5] = (u8) (disk_block);
5439                 cdb[6] = 0;
5440                 cdb[7] = (u8) (disk_block_cnt >> 8);
5441                 cdb[8] = (u8) (disk_block_cnt);
5442                 cdb[9] = 0;
5443                 cdb_len = 10;
5444         }
5445         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5446                                                 dev->scsi3addr,
5447                                                 dev->phys_disk[map_index]);
5448 }
5449
5450 /*
5451  * Submit commands down the "normal" RAID stack path.
5452  * All callers to hpsa_ciss_submit must check lockup_detected
5453  * beforehand, before (and optionally after) calling cmd_alloc.
5454  */
5455 static int hpsa_ciss_submit(struct ctlr_info *h,
5456         struct CommandList *c, struct scsi_cmnd *cmd,
5457         struct hpsa_scsi_dev_t *dev)
5458 {
5459         cmd->host_scribble = (unsigned char *) c;
5460         c->cmd_type = CMD_SCSI;
5461         c->scsi_cmd = cmd;
5462         c->Header.ReplyQueue = 0;  /* unused in simple mode */
5463         memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
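             /* The tag encodes the command index so the completion path can look the command up directly. */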
5464         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5465
5466         /* Fill in the request block... */
5467
5468         c->Request.Timeout = 0;
5469         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5470         c->Request.CDBLen = cmd->cmd_len;
5471         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5472         switch (cmd->sc_data_direction) {
5473         case DMA_TO_DEVICE:
5474                 c->Request.type_attr_dir =
5475                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5476                 break;
5477         case DMA_FROM_DEVICE:
5478                 c->Request.type_attr_dir =
5479                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5480                 break;
5481         case DMA_NONE:
5482                 c->Request.type_attr_dir =
5483                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5484                 break;
5485         case DMA_BIDIRECTIONAL:
5486                 /* This can happen if a buggy application does a scsi passthru
5487                  * and sets both inlen and outlen to non-zero. ( see
5488                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
5489                  */
5490
5491                 c->Request.type_attr_dir =
5492                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5493                 /* This is technically wrong, and hpsa controllers should
5494                  * reject it with CMD_INVALID, which is the most correct
5495                  * response, but non-fibre backends appear to let it
5496                  * slide by, and give the same results as if this field
5497                  * were set correctly.  Either way is acceptable for
5498                  * our purposes here.
5499                  */
5500
5501                 break;
5502
5503         default:
5504                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5505                         cmd->sc_data_direction);
5506                 BUG();
5507                 break;
5508         }
5509
5510         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5511                 hpsa_cmd_resolve_and_free(h, c);
5512                 return SCSI_MLQUEUE_HOST_BUSY;
5513         }
5514
5515         if (dev->in_reset) {
5516                 hpsa_cmd_resolve_and_free(h, c);
5517                 return SCSI_MLQUEUE_HOST_BUSY;
5518         }
5519
5520         c->device = dev;
5521
5522         enqueue_cmd_and_start_io(h, c);
5523         /* the cmd will come back via the interrupt handler in complete_scsi_command() */
5524         return 0;
5525 }
5526
5527 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5528                                 struct CommandList *c)
5529 {
5530         dma_addr_t cmd_dma_handle, err_dma_handle;
5531
5532         /* Zero out all of the CommandList except the last field, refcount */
5533         memset(c, 0, offsetof(struct CommandList, refcount));
5534         c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5535         cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5536         c->err_info = h->errinfo_pool + index;
5537         memset(c->err_info, 0, sizeof(*c->err_info));
5538         err_dma_handle = h->errinfo_pool_dhandle
5539             + index * sizeof(*c->err_info);
5540         c->cmdindex = index;
5541         c->busaddr = (u32) cmd_dma_handle;
5542         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5543         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5544         c->h = h;
5545         c->scsi_cmd = SCSI_CMD_IDLE;
5546 }
5547
5548 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5549 {
5550         int i;
5551
5552         for (i = 0; i < h->nr_cmds; i++) {
5553                 struct CommandList *c = h->cmd_pool + i;
5554
5555                 hpsa_cmd_init(h, i, c);
5556                 atomic_set(&c->refcount, 0);
5557         }
5558 }
5559
5560 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5561                                 struct CommandList *c)
5562 {
5563         dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5564
5565         BUG_ON(c->cmdindex != index);
5566
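             /*
              * Re-initialize only the fields a previous use may have dirtied;
              * the rest were set up once by hpsa_cmd_init().
              */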
5567         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5568         memset(c->err_info, 0, sizeof(*c->err_info));
5569         c->busaddr = (u32) cmd_dma_handle;
5570 }
5571
5572 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5573                 struct CommandList *c, struct scsi_cmnd *cmd,
5574                 bool retry)
5575 {
5576         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5577         int rc = IO_ACCEL_INELIGIBLE;
5578
5579         if (!dev)
5580                 return SCSI_MLQUEUE_HOST_BUSY;
5581
5582         if (dev->in_reset)
5583                 return SCSI_MLQUEUE_HOST_BUSY;
5584
5585         if (hpsa_simple_mode)
5586                 return IO_ACCEL_INELIGIBLE;
5587
5588         cmd->host_scribble = (unsigned char *) c;
5589
5590         if (dev->offload_enabled) {
5591                 hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
5592                 c->cmd_type = CMD_SCSI;
5593                 c->scsi_cmd = cmd;
5594                 c->device = dev;
5595                 if (retry) /* Resubmit but do not increment device->commands_outstanding. */
5596                         c->retry_pending = true;
5597                 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5598                 if (rc < 0)     /* scsi_dma_map failed. */
5599                         rc = SCSI_MLQUEUE_HOST_BUSY;
5600         } else if (dev->hba_ioaccel_enabled) {
5601                 hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
5602                 c->cmd_type = CMD_SCSI;
5603                 c->scsi_cmd = cmd;
5604                 c->device = dev;
5605                 if (retry) /* Resubmit but do not increment device->commands_outstanding. */
5606                         c->retry_pending = true;
5607                 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5608                 if (rc < 0)     /* scsi_dma_map failed. */
5609                         rc = SCSI_MLQUEUE_HOST_BUSY;
5610         }
5611         return rc;
5612 }
5613
5614 static void hpsa_command_resubmit_worker(struct work_struct *work)
5615 {
5616         struct scsi_cmnd *cmd;
5617         struct hpsa_scsi_dev_t *dev;
5618         struct CommandList *c = container_of(work, struct CommandList, work);
5619
5620         cmd = c->scsi_cmd;
5621         dev = cmd->device->hostdata;
5622         if (!dev) {
5623                 cmd->result = DID_NO_CONNECT << 16;
5624                 return hpsa_cmd_free_and_done(c->h, c, cmd);
5625         }
5626
5627         if (dev->in_reset) {
5628                 cmd->result = DID_RESET << 16;
5629                 return hpsa_cmd_free_and_done(c->h, c, cmd);
5630         }
5631
5632         if (c->cmd_type == CMD_IOACCEL2) {
5633                 struct ctlr_info *h = c->h;
5634                 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5635                 int rc;
5636
5637                 if (c2->error_data.serv_response ==
5638                                 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5639                         /* Resubmit with the retry_pending flag set. */
5640                         rc = hpsa_ioaccel_submit(h, c, cmd, true);
5641                         if (rc == 0)
5642                                 return;
5643                         if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5644                                 /*
5645                                  * If we get here, it means dma mapping failed.
5646                                  * Try again via scsi mid layer, which will
5647                                  * then get SCSI_MLQUEUE_HOST_BUSY.
5648                                  */
5649                                 cmd->result = DID_IMM_RETRY << 16;
5650                                 return hpsa_cmd_free_and_done(h, c, cmd);
5651                         }
5652                         /* else, fall thru and resubmit down CISS path */
5653                 }
5654         }
5655         hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5656         /*
5657          * We have not come in through queue_command, so we can set the
5658          * retry_pending flag to true to mark this as a driver-initiated
5659          * retry attempt (i.e. not an SML retry).
5660          * Note: hpsa_ciss_submit does not zero out the command fields like
5661          *       ioaccel submit does.
5662          */
5664         c->retry_pending = true;
5665         if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5666                 /*
5667                  * If we get here, it means dma mapping failed. Try
5668                  * again via scsi mid layer, which will then get
5669                  * SCSI_MLQUEUE_HOST_BUSY.
5670                  *
5671                  * hpsa_ciss_submit will have already freed c
5672                  * if it encountered a dma mapping failure.
5673                  */
5674                 cmd->result = DID_IMM_RETRY << 16;
5675                 cmd->scsi_done(cmd);
5676         }
5677 }
5678
5679 /* Runs without the struct Scsi_Host->host_lock held (host_lock-less mode) */
5680 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5681 {
5682         struct ctlr_info *h;
5683         struct hpsa_scsi_dev_t *dev;
5684         struct CommandList *c;
5685         int rc = 0;
5686
5687         /* Get the ptr to our adapter structure out of cmd->host. */
5688         h = sdev_to_hba(cmd->device);
5689
5690         BUG_ON(cmd->request->tag < 0);
5691
5692         dev = cmd->device->hostdata;
5693         if (!dev) {
5694                 cmd->result = DID_NO_CONNECT << 16;
5695                 cmd->scsi_done(cmd);
5696                 return 0;
5697         }
5698
5699         if (dev->removed) {
5700                 cmd->result = DID_NO_CONNECT << 16;
5701                 cmd->scsi_done(cmd);
5702                 return 0;
5703         }
5704
5705         if (unlikely(lockup_detected(h))) {
5706                 cmd->result = DID_NO_CONNECT << 16;
5707                 cmd->scsi_done(cmd);
5708                 return 0;
5709         }
5710
5711         if (dev->in_reset)
5712                 return SCSI_MLQUEUE_DEVICE_BUSY;
5713
5714         c = cmd_tagged_alloc(h, cmd);
5715         if (c == NULL)
5716                 return SCSI_MLQUEUE_DEVICE_BUSY;
5717
5718         /*
5719          * This is necessary because the SML doesn't zero out this field during
5720          * error recovery.
5721          */
5722         cmd->result = 0;
5723
5724         /*
5725          * Call alternate submit routine for I/O accelerated commands.
5726          * Retries always go down the normal I/O path.
5727          * Note: If cmd->retries is non-zero, then this is an SML
5728          *       initiated retry and not a driver initiated retry.
5729          *       This command has been obtained from cmd_tagged_alloc
5730          *       and is therefore a brand-new command.
5731          */
5732         if (likely(cmd->retries == 0 &&
5733                         !blk_rq_is_passthrough(cmd->request) &&
5734                         h->acciopath_status)) {
5735                 /* Submit with the retry_pending flag unset. */
5736                 rc = hpsa_ioaccel_submit(h, c, cmd, false);
5737                 if (rc == 0)
5738                         return 0;
5739                 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5740                         hpsa_cmd_resolve_and_free(h, c);
5741                         return SCSI_MLQUEUE_HOST_BUSY;
5742                 }
5743         }
5744         return hpsa_ciss_submit(h, c, cmd, dev);
5745 }
5746
5747 static void hpsa_scan_complete(struct ctlr_info *h)
5748 {
5749         unsigned long flags;
5750
5751         spin_lock_irqsave(&h->scan_lock, flags);
5752         h->scan_finished = 1;
5753         wake_up(&h->scan_wait_queue);
5754         spin_unlock_irqrestore(&h->scan_lock, flags);
5755 }
5756
5757 static void hpsa_scan_start(struct Scsi_Host *sh)
5758 {
5759         struct ctlr_info *h = shost_to_hba(sh);
5760         unsigned long flags;
5761
5762         /*
5763          * Don't let rescans be initiated on a controller known to be locked
5764          * up.  If the controller locks up *during* a rescan, that thread is
5765          * probably hosed, but at least we can prevent new rescan threads from
5766          * piling up on a locked up controller.
5767          */
5768         if (unlikely(lockup_detected(h)))
5769                 return hpsa_scan_complete(h);
5770
5771         /*
5772          * If a scan is already waiting to run, no need to add another
5773          */
5774         spin_lock_irqsave(&h->scan_lock, flags);
5775         if (h->scan_waiting) {
5776                 spin_unlock_irqrestore(&h->scan_lock, flags);
5777                 return;
5778         }
5779
5780         spin_unlock_irqrestore(&h->scan_lock, flags);
5781
5782         /* wait until any scan already in progress is finished. */
5783         while (1) {
5784                 spin_lock_irqsave(&h->scan_lock, flags);
5785                 if (h->scan_finished)
5786                         break;
5787                 h->scan_waiting = 1;
5788                 spin_unlock_irqrestore(&h->scan_lock, flags);
5789                 wait_event(h->scan_wait_queue, h->scan_finished);
5790                 /* Note: We don't need to worry about a race between this
5791                  * thread and driver unload because the midlayer will
5792                  * have incremented the reference count, so unload won't
5793                  * happen if we're in here.
5794                  */
5795         }
5796         h->scan_finished = 0; /* mark scan as in progress */
5797         h->scan_waiting = 0;
5798         spin_unlock_irqrestore(&h->scan_lock, flags);
5799
5800         if (unlikely(lockup_detected(h)))
5801                 return hpsa_scan_complete(h);
5802
5803         /*
5804          * Do the scan after a reset completion
5805          */
5806         spin_lock_irqsave(&h->reset_lock, flags);
5807         if (h->reset_in_progress) {
5808                 h->drv_req_rescan = 1;
5809                 spin_unlock_irqrestore(&h->reset_lock, flags);
5810                 hpsa_scan_complete(h);
5811                 return;
5812         }
5813         spin_unlock_irqrestore(&h->reset_lock, flags);
5814
5815         hpsa_update_scsi_devices(h);
5816
5817         hpsa_scan_complete(h);
5818 }
5819
5820 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5821 {
5822         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5823
5824         if (!logical_drive)
5825                 return -ENODEV;
5826
5827         if (qdepth < 1)
5828                 qdepth = 1;
5829         else if (qdepth > logical_drive->queue_depth)
5830                 qdepth = logical_drive->queue_depth;
5831
5832         return scsi_change_queue_depth(sdev, qdepth);
5833 }
5834
5835 static int hpsa_scan_finished(struct Scsi_Host *sh,
5836         unsigned long elapsed_time)
5837 {
5838         struct ctlr_info *h = shost_to_hba(sh);
5839         unsigned long flags;
5840         int finished;
5841
5842         spin_lock_irqsave(&h->scan_lock, flags);
5843         finished = h->scan_finished;
5844         spin_unlock_irqrestore(&h->scan_lock, flags);
5845         return finished;
5846 }
5847
5848 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5849 {
5850         struct Scsi_Host *sh;
5851
5852         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5853         if (sh == NULL) {
5854                 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5855                 return -ENOMEM;
5856         }
5857
5858         sh->io_port = 0;
5859         sh->n_io_port = 0;
5860         sh->this_id = -1;
5861         sh->max_channel = 3;
5862         sh->max_cmd_len = MAX_COMMAND_SIZE;
5863         sh->max_lun = HPSA_MAX_LUN;
5864         sh->max_id = HPSA_MAX_LUN;
5865         sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5866         sh->cmd_per_lun = sh->can_queue;
5867         sh->sg_tablesize = h->maxsgentries;
5868         sh->transportt = hpsa_sas_transport_template;
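             /* Stash the controller pointer so sdev_to_hba()/shost_to_hba() can recover it. */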
5869         sh->hostdata[0] = (unsigned long) h;
5870         sh->irq = pci_irq_vector(h->pdev, 0);
5871         sh->unique_id = sh->irq;
5872
5873         h->scsi_host = sh;
5874         return 0;
5875 }
5876
5877 static int hpsa_scsi_add_host(struct ctlr_info *h)
5878 {
5879         int rv;
5880
5881         rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5882         if (rv) {
5883                 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5884                 return rv;
5885         }
5886         scsi_scan_host(h->scsi_host);
5887         return 0;
5888 }
5889
5890 /*
5891  * The block layer has already gone to the trouble of picking out a unique,
5892  * small-integer tag for this request.  We use an offset from that value as
5893  * an index to select our command block.  (The offset allows us to reserve the
5894  * low-numbered entries for our own uses.)
5895  */
5896 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5897 {
5898         int idx = scmd->request->tag;
5899
5900         if (idx < 0)
5901                 return idx;
5902
5903         /* Offset to leave space for internal cmds. */
5904                 return idx + HPSA_NRESERVED_CMDS;
5905 }
5906
5907 /*
5908  * Send a TEST_UNIT_READY command to the specified LUN using the specified
5909  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5910  */
5911 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5912                                 struct CommandList *c, unsigned char lunaddr[],
5913                                 int reply_queue)
5914 {
5915         int rc;
5916
5917         /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5918         (void) fill_cmd(c, TEST_UNIT_READY, h,
5919                         NULL, 0, 0, lunaddr, TYPE_CMD);
5920         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5921         if (rc)
5922                 return rc;
5923         /* no unmap needed here because no data xfer. */
5924
5925         /* Check if the unit is already ready. */
5926         if (c->err_info->CommandStatus == CMD_SUCCESS)
5927                 return 0;
5928
5929         /*
5930          * The first command sent after reset will receive "unit attention" to
5931          * indicate that the LUN has been reset...this is actually what we're
5932          * looking for (but, success is good too).
5933          */
5934         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5935                 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5936                         (c->err_info->SenseInfo[2] == NO_SENSE ||
5937                          c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5938                 return 0;
5939
5940         return 1;
5941 }
5942
5943 /*
5944  * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5945  * returns zero when the unit is ready, and non-zero when giving up.
5946  */
5947 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5948                                 struct CommandList *c,
5949                                 unsigned char lunaddr[], int reply_queue)
5950 {
5951         int rc;
5952         int count = 0;
5953         int waittime = 1; /* seconds */
5954
5955         /* Send test unit ready until device ready, or give up. */
5956         for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5957
5958                 /*
5959                  * Wait for a bit.  do this first, because if we send
5960                  * the TUR right away, the reset will just abort it.
5961                  */
5962                 msleep(1000 * waittime);
5963
5964                 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5965                 if (!rc)
5966                         break;
5967
5968                 /* Increase wait time with each try, up to a point. */
5969                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5970                         waittime *= 2;
5971
5972                 dev_warn(&h->pdev->dev,
5973                          "waiting %d secs for device to become ready.\n",
5974                          waittime);
5975         }
5976
5977         return rc;
5978 }
5979
5980 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5981                                            unsigned char lunaddr[],
5982                                            int reply_queue)
5983 {
5984         int first_queue;
5985         int last_queue;
5986         int rq;
5987         int rc = 0;
5988         struct CommandList *c;
5989
5990         c = cmd_alloc(h);
5991
5992         /*
5993          * If no specific reply queue was requested, then send the TUR
5994          * repeatedly, requesting a reply on each reply queue; otherwise execute
5995          * the loop exactly once using only the specified queue.
5996          */
5997         if (reply_queue == DEFAULT_REPLY_QUEUE) {
5998                 first_queue = 0;
5999                 last_queue = h->nreply_queues - 1;
6000         } else {
6001                 first_queue = reply_queue;
6002                 last_queue = reply_queue;
6003         }
6004
6005         for (rq = first_queue; rq <= last_queue; rq++) {
6006                 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
6007                 if (rc)
6008                         break;
6009         }
6010
6011         if (rc)
6012                 dev_warn(&h->pdev->dev, "giving up on device.\n");
6013         else
6014                 dev_warn(&h->pdev->dev, "device is ready.\n");
6015
6016         cmd_free(h, c);
6017         return rc;
6018 }
6019
6020 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
6021  * complaining.  Doing a host- or bus-reset can't do anything good here.
6022  */
6023 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
6024 {
6025         int rc = SUCCESS;
6026         int i;
6027         struct ctlr_info *h;
6028         struct hpsa_scsi_dev_t *dev = NULL;
6029         u8 reset_type;
6030         char msg[48];
6031         unsigned long flags;
6032
6033         /* find the controller to which the command to be aborted was sent */
6034         h = sdev_to_hba(scsicmd->device);
6035         if (h == NULL) /* paranoia */
6036                 return FAILED;
6037
6038         spin_lock_irqsave(&h->reset_lock, flags);
6039         h->reset_in_progress = 1;
6040         spin_unlock_irqrestore(&h->reset_lock, flags);
6041
6042         if (lockup_detected(h)) {
6043                 rc = FAILED;
6044                 goto return_reset_status;
6045         }
6046
6047         dev = scsicmd->device->hostdata;
6048         if (!dev) {
6049                 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
6050                 rc = FAILED;
6051                 goto return_reset_status;
6052         }
6053
6054         if (dev->devtype == TYPE_ENCLOSURE) {
6055                 rc = SUCCESS;
6056                 goto return_reset_status;
6057         }
6058
6059         /* if controller locked up, we can guarantee command won't complete */
6060         if (lockup_detected(h)) {
6061                 snprintf(msg, sizeof(msg),
6062                          "cmd %d RESET FAILED, lockup detected",
6063                          hpsa_get_cmd_index(scsicmd));
6064                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6065                 rc = FAILED;
6066                 goto return_reset_status;
6067         }
6068
6069         /* this reset request might be the result of a lockup; check */
6070         if (detect_controller_lockup(h)) {
6071                 snprintf(msg, sizeof(msg),
6072                          "cmd %d RESET FAILED, new lockup detected",
6073                          hpsa_get_cmd_index(scsicmd));
6074                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6075                 rc = FAILED;
6076                 goto return_reset_status;
6077         }
6078
6079         /* Do not attempt on controller */
6080         if (is_hba_lunid(dev->scsi3addr)) {
6081                 rc = SUCCESS;
6082                 goto return_reset_status;
6083         }
6084
6085         if (is_logical_dev_addr_mode(dev->scsi3addr))
6086                 reset_type = HPSA_DEVICE_RESET_MSG;
6087         else
6088                 reset_type = HPSA_PHYS_TARGET_RESET;
6089
6090         sprintf(msg, "resetting %s",
6091                 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6092         hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6093
6094         /*
6095          * Wait up to 10 seconds for outstanding commands to complete before sending the reset
6096          */
6097         dev->in_reset = true; /* block any new cmds from OS for this device */
6098         for (i = 0; i < 10; i++) {
6099                 if (atomic_read(&dev->commands_outstanding) > 0)
6100                         msleep(1000);
6101                 else
6102                         break;
6103         }
6104
6105         /* send a reset to the SCSI LUN which the command was sent to */
6106         rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6107         if (rc == 0)
6108                 rc = SUCCESS;
6109         else
6110                 rc = FAILED;
6111
6112         sprintf(msg, "reset %s %s",
6113                 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6114                 rc == SUCCESS ? "completed successfully" : "failed");
6115         hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6116
6117 return_reset_status:
6118         spin_lock_irqsave(&h->reset_lock, flags);
6119         h->reset_in_progress = 0;
6120         if (dev)
6121                 dev->in_reset = false;
6122         spin_unlock_irqrestore(&h->reset_lock, flags);
6123         return rc;
6124 }
6125
6126 /*
6127  * For operations with an associated SCSI command, a command block is allocated
6128  * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
6129  * block request tag as an index into a table of entries.  cmd_tagged_free() is
6130  * the complement, although cmd_free() may be called instead.
6131  * This function is only called for new requests from queue_command.
6132  */
6133 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6134                                             struct scsi_cmnd *scmd)
6135 {
6136         int idx = hpsa_get_cmd_index(scmd);
6137         struct CommandList *c = h->cmd_pool + idx;
6138
6139         if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6140                 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6141                         idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6142                 /* The index value comes from the block layer, so if it's out of
6143                  * bounds, it's probably not our bug.
6144                  */
6145                 BUG();
6146         }
6147
6148         if (unlikely(!hpsa_is_cmd_idle(c))) {
6149                 /*
6150                  * We expect that the SCSI layer will hand us a unique tag
6151                  * value.  Thus, there should never be a collision here between
6152                  * two requests...because if the selected command isn't idle
6153                  * then someone is going to be very disappointed.
6154                  */
6155                 if (idx != h->last_collision_tag) { /* Print once per tag */
6156                         dev_warn(&h->pdev->dev,
6157                                 "%s: tag collision (tag=%d)\n", __func__, idx);
6158                         if (scmd)
6159                                 scsi_print_command(scmd);
6160                         h->last_collision_tag = idx;
6161                 }
6162                 return NULL;
6163         }
6164
6165         atomic_inc(&c->refcount);
6166         hpsa_cmd_partial_init(h, idx, c);
6167
6168         /*
6169          * This is a new command obtained from queue_command so
6170          * there have not been any driver initiated retry attempts.
6171          */
6172         c->retry_pending = false;
6173
6174         return c;
6175 }
6176
6177 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6178 {
6179         /*
6180          * Release our reference to the block.  We don't need to do anything
6181          * else to free it, because it is accessed by index.
6182          */
6183         (void)atomic_dec(&c->refcount);
6184 }
6185
6186 /*
6187  * For operations that cannot sleep, a command block is allocated at init,
6188  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6189  * which ones are free or in use.  Lock must be held when calling this.
6190  * cmd_free() is the complement.
6191  * This function never gives up; it never returns NULL.  If it hangs,
6192  * another thread must call cmd_free() to free some tags.
6193  */
6194
6195 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6196 {
6197         struct CommandList *c;
6198         int refcount, i;
6199         int offset = 0;
6200
6201         /*
6202          * There is some *extremely* small but non-zero chance that
6203          * multiple threads could get in here: one thread scans through
6204          * the bitmap looking for a free bit, but the free bits are
6205          * always behind it, and other threads sneak in and grab them
6206          * before it can get there.  So although a free bit always
6207          * exists, a very unlucky thread might be starved anyway, never
6208          * able to beat the other threads.  In reality, this happens so
6209          * infrequently as to be indistinguishable from never.
6211          *
6212          * Note that we start allocating commands before the SCSI host structure
6213          * is initialized.  Since the search starts at bit zero, this
6214          * all works, since we have at least one command structure available;
6215          * however, it means that the structures with the low indexes have to be
6216          * reserved for driver-initiated requests, while requests from the block
6217          * layer will use the higher indexes.
6218          */
6219
6220         for (;;) {
6221                 i = find_next_zero_bit(h->cmd_pool_bits,
6222                                         HPSA_NRESERVED_CMDS,
6223                                         offset);
6224                 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6225                         offset = 0;
6226                         continue;
6227                 }
6228                 c = h->cmd_pool + i;
6229                 refcount = atomic_inc_return(&c->refcount);
6230                 if (unlikely(refcount > 1)) {
6231                         cmd_free(h, c); /* already in use */
6232                         offset = (i + 1) % HPSA_NRESERVED_CMDS;
6233                         continue;
6234                 }
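                     /* Mark slot i as in use: bit (i % BITS_PER_LONG) of word (i / BITS_PER_LONG). */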
6235                 set_bit(i & (BITS_PER_LONG - 1),
6236                         h->cmd_pool_bits + (i / BITS_PER_LONG));
6237                 break; /* it's ours now. */
6238         }
6239         hpsa_cmd_partial_init(h, i, c);
6240         c->device = NULL;
6241
6242         /*
6243          * cmd_alloc is for "internal" commands and they are never
6244          * retried.
6245          */
6246         c->retry_pending = false;
6247
6248         return c;
6249 }
6250
6251 /*
6252  * This is the complementary operation to cmd_alloc().  Note, however, in some
6253  * corner cases it may also be used to free blocks allocated by
6254  * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6255  * the clear-bit is harmless.
6256  */
6257 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6258 {
6259         if (atomic_dec_and_test(&c->refcount)) {
6260                 int i;
6261
6262                 i = c - h->cmd_pool;
6263                 clear_bit(i & (BITS_PER_LONG - 1),
6264                           h->cmd_pool_bits + (i / BITS_PER_LONG));
6265         }
6266 }
6267
6268 #ifdef CONFIG_COMPAT
6269
6270 static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6271         void __user *arg)
6272 {
6273         struct ctlr_info *h = sdev_to_hba(dev);
6274         IOCTL32_Command_struct __user *arg32 = arg;
6275         IOCTL_Command_struct arg64;
6276         int err;
6277         u32 cp;
6278
6279         if (!arg)
6280                 return -EINVAL;
6281
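             /*
              * Copy the fields that share a layout up to 'buf' directly, then
              * convert the 32-bit user-space buffer pointer with compat_ptr().
              */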
6282         memset(&arg64, 0, sizeof(arg64));
6283         if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
6284                 return -EFAULT;
6285         if (get_user(cp, &arg32->buf))
6286                 return -EFAULT;
6287         arg64.buf = compat_ptr(cp);
6288
6289         if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6290                 return -EAGAIN;
6291         err = hpsa_passthru_ioctl(h, &arg64);
6292         atomic_inc(&h->passthru_cmds_avail);
6293         if (err)
6294                 return err;
6295         if (copy_to_user(&arg32->error_info, &arg64.error_info,
6296                          sizeof(arg32->error_info)))
6297                 return -EFAULT;
6298         return 0;
6299 }
6300
6301 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6302         unsigned int cmd, void __user *arg)
6303 {
6304         struct ctlr_info *h = sdev_to_hba(dev);
6305         BIG_IOCTL32_Command_struct __user *arg32 = arg;
6306         BIG_IOCTL_Command_struct arg64;
6307         int err;
6308         u32 cp;
6309
6310         if (!arg)
6311                 return -EINVAL;
6312         memset(&arg64, 0, sizeof(arg64));
6313         if (copy_from_user(&arg64, arg32,
6314                            offsetof(BIG_IOCTL32_Command_struct, buf)))
6315                 return -EFAULT;
6316         if (get_user(cp, &arg32->buf))
6317                 return -EFAULT;
6318         arg64.buf = compat_ptr(cp);
6319
6320         if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6321                 return -EAGAIN;
6322         err = hpsa_big_passthru_ioctl(h, &arg64);
6323         atomic_inc(&h->passthru_cmds_avail);
6324         if (err)
6325                 return err;
6326         if (copy_to_user(&arg32->error_info, &arg64.error_info,
6327                          sizeof(arg32->error_info)))
6328                 return -EFAULT;
6329         return 0;
6330 }
6331
6332 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6333                              void __user *arg)
6334 {
6335         switch (cmd) {
6336         case CCISS_GETPCIINFO:
6337         case CCISS_GETINTINFO:
6338         case CCISS_SETINTINFO:
6339         case CCISS_GETNODENAME:
6340         case CCISS_SETNODENAME:
6341         case CCISS_GETHEARTBEAT:
6342         case CCISS_GETBUSTYPES:
6343         case CCISS_GETFIRMVER:
6344         case CCISS_GETDRIVVER:
6345         case CCISS_REVALIDVOLS:
6346         case CCISS_DEREGDISK:
6347         case CCISS_REGNEWDISK:
6348         case CCISS_REGNEWD:
6349         case CCISS_RESCANDISK:
6350         case CCISS_GETLUNINFO:
6351                 return hpsa_ioctl(dev, cmd, arg);
6352
6353         case CCISS_PASSTHRU32:
6354                 return hpsa_ioctl32_passthru(dev, cmd, arg);
6355         case CCISS_BIG_PASSTHRU32:
6356                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6357
6358         default:
6359                 return -ENOIOCTLCMD;
6360         }
6361 }
6362 #endif
6363
6364 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6365 {
6366         struct hpsa_pci_info pciinfo;
6367
6368         if (!argp)
6369                 return -EINVAL;
6370         pciinfo.domain = pci_domain_nr(h->pdev->bus);
6371         pciinfo.bus = h->pdev->bus->number;
6372         pciinfo.dev_fn = h->pdev->devfn;
6373         pciinfo.board_id = h->board_id;
6374         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6375                 return -EFAULT;
6376         return 0;
6377 }
6378
6379 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6380 {
6381         DriverVer_type DriverVer;
6382         unsigned char vmaj, vmin, vsubmin;
6383         int rc;
6384
6385         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6386                 &vmaj, &vmin, &vsubmin);
6387         if (rc != 3) {
6388                 dev_info(&h->pdev->dev, "driver version string '%s' "
6389                         "unrecognized.\n", HPSA_DRIVER_VERSION);
6390                 vmaj = 0;
6391                 vmin = 0;
6392                 vsubmin = 0;
6393         }
6394         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6395         if (!argp)
6396                 return -EINVAL;
6397         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6398                 return -EFAULT;
6399         return 0;
6400 }
6401
6402 static int hpsa_passthru_ioctl(struct ctlr_info *h,
6403                                IOCTL_Command_struct *iocommand)
6404 {
6405         struct CommandList *c;
6406         char *buff = NULL;
6407         u64 temp64;
6408         int rc = 0;
6409
6410         if (!capable(CAP_SYS_RAWIO))
6411                 return -EPERM;
6412         if ((iocommand->buf_size < 1) &&
6413             (iocommand->Request.Type.Direction != XFER_NONE)) {
6414                 return -EINVAL;
6415         }
6416         if (iocommand->buf_size > 0) {
6417                 buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
6418                 if (buff == NULL)
6419                         return -ENOMEM;
6420                 if (iocommand->Request.Type.Direction & XFER_WRITE) {
6421                         /* Copy the data into the buffer we created */
6422                         if (copy_from_user(buff, iocommand->buf,
6423                                 iocommand->buf_size)) {
6424                                 rc = -EFAULT;
6425                                 goto out_kfree;
6426                         }
6427                 } else {
6428                         memset(buff, 0, iocommand->buf_size);
6429                 }
6430         }
6431         c = cmd_alloc(h);
6432
6433         /* Fill in the command type */
6434         c->cmd_type = CMD_IOCTL_PEND;
6435         c->scsi_cmd = SCSI_CMD_BUSY;
6436         /* Fill in Command Header */
6437         c->Header.ReplyQueue = 0; /* unused in simple mode */
6438         if (iocommand->buf_size > 0) {  /* buffer to fill */
6439                 c->Header.SGList = 1;
6440                 c->Header.SGTotal = cpu_to_le16(1);
6441         } else  { /* no buffers to fill */
6442                 c->Header.SGList = 0;
6443                 c->Header.SGTotal = cpu_to_le16(0);
6444         }
6445         memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
6446
6447         /* Fill in Request block */
6448         memcpy(&c->Request, &iocommand->Request,
6449                 sizeof(c->Request));
6450
6451         /* Fill in the scatter gather information */
6452         if (iocommand->buf_size > 0) {
6453                 temp64 = dma_map_single(&h->pdev->dev, buff,
6454                         iocommand->buf_size, DMA_BIDIRECTIONAL);
6455                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6456                         c->SG[0].Addr = cpu_to_le64(0);
6457                         c->SG[0].Len = cpu_to_le32(0);
6458                         rc = -ENOMEM;
6459                         goto out;
6460                 }
6461                 c->SG[0].Addr = cpu_to_le64(temp64);
6462                 c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
6463                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6464         }
6465         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6466                                         NO_TIMEOUT);
6467         if (iocommand->buf_size > 0)
6468                 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6469         check_ioctl_unit_attention(h, c);
6470         if (rc) {
6471                 rc = -EIO;
6472                 goto out;
6473         }
6474
6475         /* Copy the error information out */
6476         memcpy(&iocommand->error_info, c->err_info,
6477                 sizeof(iocommand->error_info));
6478         if ((iocommand->Request.Type.Direction & XFER_READ) &&
6479                 iocommand->buf_size > 0) {
6480                 /* Copy the data out of the buffer we created */
6481                 if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
6482                         rc = -EFAULT;
6483                         goto out;
6484                 }
6485         }
6486 out:
6487         cmd_free(h, c);
6488 out_kfree:
6489         kfree(buff);
6490         return rc;
6491 }
6492
6493 static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
6494                                    BIG_IOCTL_Command_struct *ioc)
6495 {
6496         struct CommandList *c;
6497         unsigned char **buff = NULL;
6498         int *buff_size = NULL;
6499         u64 temp64;
6500         BYTE sg_used = 0;
6501         int status = 0;
6502         u32 left;
6503         u32 sz;
6504         BYTE __user *data_ptr;
6505
6506         if (!capable(CAP_SYS_RAWIO))
6507                 return -EPERM;
6508
6509         if ((ioc->buf_size < 1) &&
6510             (ioc->Request.Type.Direction != XFER_NONE))
6511                 return -EINVAL;
6512         /* Check kmalloc limits  using all SGs */
6513         if (ioc->malloc_size > MAX_KMALLOC_SIZE)
6514                 return -EINVAL;
6515         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
6516                 return -EINVAL;
6517         buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6518         if (!buff) {
6519                 status = -ENOMEM;
6520                 goto cleanup1;
6521         }
6522         buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6523         if (!buff_size) {
6524                 status = -ENOMEM;
6525                 goto cleanup1;
6526         }
6527         left = ioc->buf_size;
6528         data_ptr = ioc->buf;
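             /* Split the user buffer into malloc_size-sized chunks, one kernel buffer per SG entry. */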
6529         while (left) {
6530                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6531                 buff_size[sg_used] = sz;
6532                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6533                 if (buff[sg_used] == NULL) {
6534                         status = -ENOMEM;
6535                         goto cleanup1;
6536                 }
6537                 if (ioc->Request.Type.Direction & XFER_WRITE) {
6538                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6539                                 status = -EFAULT;
6540                                 goto cleanup1;
6541                         }
6542                 } else
6543                         memset(buff[sg_used], 0, sz);
6544                 left -= sz;
6545                 data_ptr += sz;
6546                 sg_used++;
6547         }
6548         c = cmd_alloc(h);
6549
6550         c->cmd_type = CMD_IOCTL_PEND;
6551         c->scsi_cmd = SCSI_CMD_BUSY;
6552         c->Header.ReplyQueue = 0;
6553         c->Header.SGList = (u8) sg_used;
6554         c->Header.SGTotal = cpu_to_le16(sg_used);
6555         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6556         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6557         if (ioc->buf_size > 0) {
6558                 int i;
6559                 for (i = 0; i < sg_used; i++) {
6560                         temp64 = dma_map_single(&h->pdev->dev, buff[i],
6561                                     buff_size[i], DMA_BIDIRECTIONAL);
6562                         if (dma_mapping_error(&h->pdev->dev,
6563                                                         (dma_addr_t) temp64)) {
6564                                 c->SG[i].Addr = cpu_to_le64(0);
6565                                 c->SG[i].Len = cpu_to_le32(0);
6566                                 hpsa_pci_unmap(h->pdev, c, i,
6567                                         DMA_BIDIRECTIONAL);
6568                                 status = -ENOMEM;
6569                                 goto cleanup0;
6570                         }
6571                         c->SG[i].Addr = cpu_to_le64(temp64);
6572                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
6573                         c->SG[i].Ext = cpu_to_le32(0);
6574                 }
6575                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6576         }
6577         status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6578                                                 NO_TIMEOUT);
6579         if (sg_used)
6580                 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6581         check_ioctl_unit_attention(h, c);
6582         if (status) {
6583                 status = -EIO;
6584                 goto cleanup0;
6585         }
6586
6587         /* Copy the error information out */
6588         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6589         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6590                 int i;
6591
6592                 /* Copy the data out of the buffer we created */
6593                 BYTE __user *ptr = ioc->buf;
6594                 for (i = 0; i < sg_used; i++) {
6595                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
6596                                 status = -EFAULT;
6597                                 goto cleanup0;
6598                         }
6599                         ptr += buff_size[i];
6600                 }
6601         }
6602         status = 0;
6603 cleanup0:
6604         cmd_free(h, c);
6605 cleanup1:
6606         if (buff) {
6607                 int i;
6608
6609                 for (i = 0; i < sg_used; i++)
6610                         kfree(buff[i]);
6611                 kfree(buff);
6612         }
6613         kfree(buff_size);
6614         return status;
6615 }
6616
6617 static void check_ioctl_unit_attention(struct ctlr_info *h,
6618         struct CommandList *c)
6619 {
6620         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6621                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6622                 (void) check_for_unit_attention(h, c);
6623 }
6624
6625 /*
6626  * ioctl
6627  */
6628 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6629                       void __user *argp)
6630 {
6631         struct ctlr_info *h = sdev_to_hba(dev);
6632         int rc;
6633
6634         switch (cmd) {
6635         case CCISS_DEREGDISK:
6636         case CCISS_REGNEWDISK:
6637         case CCISS_REGNEWD:
6638                 hpsa_scan_start(h->scsi_host);
6639                 return 0;
6640         case CCISS_GETPCIINFO:
6641                 return hpsa_getpciinfo_ioctl(h, argp);
6642         case CCISS_GETDRIVVER:
6643                 return hpsa_getdrivver_ioctl(h, argp);
6644         case CCISS_PASSTHRU: {
6645                 IOCTL_Command_struct iocommand;
6646
6647                 if (!argp)
6648                         return -EINVAL;
6649                 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6650                         return -EFAULT;
6651                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6652                         return -EAGAIN;
6653                 rc = hpsa_passthru_ioctl(h, &iocommand);
6654                 atomic_inc(&h->passthru_cmds_avail);
6655                 if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
6656                         rc = -EFAULT;
6657                 return rc;
6658         }
6659         case CCISS_BIG_PASSTHRU: {
6660                 BIG_IOCTL_Command_struct ioc;
6661                 if (!argp)
6662                         return -EINVAL;
6663                 if (copy_from_user(&ioc, argp, sizeof(ioc)))
6664                         return -EFAULT;
6665                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6666                         return -EAGAIN;
6667                 rc = hpsa_big_passthru_ioctl(h, &ioc);
6668                 atomic_inc(&h->passthru_cmds_avail);
6669                 if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
6670                         rc = -EFAULT;
6671                 return rc;
6672         }
6673         default:
6674                 return -ENOTTY;
6675         }
6676 }
6677
6678 static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6679 {
6680         struct CommandList *c;
6681
6682         c = cmd_alloc(h);
6683
6684         /* fill_cmd can't fail here, no data buffer to map */
6685         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6686                 RAID_CTLR_LUNID, TYPE_MSG);
6687         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6688         c->waiting = NULL;
6689         enqueue_cmd_and_start_io(h, c);
6690         /* Don't wait for completion, the reset won't complete.  Don't free
6691          * the command either.  This is the last command we will send before
6692          * re-initializing everything, so it doesn't matter and won't leak.
6693          */
6694         return;
6695 }
6696
6697 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6698         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6699         int cmd_type)
6700 {
6701         enum dma_data_direction dir = DMA_NONE;
6702
6703         c->cmd_type = CMD_IOCTL_PEND;
6704         c->scsi_cmd = SCSI_CMD_BUSY;
6705         c->Header.ReplyQueue = 0;
6706         if (buff != NULL && size > 0) {
6707                 c->Header.SGList = 1;
6708                 c->Header.SGTotal = cpu_to_le16(1);
6709         } else {
6710                 c->Header.SGList = 0;
6711                 c->Header.SGTotal = cpu_to_le16(0);
6712         }
6713         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6714
6715         if (cmd_type == TYPE_CMD) {
6716                 switch (cmd) {
6717                 case HPSA_INQUIRY:
6718                         /* are we trying to read a vital product page */
6719                         if (page_code & VPD_PAGE) {
6720                                 c->Request.CDB[1] = 0x01;
6721                                 c->Request.CDB[2] = (page_code & 0xff);
6722                         }
6723                         c->Request.CDBLen = 6;
6724                         c->Request.type_attr_dir =
6725                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6726                         c->Request.Timeout = 0;
6727                         c->Request.CDB[0] = HPSA_INQUIRY;
6728                         c->Request.CDB[4] = size & 0xFF;
6729                         break;
6730                 case RECEIVE_DIAGNOSTIC:
6731                         c->Request.CDBLen = 6;
6732                         c->Request.type_attr_dir =
6733                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6734                         c->Request.Timeout = 0;
6735                         c->Request.CDB[0] = cmd;
6736                         c->Request.CDB[1] = 1;
6737                         c->Request.CDB[2] = 1;
6738                         c->Request.CDB[3] = (size >> 8) & 0xFF;
6739                         c->Request.CDB[4] = size & 0xFF;
6740                         break;
6741                 case HPSA_REPORT_LOG:
6742                 case HPSA_REPORT_PHYS:
6743                         /* Talking to the controller, so it's a physical
6744                            command: mode = 00, target = 0.  Nothing to write.
6745                          */
6746                         c->Request.CDBLen = 12;
6747                         c->Request.type_attr_dir =
6748                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6749                         c->Request.Timeout = 0;
6750                         c->Request.CDB[0] = cmd;
6751                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6752                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6753                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6754                         c->Request.CDB[9] = size & 0xFF;
6755                         break;
6756                 case BMIC_SENSE_DIAG_OPTIONS:
6757                         c->Request.CDBLen = 16;
6758                         c->Request.type_attr_dir =
6759                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6760                         c->Request.Timeout = 0;
6761                         /* Spec says this should be BMIC_WRITE */
6762                         c->Request.CDB[0] = BMIC_READ;
6763                         c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6764                         break;
6765                 case BMIC_SET_DIAG_OPTIONS:
6766                         c->Request.CDBLen = 16;
6767                         c->Request.type_attr_dir =
6768                                         TYPE_ATTR_DIR(cmd_type,
6769                                                 ATTR_SIMPLE, XFER_WRITE);
6770                         c->Request.Timeout = 0;
6771                         c->Request.CDB[0] = BMIC_WRITE;
6772                         c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6773                         break;
6774                 case HPSA_CACHE_FLUSH:
6775                         c->Request.CDBLen = 12;
6776                         c->Request.type_attr_dir =
6777                                         TYPE_ATTR_DIR(cmd_type,
6778                                                 ATTR_SIMPLE, XFER_WRITE);
6779                         c->Request.Timeout = 0;
6780                         c->Request.CDB[0] = BMIC_WRITE;
6781                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6782                         c->Request.CDB[7] = (size >> 8) & 0xFF;
6783                         c->Request.CDB[8] = size & 0xFF;
6784                         break;
6785                 case TEST_UNIT_READY:
6786                         c->Request.CDBLen = 6;
6787                         c->Request.type_attr_dir =
6788                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6789                         c->Request.Timeout = 0;
6790                         break;
6791                 case HPSA_GET_RAID_MAP:
6792                         c->Request.CDBLen = 12;
6793                         c->Request.type_attr_dir =
6794                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6795                         c->Request.Timeout = 0;
6796                         c->Request.CDB[0] = HPSA_CISS_READ;
6797                         c->Request.CDB[1] = cmd;
6798                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6799                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6800                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6801                         c->Request.CDB[9] = size & 0xFF;
6802                         break;
6803                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6804                         c->Request.CDBLen = 10;
6805                         c->Request.type_attr_dir =
6806                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6807                         c->Request.Timeout = 0;
6808                         c->Request.CDB[0] = BMIC_READ;
6809                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6810                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6811                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6812                         break;
6813                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6814                         c->Request.CDBLen = 10;
6815                         c->Request.type_attr_dir =
6816                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6817                         c->Request.Timeout = 0;
6818                         c->Request.CDB[0] = BMIC_READ;
6819                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6820                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6821                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6822                         break;
6823                 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6824                         c->Request.CDBLen = 10;
6825                         c->Request.type_attr_dir =
6826                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6827                         c->Request.Timeout = 0;
6828                         c->Request.CDB[0] = BMIC_READ;
6829                         c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6830                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6831                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6832                         break;
6833                 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6834                         c->Request.CDBLen = 10;
6835                         c->Request.type_attr_dir =
6836                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6837                         c->Request.Timeout = 0;
6838                         c->Request.CDB[0] = BMIC_READ;
6839                         c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6840                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6841                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6842                         break;
6843                 case BMIC_IDENTIFY_CONTROLLER:
6844                         c->Request.CDBLen = 10;
6845                         c->Request.type_attr_dir =
6846                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6847                         c->Request.Timeout = 0;
6848                         c->Request.CDB[0] = BMIC_READ;
6849                         c->Request.CDB[1] = 0;
6850                         c->Request.CDB[2] = 0;
6851                         c->Request.CDB[3] = 0;
6852                         c->Request.CDB[4] = 0;
6853                         c->Request.CDB[5] = 0;
6854                         c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6855                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6856                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6857                         c->Request.CDB[9] = 0;
6858                         break;
6859                 default:
6860                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6861                         BUG();
6862                 }
6863         } else if (cmd_type == TYPE_MSG) {
6864                 switch (cmd) {
6865
6866                 case  HPSA_PHYS_TARGET_RESET:
6867                         c->Request.CDBLen = 16;
6868                         c->Request.type_attr_dir =
6869                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6870                         c->Request.Timeout = 0; /* Don't time out */
6871                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6872                         c->Request.CDB[0] = HPSA_RESET;
6873                         c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6874                         /* Physical target reset needs no control bytes 4-7*/
6875                         c->Request.CDB[4] = 0x00;
6876                         c->Request.CDB[5] = 0x00;
6877                         c->Request.CDB[6] = 0x00;
6878                         c->Request.CDB[7] = 0x00;
6879                         break;
6880                 case  HPSA_DEVICE_RESET_MSG:
6881                         c->Request.CDBLen = 16;
6882                         c->Request.type_attr_dir =
6883                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6884                         c->Request.Timeout = 0; /* Don't time out */
6885                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6886                         c->Request.CDB[0] =  cmd;
6887                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6888                         /* If bytes 4-7 are zero, it means reset the */
6889                         /* LunID device */
6890                         c->Request.CDB[4] = 0x00;
6891                         c->Request.CDB[5] = 0x00;
6892                         c->Request.CDB[6] = 0x00;
6893                         c->Request.CDB[7] = 0x00;
6894                         break;
6895                 default:
6896                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
6897                                 cmd);
6898                         BUG();
6899                 }
6900         } else {
6901                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6902                 BUG();
6903         }
6904
6905         switch (GET_DIR(c->Request.type_attr_dir)) {
6906         case XFER_READ:
6907                 dir = DMA_FROM_DEVICE;
6908                 break;
6909         case XFER_WRITE:
6910                 dir = DMA_TO_DEVICE;
6911                 break;
6912         case XFER_NONE:
6913                 dir = DMA_NONE;
6914                 break;
6915         default:
6916                 dir = DMA_BIDIRECTIONAL;
6917         }
6918         if (hpsa_map_one(h->pdev, c, buff, size, dir))
6919                 return -1;
6920         return 0;
6921 }
6922
6923 /*
6924  * Map (physical) PCI mem into (virtual) kernel space
6925  */
6926 static void __iomem *remap_pci_mem(ulong base, ulong size)
6927 {
6928         ulong page_base = ((ulong) base) & PAGE_MASK;
6929         ulong page_offs = ((ulong) base) - page_base;
6930         void __iomem *page_remapped = ioremap(page_base,
6931                 page_offs + size);
6932
6933         return page_remapped ? (page_remapped + page_offs) : NULL;
6934 }
6935
6936 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6937 {
6938         return h->access.command_completed(h, q);
6939 }
6940
6941 static inline bool interrupt_pending(struct ctlr_info *h)
6942 {
6943         return h->access.intr_pending(h);
6944 }
6945
6946 static inline long interrupt_not_for_us(struct ctlr_info *h)
6947 {
6948         return (h->access.intr_pending(h) == 0) ||
6949                 (h->interrupts_enabled == 0);
6950 }
6951
6952 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6953         u32 raw_tag)
6954 {
6955         if (unlikely(tag_index >= h->nr_cmds)) {
6956                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6957                 return 1;
6958         }
6959         return 0;
6960 }
6961
6962 static inline void finish_cmd(struct CommandList *c)
6963 {
6964         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6965         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6966                         || c->cmd_type == CMD_IOACCEL2))
6967                 complete_scsi_command(c);
6968         else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6969                 complete(c->waiting);
6970 }
6971
6972 /* process completion of an indexed ("direct lookup") command */
6973 static inline void process_indexed_cmd(struct ctlr_info *h,
6974         u32 raw_tag)
6975 {
6976         u32 tag_index;
6977         struct CommandList *c;
6978
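        /*
         * The upper bits of the raw tag hold the command's index into
         * h->cmd_pool; bad_tag() rejects anything past nr_cmds.
         */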
6979         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6980         if (!bad_tag(h, tag_index, raw_tag)) {
6981                 c = h->cmd_pool + tag_index;
6982                 finish_cmd(c);
6983         }
6984 }
6985
6986 /* Some controllers, like p400, will give us one interrupt
6987  * after a soft reset, even if we turned interrupts off.
6988  * Only need to check for this in the hpsa_xxx_discard_completions
6989  * functions.
6990  */
6991 static int ignore_bogus_interrupt(struct ctlr_info *h)
6992 {
6993         if (likely(!reset_devices))
6994                 return 0;
6995
6996         if (likely(h->interrupts_enabled))
6997                 return 0;
6998
6999         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
7000                 "(known firmware bug.)  Ignoring.\n");
7001
7002         return 1;
7003 }
7004
7005 /*
7006  * Convert &h->q[x] (passed to interrupt handlers) back to h.
7007  * Relies on (h->q[x] == x) being true for x such that
7008  * 0 <= x < MAX_REPLY_QUEUES.
7009  */
7010 static struct ctlr_info *queue_to_hba(u8 *queue)
7011 {
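        /*
         * *queue holds the queue's own index x (set up in hpsa_request_irqs),
         * so queue - *queue points back at h->q[0]; container_of() then
         * recovers the enclosing ctlr_info.
         */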
7012         return container_of((queue - *queue), struct ctlr_info, q[0]);
7013 }
7014
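/*
 * The *_discard_completions handlers below drain the reply queue(s) without
 * completing any commands.  They are installed temporarily around the kdump
 * soft-reset path, where stale completions from before the reset must simply
 * be thrown away.
 */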
7015 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
7016 {
7017         struct ctlr_info *h = queue_to_hba(queue);
7018         u8 q = *(u8 *) queue;
7019         u32 raw_tag;
7020
7021         if (ignore_bogus_interrupt(h))
7022                 return IRQ_NONE;
7023
7024         if (interrupt_not_for_us(h))
7025                 return IRQ_NONE;
7026         h->last_intr_timestamp = get_jiffies_64();
7027         while (interrupt_pending(h)) {
7028                 raw_tag = get_next_completion(h, q);
7029                 while (raw_tag != FIFO_EMPTY)
7030                         raw_tag = next_command(h, q);
7031         }
7032         return IRQ_HANDLED;
7033 }
7034
7035 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7036 {
7037         struct ctlr_info *h = queue_to_hba(queue);
7038         u32 raw_tag;
7039         u8 q = *(u8 *) queue;
7040
7041         if (ignore_bogus_interrupt(h))
7042                 return IRQ_NONE;
7043
7044         h->last_intr_timestamp = get_jiffies_64();
7045         raw_tag = get_next_completion(h, q);
7046         while (raw_tag != FIFO_EMPTY)
7047                 raw_tag = next_command(h, q);
7048         return IRQ_HANDLED;
7049 }
7050
7051 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7052 {
7053         struct ctlr_info *h = queue_to_hba((u8 *) queue);
7054         u32 raw_tag;
7055         u8 q = *(u8 *) queue;
7056
7057         if (interrupt_not_for_us(h))
7058                 return IRQ_NONE;
7059         h->last_intr_timestamp = get_jiffies_64();
7060         while (interrupt_pending(h)) {
7061                 raw_tag = get_next_completion(h, q);
7062                 while (raw_tag != FIFO_EMPTY) {
7063                         process_indexed_cmd(h, raw_tag);
7064                         raw_tag = next_command(h, q);
7065                 }
7066         }
7067         return IRQ_HANDLED;
7068 }
7069
7070 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7071 {
7072         struct ctlr_info *h = queue_to_hba(queue);
7073         u32 raw_tag;
7074         u8 q = *(u8 *) queue;
7075
7076         h->last_intr_timestamp = get_jiffies_64();
7077         raw_tag = get_next_completion(h, q);
7078         while (raw_tag != FIFO_EMPTY) {
7079                 process_indexed_cmd(h, raw_tag);
7080                 raw_tag = next_command(h, q);
7081         }
7082         return IRQ_HANDLED;
7083 }
7084
7085 /* Send a message CDB to the firmware. Careful, this only works
7086  * in simple mode, not performant mode due to the tag lookup.
7087  * We only ever use this immediately after a controller reset.
7088  */
7089 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7090                         unsigned char type)
7091 {
7092         struct Command {
7093                 struct CommandListHeader CommandHeader;
7094                 struct RequestBlock Request;
7095                 struct ErrDescriptor ErrorDescriptor;
7096         };
7097         struct Command *cmd;
7098         static const size_t cmd_sz = sizeof(*cmd) +
7099                                         sizeof(cmd->ErrorDescriptor);
7100         dma_addr_t paddr64;
7101         __le32 paddr32;
7102         u32 tag;
7103         void __iomem *vaddr;
7104         int i, err;
7105
7106         vaddr = pci_ioremap_bar(pdev, 0);
7107         if (vaddr == NULL)
7108                 return -ENOMEM;
7109
7110         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
7111          * CCISS commands, so they must be allocated from the lower 4GiB of
7112          * memory.
7113          */
7114         err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7115         if (err) {
7116                 iounmap(vaddr);
7117                 return err;
7118         }
7119
7120         cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7121         if (cmd == NULL) {
7122                 iounmap(vaddr);
7123                 return -ENOMEM;
7124         }
7125
7126         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
7127          * although there's no guarantee, we assume that the address is at
7128          * least 4-byte aligned (most likely, it's page-aligned).
7129          */
7130         paddr32 = cpu_to_le32(paddr64);
7131
7132         cmd->CommandHeader.ReplyQueue = 0;
7133         cmd->CommandHeader.SGList = 0;
7134         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7135         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7136         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7137
7138         cmd->Request.CDBLen = 16;
7139         cmd->Request.type_attr_dir =
7140                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7141         cmd->Request.Timeout = 0; /* Don't time out */
7142         cmd->Request.CDB[0] = opcode;
7143         cmd->Request.CDB[1] = type;
7144         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7145         cmd->ErrorDescriptor.Addr =
7146                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7147         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7148
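        /*
         * Post the command's bus address to the inbound request port, then
         * poll the outbound reply port until the controller echoes the same
         * tag back; the low bits of the reply may carry error/status flags,
         * hence the HPSA_SIMPLE_ERROR_BITS mask below.
         */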
7149         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7150
7151         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7152                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7153                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7154                         break;
7155                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7156         }
7157
7158         iounmap(vaddr);
7159
7160         /* we leak the DMA buffer here ... no choice since the controller could
7161          *  still complete the command.
7162          */
7163         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7164                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7165                         opcode, type);
7166                 return -ETIMEDOUT;
7167         }
7168
7169         dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7170
7171         if (tag & HPSA_ERROR_BIT) {
7172                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7173                         opcode, type);
7174                 return -EIO;
7175         }
7176
7177         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7178                 opcode, type);
7179         return 0;
7180 }
7181
7182 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7183
7184 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7185         void __iomem *vaddr, u32 use_doorbell)
7186 {
7187
7188         if (use_doorbell) {
7189                 /* For everything after the P600, the PCI power state method
7190                  * of resetting the controller doesn't work, so we have this
7191                  * other way using the doorbell register.
7192                  */
7193                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7194                 writel(use_doorbell, vaddr + SA5_DOORBELL);
7195
7196                 /* PMC hardware guys tell us we need a 10 second delay after
7197                  * doorbell reset and before any attempt to talk to the board
7198                  * at all to ensure that this actually works and doesn't fall
7199                  * over in some weird corner cases.
7200                  */
7201                 msleep(10000);
7202         } else { /* Try to do it the PCI power state way */
7203
7204                 /* Quoting from the Open CISS Specification: "The Power
7205                  * Management Control/Status Register (CSR) controls the power
7206                  * state of the device.  The normal operating state is D0,
7207                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
7208                  * the controller, place the interface device in D3 then to D0,
7209                  * this causes a secondary PCI reset which will reset the
7210                  * controller." */
7211
7212                 int rc = 0;
7213
7214                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7215
7216                 /* enter the D3hot power management state */
7217                 rc = pci_set_power_state(pdev, PCI_D3hot);
7218                 if (rc)
7219                         return rc;
7220
7221                 msleep(500);
7222
7223                 /* enter the D0 power management state */
7224                 rc = pci_set_power_state(pdev, PCI_D0);
7225                 if (rc)
7226                         return rc;
7227
7228                 /*
7229                  * The P600 requires a small delay when changing states.
7230                  * Otherwise we may think the board did not reset and we bail.
7231                  * This is for kdump only and is particular to the P600.
7232                  */
7233                 msleep(500);
7234         }
7235         return 0;
7236 }
7237
7238 static void init_driver_version(char *driver_version, int len)
7239 {
7240         memset(driver_version, 0, len);
7241         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7242 }
7243
7244 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7245 {
7246         char *driver_version;
7247         int i, size = sizeof(cfgtable->driver_version);
7248
7249         driver_version = kmalloc(size, GFP_KERNEL);
7250         if (!driver_version)
7251                 return -ENOMEM;
7252
7253         init_driver_version(driver_version, size);
7254         for (i = 0; i < size; i++)
7255                 writeb(driver_version[i], &cfgtable->driver_version[i]);
7256         kfree(driver_version);
7257         return 0;
7258 }
7259
7260 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7261                                           unsigned char *driver_ver)
7262 {
7263         int i;
7264
7265         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7266                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7267 }
7268
7269 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7270 {
7271
7272         char *driver_ver, *old_driver_ver;
7273         int rc, size = sizeof(cfgtable->driver_version);
7274
7275         old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7276         if (!old_driver_ver)
7277                 return -ENOMEM;
7278         driver_ver = old_driver_ver + size;
7279
7280         /* After a reset, the 32 bytes of "driver version" in the cfgtable
7281          * should have been changed, otherwise we know the reset failed.
7282          */
7283         init_driver_version(old_driver_ver, size);
7284         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7285         rc = !memcmp(driver_ver, old_driver_ver, size);
7286         kfree(old_driver_ver);
7287         return rc;
7288 }
7289 /* This does a hard reset of the controller using PCI power management
7290  * states or the doorbell register.
7291  */
7292 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7293 {
7294         u64 cfg_offset;
7295         u32 cfg_base_addr;
7296         u64 cfg_base_addr_index;
7297         void __iomem *vaddr;
7298         unsigned long paddr;
7299         u32 misc_fw_support;
7300         int rc;
7301         struct CfgTable __iomem *cfgtable;
7302         u32 use_doorbell;
7303         u16 command_register;
7304
7305         /* For controllers as old as the P600, this is very nearly
7306          * the same thing as
7307          *
7308          * pci_save_state(pci_dev);
7309          * pci_set_power_state(pci_dev, PCI_D3hot);
7310          * pci_set_power_state(pci_dev, PCI_D0);
7311          * pci_restore_state(pci_dev);
7312          *
7313          * For controllers newer than the P600, the pci power state
7314          * method of resetting doesn't work so we have another way
7315          * using the doorbell register.
7316          */
7317
7318         if (!ctlr_is_resettable(board_id)) {
7319                 dev_warn(&pdev->dev, "Controller not resettable\n");
7320                 return -ENODEV;
7321         }
7322
7323         /* if controller is soft- but not hard resettable... */
7324         if (!ctlr_is_hard_resettable(board_id))
7325                 return -ENOTSUPP; /* try soft reset later. */
7326
7327         /* Save the PCI command register */
7328         pci_read_config_word(pdev, 4, &command_register);
7329         pci_save_state(pdev);
7330
7331         /* find the first memory BAR, so we can find the cfg table */
7332         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7333         if (rc)
7334                 return rc;
7335         vaddr = remap_pci_mem(paddr, 0x250);
7336         if (!vaddr)
7337                 return -ENOMEM;
7338
7339         /* find cfgtable in order to check if reset via doorbell is supported */
7340         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7341                                         &cfg_base_addr_index, &cfg_offset);
7342         if (rc)
7343                 goto unmap_vaddr;
7344         cfgtable = remap_pci_mem(pci_resource_start(pdev,
7345                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7346         if (!cfgtable) {
7347                 rc = -ENOMEM;
7348                 goto unmap_vaddr;
7349         }
7350         rc = write_driver_ver_to_cfgtable(cfgtable);
7351         if (rc)
7352                 goto unmap_cfgtable;
7353
7354         /* If reset via doorbell register is supported, use that.
7355          * There are two such methods.  Favor the newest method.
7356          */
7357         misc_fw_support = readl(&cfgtable->misc_fw_support);
7358         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7359         if (use_doorbell) {
7360                 use_doorbell = DOORBELL_CTLR_RESET2;
7361         } else {
7362                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7363                 if (use_doorbell) {
7364                         dev_warn(&pdev->dev,
7365                                 "Soft reset not supported. Firmware update is required.\n");
7366                         rc = -ENOTSUPP; /* try soft reset */
7367                         goto unmap_cfgtable;
7368                 }
7369         }
7370
7371         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7372         if (rc)
7373                 goto unmap_cfgtable;
7374
7375         pci_restore_state(pdev);
7376         pci_write_config_word(pdev, 4, command_register);
7377
7378         /* Some devices (notably the HP Smart Array 5i Controller)
7379            need a little pause here */
7380         msleep(HPSA_POST_RESET_PAUSE_MSECS);
7381
7382         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7383         if (rc) {
7384                 dev_warn(&pdev->dev,
7385                         "Failed waiting for board to become ready after hard reset\n");
7386                 goto unmap_cfgtable;
7387         }
7388
7389         rc = controller_reset_failed(vaddr);
7390         if (rc < 0)
7391                 goto unmap_cfgtable;
7392         if (rc) {
7393                 dev_warn(&pdev->dev, "Unable to successfully reset "
7394                         "controller. Will try soft reset.\n");
7395                 rc = -ENOTSUPP;
7396         } else {
7397                 dev_info(&pdev->dev, "board ready after hard reset.\n");
7398         }
7399
7400 unmap_cfgtable:
7401         iounmap(cfgtable);
7402
7403 unmap_vaddr:
7404         iounmap(vaddr);
7405         return rc;
7406 }
7407
7408 /*
7409  * We cannot read the structure directly; for portability we must use
7410  * the io functions.
7411  * This is for debug only.
7412  */
7413 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7414 {
7415 #ifdef HPSA_DEBUG
7416         int i;
7417         char temp_name[17];
7418
7419         dev_info(dev, "Controller Configuration information\n");
7420         dev_info(dev, "------------------------------------\n");
7421         for (i = 0; i < 4; i++)
7422                 temp_name[i] = readb(&(tb->Signature[i]));
7423         temp_name[4] = '\0';
7424         dev_info(dev, "   Signature = %s\n", temp_name);
7425         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
7426         dev_info(dev, "   Transport methods supported = 0x%x\n",
7427                readl(&(tb->TransportSupport)));
7428         dev_info(dev, "   Transport methods active = 0x%x\n",
7429                readl(&(tb->TransportActive)));
7430         dev_info(dev, "   Requested transport Method = 0x%x\n",
7431                readl(&(tb->HostWrite.TransportRequest)));
7432         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
7433                readl(&(tb->HostWrite.CoalIntDelay)));
7434         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
7435                readl(&(tb->HostWrite.CoalIntCount)));
7436         dev_info(dev, "   Max outstanding commands = %d\n",
7437                readl(&(tb->CmdsOutMax)));
7438         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7439         for (i = 0; i < 16; i++)
7440                 temp_name[i] = readb(&(tb->ServerName[i]));
7441         temp_name[16] = '\0';
7442         dev_info(dev, "   Server Name = %s\n", temp_name);
7443         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
7444                 readl(&(tb->HeartBeat)));
7445 #endif                          /* HPSA_DEBUG */
7446 }
7447
7448 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7449 {
7450         int i, offset, mem_type, bar_type;
7451
7452         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7453                 return 0;
7454         offset = 0;
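        /*
         * Accumulate the config-space size of each BAR (4 bytes for 32-bit,
         * 8 bytes for 64-bit).  Once the running offset after resource i
         * equals the requested offset, the matching BAR is resource i + 1
         * (BAR zero was already handled above).
         */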
7455         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7456                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7457                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7458                         offset += 4;
7459                 else {
7460                         mem_type = pci_resource_flags(pdev, i) &
7461                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7462                         switch (mem_type) {
7463                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
7464                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7465                                 offset += 4;    /* 32 bit */
7466                                 break;
7467                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
7468                                 offset += 8;
7469                                 break;
7470                         default:        /* reserved in PCI 2.2 */
7471                                 dev_warn(&pdev->dev,
7472                                        "base address is invalid\n");
7473                                 return -1;
7474                         }
7475                 }
7476                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7477                         return i + 1;
7478         }
7479         return -1;
7480 }
7481
7482 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7483 {
7484         pci_free_irq_vectors(h->pdev);
7485         h->msix_vectors = 0;
7486 }
7487
7488 static void hpsa_setup_reply_map(struct ctlr_info *h)
7489 {
7490         const struct cpumask *mask;
7491         unsigned int queue, cpu;
7492
7493         for (queue = 0; queue < h->msix_vectors; queue++) {
7494                 mask = pci_irq_get_affinity(h->pdev, queue);
7495                 if (!mask)
7496                         goto fallback;
7497
7498                 for_each_cpu(cpu, mask)
7499                         h->reply_map[cpu] = queue;
7500         }
7501         return;
7502
7503 fallback:
7504         for_each_possible_cpu(cpu)
7505                 h->reply_map[cpu] = 0;
7506 }
7507
7508 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7509  * controllers that are capable. If not, we use legacy INTx mode.
7510  */
7511 static int hpsa_interrupt_mode(struct ctlr_info *h)
7512 {
7513         unsigned int flags = PCI_IRQ_LEGACY;
7514         int ret;
7515
7516         /* Some boards advertise MSI but don't really support it */
7517         switch (h->board_id) {
7518         case 0x40700E11:
7519         case 0x40800E11:
7520         case 0x40820E11:
7521         case 0x40830E11:
7522                 break;
7523         default:
7524                 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7525                                 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7526                 if (ret > 0) {
7527                         h->msix_vectors = ret;
7528                         return 0;
7529                 }
7530
7531                 flags |= PCI_IRQ_MSI;
7532                 break;
7533         }
7534
7535         ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7536         if (ret < 0)
7537                 return ret;
7538         return 0;
7539 }
7540
7541 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7542                                 bool *legacy_board)
7543 {
7544         int i;
7545         u32 subsystem_vendor_id, subsystem_device_id;
7546
7547         subsystem_vendor_id = pdev->subsystem_vendor;
7548         subsystem_device_id = pdev->subsystem_device;
7549         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7550                     subsystem_vendor_id;
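        /*
         * For example, a (hypothetical) subsystem device ID of 0x3354 paired
         * with HP's subsystem vendor ID 0x103C yields board_id 0x3354103C:
         * device ID in the upper 16 bits, vendor ID in the lower 16 bits.
         */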
7551
7552         if (legacy_board)
7553                 *legacy_board = false;
7554         for (i = 0; i < ARRAY_SIZE(products); i++)
7555                 if (*board_id == products[i].board_id) {
7556                         if (products[i].access != &SA5A_access &&
7557                             products[i].access != &SA5B_access)
7558                                 return i;
7559                         dev_warn(&pdev->dev,
7560                                  "legacy board ID: 0x%08x\n",
7561                                  *board_id);
7562                         if (legacy_board)
7563                             *legacy_board = true;
7564                         return i;
7565                 }
7566
7567         dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7568         if (legacy_board)
7569                 *legacy_board = true;
7570         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7571 }
7572
7573 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7574                                     unsigned long *memory_bar)
7575 {
7576         int i;
7577
7578         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7579                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7580                         /* addressing mode bits already removed */
7581                         *memory_bar = pci_resource_start(pdev, i);
7582                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7583                                 *memory_bar);
7584                         return 0;
7585                 }
7586         dev_warn(&pdev->dev, "no memory BAR found\n");
7587         return -ENODEV;
7588 }
7589
7590 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7591                                      int wait_for_ready)
7592 {
7593         int i, iterations;
7594         u32 scratchpad;
7595         if (wait_for_ready)
7596                 iterations = HPSA_BOARD_READY_ITERATIONS;
7597         else
7598                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7599
7600         for (i = 0; i < iterations; i++) {
7601                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7602                 if (wait_for_ready) {
7603                         if (scratchpad == HPSA_FIRMWARE_READY)
7604                                 return 0;
7605                 } else {
7606                         if (scratchpad != HPSA_FIRMWARE_READY)
7607                                 return 0;
7608                 }
7609                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7610         }
7611         dev_warn(&pdev->dev, "board not ready, timed out.\n");
7612         return -ENODEV;
7613 }
7614
7615 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7616                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7617                                u64 *cfg_offset)
7618 {
7619         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7620         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7621         *cfg_base_addr &= (u32) 0x0000ffff;
7622         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7623         if (*cfg_base_addr_index == -1) {
7624                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7625                 return -ENODEV;
7626         }
7627         return 0;
7628 }
7629
7630 static void hpsa_free_cfgtables(struct ctlr_info *h)
7631 {
7632         if (h->transtable) {
7633                 iounmap(h->transtable);
7634                 h->transtable = NULL;
7635         }
7636         if (h->cfgtable) {
7637                 iounmap(h->cfgtable);
7638                 h->cfgtable = NULL;
7639         }
7640 }
7641
7642 /* Find and map CISS config table and transfer table
7643  * several items must be unmapped (freed) later.
7644  */
7645 static int hpsa_find_cfgtables(struct ctlr_info *h)
7646 {
7647         u64 cfg_offset;
7648         u32 cfg_base_addr;
7649         u64 cfg_base_addr_index;
7650         u32 trans_offset;
7651         int rc;
7652
7653         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7654                 &cfg_base_addr_index, &cfg_offset);
7655         if (rc)
7656                 return rc;
7657         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7658                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7659         if (!h->cfgtable) {
7660                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7661                 return -ENOMEM;
7662         }
7663         rc = write_driver_ver_to_cfgtable(h->cfgtable);
7664         if (rc)
7665                 return rc;
7666         /* Find performant mode table. */
7667         trans_offset = readl(&h->cfgtable->TransMethodOffset);
7668         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7669                                 cfg_base_addr_index)+cfg_offset+trans_offset,
7670                                 sizeof(*h->transtable));
7671         if (!h->transtable) {
7672                 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7673                 hpsa_free_cfgtables(h);
7674                 return -ENOMEM;
7675         }
7676         return 0;
7677 }
7678
7679 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7680 {
7681 #define MIN_MAX_COMMANDS 16
7682         BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7683
7684         h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7685
7686         /* Limit commands in memory limited kdump scenario. */
7687         if (reset_devices && h->max_commands > 32)
7688                 h->max_commands = 32;
7689
7690         if (h->max_commands < MIN_MAX_COMMANDS) {
7691                 dev_warn(&h->pdev->dev,
7692                         "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7693                         h->max_commands,
7694                         MIN_MAX_COMMANDS);
7695                 h->max_commands = MIN_MAX_COMMANDS;
7696         }
7697 }
7698
7699 /* If the controller reports that the total max sg entries is greater than 512,
7700  * then we know that chained SG blocks work.  (Original smart arrays did not
7701  * support chained SG blocks and would return zero for max sg entries.)
7702  */
7703 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7704 {
7705         return h->maxsgentries > 512;
7706 }
7707
7708 /* Interrogate the hardware for some limits:
7709  * max commands, max SG elements without chaining, and with chaining,
7710  * SG chain block size, etc.
7711  */
7712 static void hpsa_find_board_params(struct ctlr_info *h)
7713 {
7714         hpsa_get_max_perf_mode_cmds(h);
7715         h->nr_cmds = h->max_commands;
7716         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7717         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7718         if (hpsa_supports_chained_sg_blocks(h)) {
7719                 /* Limit in-command s/g elements to 32 to save dma'able memory. */
7720                 h->max_cmd_sg_entries = 32;
7721                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7722                 h->maxsgentries--; /* save one for chain pointer */
7723         } else {
7724                 /*
7725                  * Original smart arrays supported at most 31 s/g entries
7726                  * embedded inline in the command (trying to use more
7727                  * would lock up the controller)
7728                  */
7729                 h->max_cmd_sg_entries = 31;
7730                 h->maxsgentries = 31; /* default to traditional values */
7731                 h->chainsize = 0;
7732         }
7733
7734         /* Find out what task management functions are supported and cache */
7735         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7736         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7737                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7738         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7739                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7740         if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7741                 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7742 }
7743
7744 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7745 {
7746         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7747                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7748                 return false;
7749         }
7750         return true;
7751 }
7752
7753 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7754 {
7755         u32 driver_support;
7756
7757         driver_support = readl(&(h->cfgtable->driver_support));
7758         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7759 #ifdef CONFIG_X86
7760         driver_support |= ENABLE_SCSI_PREFETCH;
7761 #endif
7762         driver_support |= ENABLE_UNIT_ATTN;
7763         writel(driver_support, &(h->cfgtable->driver_support));
7764 }
7765
7766 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7767  * in a prefetch beyond physical memory.
7768  */
7769 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7770 {
7771         u32 dma_prefetch;
7772
7773         if (h->board_id != 0x3225103C)
7774                 return;
7775         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7776         dma_prefetch |= 0x8000;
7777         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7778 }
7779
7780 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7781 {
7782         int i;
7783         u32 doorbell_value;
7784         unsigned long flags;
7785         /* wait until the clear_event_notify bit 6 is cleared by controller. */
7786         for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7787                 spin_lock_irqsave(&h->lock, flags);
7788                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7789                 spin_unlock_irqrestore(&h->lock, flags);
7790                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7791                         goto done;
7792                 /* delay and try again */
7793                 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7794         }
7795         return -ENODEV;
7796 done:
7797         return 0;
7798 }
7799
7800 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7801 {
7802         int i;
7803         u32 doorbell_value;
7804         unsigned long flags;
7805
7806         /* under certain very rare conditions, this can take a while.
7807          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7808          * as we enter this code.)
7809          */
7810         for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7811                 if (h->remove_in_progress)
7812                         goto done;
7813                 spin_lock_irqsave(&h->lock, flags);
7814                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7815                 spin_unlock_irqrestore(&h->lock, flags);
7816                 if (!(doorbell_value & CFGTBL_ChangeReq))
7817                         goto done;
7818                 /* delay and try again */
7819                 msleep(MODE_CHANGE_WAIT_INTERVAL);
7820         }
7821         return -ENODEV;
7822 done:
7823         return 0;
7824 }
7825
7826 /* return -ENODEV or other reason on error, 0 on success */
7827 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7828 {
7829         u32 trans_support;
7830
7831         trans_support = readl(&(h->cfgtable->TransportSupport));
7832         if (!(trans_support & SIMPLE_MODE))
7833                 return -ENOTSUPP;
7834
7835         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7836
7837         /* Update the field, and then ring the doorbell */
7838         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7839         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7840         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
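        /*
         * The controller acknowledges the request by clearing CFGTBL_ChangeReq
         * in the doorbell register (polled by hpsa_wait_for_mode_change_ack);
         * TransportActive is then checked to confirm simple mode is in effect.
         */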
7841         if (hpsa_wait_for_mode_change_ack(h))
7842                 goto error;
7843         print_cfg_table(&h->pdev->dev, h->cfgtable);
7844         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7845                 goto error;
7846         h->transMethod = CFGTBL_Trans_Simple;
7847         return 0;
7848 error:
7849         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7850         return -ENODEV;
7851 }
7852
7853 /* free items allocated or mapped by hpsa_pci_init */
7854 static void hpsa_free_pci_init(struct ctlr_info *h)
7855 {
7856         hpsa_free_cfgtables(h);                 /* pci_init 4 */
7857         iounmap(h->vaddr);                      /* pci_init 3 */
7858         h->vaddr = NULL;
7859         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
7860         /*
7861          * call pci_disable_device before pci_release_regions per
7862          * Documentation/driver-api/pci/pci.rst
7863          */
7864         pci_disable_device(h->pdev);            /* pci_init 1 */
7865         pci_release_regions(h->pdev);           /* pci_init 2 */
7866 }
7867
7868 /* several items must be freed later */
7869 static int hpsa_pci_init(struct ctlr_info *h)
7870 {
7871         int prod_index, err;
7872         bool legacy_board;
7873
7874         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7875         if (prod_index < 0)
7876                 return prod_index;
7877         h->product_name = products[prod_index].product_name;
7878         h->access = *(products[prod_index].access);
7879         h->legacy_board = legacy_board;
7880         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7881                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7882
7883         err = pci_enable_device(h->pdev);
7884         if (err) {
7885                 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7886                 pci_disable_device(h->pdev);
7887                 return err;
7888         }
7889
7890         err = pci_request_regions(h->pdev, HPSA);
7891         if (err) {
7892                 dev_err(&h->pdev->dev,
7893                         "failed to obtain PCI resources\n");
7894                 pci_disable_device(h->pdev);
7895                 return err;
7896         }
7897
7898         pci_set_master(h->pdev);
7899
7900         err = hpsa_interrupt_mode(h);
7901         if (err)
7902                 goto clean1;
7903
7904         /* setup mapping between CPU and reply queue */
7905         hpsa_setup_reply_map(h);
7906
7907         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7908         if (err)
7909                 goto clean2;    /* intmode+region, pci */
7910         h->vaddr = remap_pci_mem(h->paddr, 0x250);
7911         if (!h->vaddr) {
7912                 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7913                 err = -ENOMEM;
7914                 goto clean2;    /* intmode+region, pci */
7915         }
7916         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7917         if (err)
7918                 goto clean3;    /* vaddr, intmode+region, pci */
7919         err = hpsa_find_cfgtables(h);
7920         if (err)
7921                 goto clean3;    /* vaddr, intmode+region, pci */
7922         hpsa_find_board_params(h);
7923
7924         if (!hpsa_CISS_signature_present(h)) {
7925                 err = -ENODEV;
7926                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7927         }
7928         hpsa_set_driver_support_bits(h);
7929         hpsa_p600_dma_prefetch_quirk(h);
7930         err = hpsa_enter_simple_mode(h);
7931         if (err)
7932                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7933         return 0;
7934
7935 clean4: /* cfgtables, vaddr, intmode+region, pci */
7936         hpsa_free_cfgtables(h);
7937 clean3: /* vaddr, intmode+region, pci */
7938         iounmap(h->vaddr);
7939         h->vaddr = NULL;
7940 clean2: /* intmode+region, pci */
7941         hpsa_disable_interrupt_mode(h);
7942 clean1:
7943         /*
7944          * call pci_disable_device before pci_release_regions per
7945          * Documentation/driver-api/pci/pci.rst
7946          */
7947         pci_disable_device(h->pdev);
7948         pci_release_regions(h->pdev);
7949         return err;
7950 }
7951
7952 static void hpsa_hba_inquiry(struct ctlr_info *h)
7953 {
7954         int rc;
7955
7956 #define HBA_INQUIRY_BYTE_COUNT 64
7957         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7958         if (!h->hba_inquiry_data)
7959                 return;
7960         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7961                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7962         if (rc != 0) {
7963                 kfree(h->hba_inquiry_data);
7964                 h->hba_inquiry_data = NULL;
7965         }
7966 }
7967
7968 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7969 {
7970         int rc, i;
7971         void __iomem *vaddr;
7972
7973         if (!reset_devices)
7974                 return 0;
7975
7976         /* The kdump kernel is loading and we don't know what state the
7977          * PCI interface is in.  dev->enable_cnt is zero, so we call
7978          * enable+disable, wait a while, and switch it on.
7979          */
7980         rc = pci_enable_device(pdev);
7981         if (rc) {
7982                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7983                 return -ENODEV;
7984         }
7985         pci_disable_device(pdev);
7986         msleep(260);                    /* a randomly chosen number */
7987         rc = pci_enable_device(pdev);
7988         if (rc) {
7989                 dev_warn(&pdev->dev, "failed to enable device.\n");
7990                 return -ENODEV;
7991         }
7992
7993         pci_set_master(pdev);
7994
7995         vaddr = pci_ioremap_bar(pdev, 0);
7996         if (vaddr == NULL) {
7997                 rc = -ENOMEM;
7998                 goto out_disable;
7999         }
8000         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
8001         iounmap(vaddr);
8002
8003         /* Reset the controller with a PCI power-cycle or via doorbell */
8004         rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
8005
8006         /* -ENOTSUPP here means we cannot reset the controller
8007          * but it's already (and still) up and running in
8008          * "performant mode".  Or, it might be 640x, which can't reset
8009          * due to concerns about shared bbwc between 6402/6404 pair.
8010          */
8011         if (rc)
8012                 goto out_disable;
8013
8014         /* Now try to get the controller to respond to a no-op */
8015         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
8016         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
8017                 if (hpsa_noop(pdev) == 0)
8018                         break;
8019                 else
8020                         dev_warn(&pdev->dev, "no-op failed%s\n",
8021                                         (i < 11 ? "; re-trying" : ""));
8022         }
8023
8024 out_disable:
8025
8026         pci_disable_device(pdev);
8027         return rc;
8028 }
8029
8030 static void hpsa_free_cmd_pool(struct ctlr_info *h)
8031 {
8032         kfree(h->cmd_pool_bits);
8033         h->cmd_pool_bits = NULL;
8034         if (h->cmd_pool) {
8035                 dma_free_coherent(&h->pdev->dev,
8036                                 h->nr_cmds * sizeof(struct CommandList),
8037                                 h->cmd_pool,
8038                                 h->cmd_pool_dhandle);
8039                 h->cmd_pool = NULL;
8040                 h->cmd_pool_dhandle = 0;
8041         }
8042         if (h->errinfo_pool) {
8043                 dma_free_coherent(&h->pdev->dev,
8044                                 h->nr_cmds * sizeof(struct ErrorInfo),
8045                                 h->errinfo_pool,
8046                                 h->errinfo_pool_dhandle);
8047                 h->errinfo_pool = NULL;
8048                 h->errinfo_pool_dhandle = 0;
8049         }
8050 }
8051
8052 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8053 {
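        /*
         * cmd_pool_bits is a bitmap with one bit per command slot; cmd_alloc()
         * and cmd_free() use it to track which entries of the coherent
         * cmd_pool/errinfo_pool arrays are currently in use.
         */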
8054         h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
8055                                    sizeof(unsigned long),
8056                                    GFP_KERNEL);
8057         h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
8058                     h->nr_cmds * sizeof(*h->cmd_pool),
8059                     &h->cmd_pool_dhandle, GFP_KERNEL);
8060         h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8061                     h->nr_cmds * sizeof(*h->errinfo_pool),
8062                     &h->errinfo_pool_dhandle, GFP_KERNEL);
8063         if ((h->cmd_pool_bits == NULL)
8064             || (h->cmd_pool == NULL)
8065             || (h->errinfo_pool == NULL)) {
8066                 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
8067                 goto clean_up;
8068         }
8069         hpsa_preinitialize_commands(h);
8070         return 0;
8071 clean_up:
8072         hpsa_free_cmd_pool(h);
8073         return -ENOMEM;
8074 }
8075
8076 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
8077 static void hpsa_free_irqs(struct ctlr_info *h)
8078 {
8079         int i;
8080         int irq_vector = 0;
8081
8082         if (hpsa_simple_mode)
8083                 irq_vector = h->intr_mode;
8084
8085         if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8086                 /* Single reply queue, only one irq to free */
8087                 free_irq(pci_irq_vector(h->pdev, irq_vector),
8088                                 &h->q[h->intr_mode]);
8089                 h->q[h->intr_mode] = 0;
8090                 return;
8091         }
8092
8093         for (i = 0; i < h->msix_vectors; i++) {
8094                 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8095                 h->q[i] = 0;
8096         }
8097         for (; i < MAX_REPLY_QUEUES; i++)
8098                 h->q[i] = 0;
8099 }
8100
8101 /* returns 0 on success; cleans up and returns -Enn on error */
8102 static int hpsa_request_irqs(struct ctlr_info *h,
8103         irqreturn_t (*msixhandler)(int, void *),
8104         irqreturn_t (*intxhandler)(int, void *))
8105 {
8106         int rc, i;
8107         int irq_vector = 0;
8108
8109         if (hpsa_simple_mode)
8110                 irq_vector = h->intr_mode;
8111
8112         /*
8113          * initialize h->q[x] = x so that interrupt handlers know which
8114          * queue to process.
8115          */
8116         for (i = 0; i < MAX_REPLY_QUEUES; i++)
8117                 h->q[i] = (u8) i;
8118
8119         if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8120                 /* If performant mode and MSI-X, use multiple reply queues */
8121                 for (i = 0; i < h->msix_vectors; i++) {
8122                         sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8123                         rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8124                                         0, h->intrname[i],
8125                                         &h->q[i]);
8126                         if (rc) {
8127                                 int j;
8128
8129                                 dev_err(&h->pdev->dev,
8130                                         "failed to get irq %d for %s\n",
8131                                        pci_irq_vector(h->pdev, i), h->devname);
8132                                 for (j = 0; j < i; j++) {
8133                                         free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8134                                         h->q[j] = 0;
8135                                 }
8136                                 for (; j < MAX_REPLY_QUEUES; j++)
8137                                         h->q[j] = 0;
8138                                 return rc;
8139                         }
8140                 }
8141         } else {
8142                 /* Use a single reply queue */
8143                 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8144                         sprintf(h->intrname[0], "%s-msi%s", h->devname,
8145                                 h->msix_vectors ? "x" : "");
8146                         rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8147                                 msixhandler, 0,
8148                                 h->intrname[0],
8149                                 &h->q[h->intr_mode]);
8150                 } else {
8151                         sprintf(h->intrname[h->intr_mode],
8152                                 "%s-intx", h->devname);
8153                         rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8154                                 intxhandler, IRQF_SHARED,
8155                                 h->intrname[0],
8156                                 &h->q[h->intr_mode]);
8157                 }
8158         }
8159         if (rc) {
8160                 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8161                        pci_irq_vector(h->pdev, irq_vector), h->devname);
8162                 hpsa_free_irqs(h);
8163                 return -ENODEV;
8164         }
8165         return 0;
8166 }
8167
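     /*
      * Used on the kdump/reset_devices path when a hard PCI reset was not
      * possible: ask the firmware for a controller-level reset via
      * hpsa_send_host_reset(), confirm the board actually dropped out of the
      * ready state, then wait for it to come back ready.
      */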
8168 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8169 {
8170         int rc;
8171         hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8172
8173         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8174         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8175         if (rc) {
8176                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8177                 return rc;
8178         }
8179
8180         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8181         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8182         if (rc) {
8183                 dev_warn(&h->pdev->dev,
8184                         "Board failed to become ready after soft reset.\n");
8185                 return rc;
8186         }
8187
8188         return 0;
8189 }
8190
8191 static void hpsa_free_reply_queues(struct ctlr_info *h)
8192 {
8193         int i;
8194
8195         for (i = 0; i < h->nreply_queues; i++) {
8196                 if (!h->reply_queue[i].head)
8197                         continue;
8198                 dma_free_coherent(&h->pdev->dev,
8199                                         h->reply_queue_size,
8200                                         h->reply_queue[i].head,
8201                                         h->reply_queue[i].busaddr);
8202                 h->reply_queue[i].head = NULL;
8203                 h->reply_queue[i].busaddr = 0;
8204         }
8205         h->reply_queue_size = 0;
8206 }
8207
8208 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8209 {
8210         hpsa_free_performant_mode(h);           /* init_one 7 */
8211         hpsa_free_sg_chain_blocks(h);           /* init_one 6 */
8212         hpsa_free_cmd_pool(h);                  /* init_one 5 */
8213         hpsa_free_irqs(h);                      /* init_one 4 */
8214         scsi_host_put(h->scsi_host);            /* init_one 3 */
8215         h->scsi_host = NULL;                    /* init_one 3 */
8216         hpsa_free_pci_init(h);                  /* init_one 2_5 */
8217         free_percpu(h->lockup_detected);        /* init_one 2 */
8218         h->lockup_detected = NULL;              /* init_one 2 */
8219         if (h->resubmit_wq) {
8220                 destroy_workqueue(h->resubmit_wq);      /* init_one 1 */
8221                 h->resubmit_wq = NULL;
8222         }
8223         if (h->rescan_ctlr_wq) {
8224                 destroy_workqueue(h->rescan_ctlr_wq);
8225                 h->rescan_ctlr_wq = NULL;
8226         }
8227         if (h->monitor_ctlr_wq) {
8228                 destroy_workqueue(h->monitor_ctlr_wq);
8229                 h->monitor_ctlr_wq = NULL;
8230         }
8231
8232         kfree(h);                               /* init_one 1 */
8233 }
8234
8235 /* Called when controller lockup detected. */
8236 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8237 {
8238         int i, refcount;
8239         struct CommandList *c;
8240         int failcount = 0;
8241
8242         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8243         for (i = 0; i < h->nr_cmds; i++) {
8244                 c = h->cmd_pool + i;
8245                 refcount = atomic_inc_return(&c->refcount);
8246                 if (refcount > 1) {
8247                         c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8248                         finish_cmd(c);
8249                         atomic_dec(&h->commands_outstanding);
8250                         failcount++;
8251                 }
8252                 cmd_free(h, c);
8253         }
8254         dev_warn(&h->pdev->dev,
8255                 "failed %d commands in fail_all\n", failcount);
8256 }
8257
8258 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8259 {
8260         int cpu;
8261
8262         for_each_online_cpu(cpu) {
8263                 u32 *lockup_detected;
8264                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8265                 *lockup_detected = value;
8266         }
8267         wmb(); /* be sure the per-cpu variables are out to memory */
8268 }
8269
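     /*
      * Invoked once detect_controller_lockup() decides the firmware has
      * stopped updating its heartbeat: mask interrupts, latch the lockup
      * code read from the scratchpad register into every CPU's
      * lockup_detected copy, and then fail whatever commands are still
      * outstanding so the upper layers can give up cleanly.
      */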
8270 static void controller_lockup_detected(struct ctlr_info *h)
8271 {
8272         unsigned long flags;
8273         u32 lockup_detected;
8274
8275         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8276         spin_lock_irqsave(&h->lock, flags);
8277         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8278         if (!lockup_detected) {
8279                 /* no heartbeat, but controller gave us a zero. */
8280                 dev_warn(&h->pdev->dev,
8281                         "lockup detected after %d seconds but scratchpad register is zero\n",
8282                         h->heartbeat_sample_interval / HZ);
8283                 lockup_detected = 0xffffffff;
8284         }
8285         set_lockup_detected_for_all_cpus(h, lockup_detected);
8286         spin_unlock_irqrestore(&h->lock, flags);
8287         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8288                         lockup_detected, h->heartbeat_sample_interval / HZ);
8289         if (lockup_detected == 0xffff0000) {
8290                 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8291                 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8292         }
8293         pci_disable_device(h->pdev);
8294         fail_all_outstanding_cmds(h);
8295 }
8296
8297 static int detect_controller_lockup(struct ctlr_info *h)
8298 {
8299         u64 now;
8300         u32 heartbeat;
8301         unsigned long flags;
8302
8303         now = get_jiffies_64();
8304         /* If we've received an interrupt recently, we're ok. */
8305         if (time_after64(h->last_intr_timestamp +
8306                                 (h->heartbeat_sample_interval), now))
8307                 return false;
8308
8309         /*
8310          * If we've already checked the heartbeat recently, we're ok.
8311          * This could happen if someone sends us a signal. We
8312          * otherwise don't care about signals in this thread.
8313          */
8314         if (time_after64(h->last_heartbeat_timestamp +
8315                                 (h->heartbeat_sample_interval), now))
8316                 return false;
8317
8318         /* If heartbeat has not changed since we last looked, we're not ok. */
8319         spin_lock_irqsave(&h->lock, flags);
8320         heartbeat = readl(&h->cfgtable->HeartBeat);
8321         spin_unlock_irqrestore(&h->lock, flags);
8322         if (h->last_heartbeat == heartbeat) {
8323                 controller_lockup_detected(h);
8324                 return true;
8325         }
8326
8327         /* We're ok. */
8328         h->last_heartbeat = heartbeat;
8329         h->last_heartbeat_timestamp = now;
8330         return false;
8331 }
8332
8333 /*
8334  * Set ioaccel status for all ioaccel volumes.
8335  *
8336  * Called from monitor controller worker (hpsa_event_monitor_worker)
8337  *
8338  * A Volume (or Volumes that comprise an Array set) may be undergoing a
8339  * transformation, so we will be turning off ioaccel for all volumes that
8340  * make up the Array.
8341  */
8342 static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8343 {
8344         int rc;
8345         int i;
8346         u8 ioaccel_status;
8347         unsigned char *buf;
8348         struct hpsa_scsi_dev_t *device;
8349
8350         if (!h)
8351                 return;
8352
8353         buf = kmalloc(64, GFP_KERNEL);
8354         if (!buf)
8355                 return;
8356
8357         /*
8358          * Run through current device list used during I/O requests.
8359          */
8360         for (i = 0; i < h->ndevices; i++) {
8361                 int offload_to_be_enabled = 0;
8362                 int offload_config = 0;
8363
8364                 device = h->dev[i];
8365
8366                 if (!device)
8367                         continue;
8368                 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8369                                                 HPSA_VPD_LV_IOACCEL_STATUS))
8370                         continue;
8371
8372                 memset(buf, 0, 64);
8373
8374                 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8375                                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8376                                         buf, 64);
8377                 if (rc != 0)
8378                         continue;
8379
8380                 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8381
8382                 /*
8383                  * Check if offload is still configured on
8384                  */
8385                 offload_config =
8386                                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8387                 /*
8388                  * If offload is configured on, check to see if ioaccel
8389                  * needs to be enabled.
8390                  */
8391                 if (offload_config)
8392                         offload_to_be_enabled =
8393                                 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8394
8395                 /*
8396                  * If ioaccel is to be re-enabled, re-enable later during the
8397                  * scan operation so the driver can get a fresh raidmap
8398                  * before turning ioaccel back on.
8399                  */
8400                 if (offload_to_be_enabled)
8401                         continue;
8402
8403                 /*
8404                  * Immediately turn off ioaccel for any volume the
8405                  * controller tells us to. Some of the reasons could be:
8406                  *    transformation - change to the LVs of an Array.
8407                  *    degraded volume - component failure
8408                  */
8409                 hpsa_turn_off_ioaccel_for_device(device);
8410         }
8411
8412         kfree(buf);
8413 }
8414
8415 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8416 {
8417         char *event_type;
8418
8419         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8420                 return;
8421
8422         /* Ask the controller to clear the events we're handling. */
8423         if ((h->transMethod & (CFGTBL_Trans_io_accel1
8424                         | CFGTBL_Trans_io_accel2)) &&
8425                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8426                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8427
8428                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8429                         event_type = "state change";
8430                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8431                         event_type = "configuration change";
8432                 /* Stop sending new RAID offload reqs via the IO accelerator */
8433                 scsi_block_requests(h->scsi_host);
8434                 hpsa_set_ioaccel_status(h);
8435                 hpsa_drain_accel_commands(h);
8436                 /* Tell the controller which events are being acknowledged */
8437                 dev_warn(&h->pdev->dev,
8438                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8439                         h->events, event_type);
8440                 writel(h->events, &(h->cfgtable->clear_event_notify));
8441                 /* Set the "clear event notify field update" bit 6 */
8442                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8443                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8444                 hpsa_wait_for_clear_event_notify_ack(h);
8445                 scsi_unblock_requests(h->scsi_host);
8446         } else {
8447                 /* Acknowledge controller notification events. */
8448                 writel(h->events, &(h->cfgtable->clear_event_notify));
8449                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8450                 hpsa_wait_for_clear_event_notify_ack(h);
8451         }
8452         return;
8453 }
8454
8455 /* Check a register on the controller to see if there are configuration
8456  * changes (added/changed/removed logical drives, etc.) which mean that
8457  * we should rescan the controller for devices.
8458  * Also check flag for driver-initiated rescan.
8459  */
8460 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8461 {
8462         if (h->drv_req_rescan) {
8463                 h->drv_req_rescan = 0;
8464                 return 1;
8465         }
8466
8467         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8468                 return 0;
8469
8470         h->events = readl(&(h->cfgtable->event_notify));
8471         return h->events & RESCAN_REQUIRED_EVENT_BITS;
8472 }
8473
8474 /*
8475  * Check if any of the offline devices have become ready
8476  */
8477 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8478 {
8479         unsigned long flags;
8480         struct offline_device_entry *d;
8481         struct list_head *this, *tmp;
8482
8483         spin_lock_irqsave(&h->offline_device_lock, flags);
8484         list_for_each_safe(this, tmp, &h->offline_device_list) {
8485                 d = list_entry(this, struct offline_device_entry,
8486                                 offline_list);
8487                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8488                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8489                         spin_lock_irqsave(&h->offline_device_lock, flags);
8490                         list_del(&d->offline_list);
8491                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
8492                         return 1;
8493                 }
8494                 spin_lock_irqsave(&h->offline_device_lock, flags);
8495         }
8496         spin_unlock_irqrestore(&h->offline_device_lock, flags);
8497         return 0;
8498 }
8499
8500 static int hpsa_luns_changed(struct ctlr_info *h)
8501 {
8502         int rc = 1; /* assume there are changes */
8503         struct ReportLUNdata *logdev = NULL;
8504
8505         /* if we can't find out if lun data has changed,
8506          * assume that it has.
8507          */
8508
8509         if (!h->lastlogicals)
8510                 return rc;
8511
8512         logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8513         if (!logdev)
8514                 return rc;
8515
8516         if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8517                 dev_warn(&h->pdev->dev,
8518                         "report luns failed, can't track lun changes.\n");
8519                 goto out;
8520         }
8521         if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8522                 dev_info(&h->pdev->dev,
8523                         "Lun changes detected.\n");
8524                 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8525                 goto out;
8526         } else
8527                 rc = 0; /* no changes detected. */
8528 out:
8529         kfree(logdev);
8530         return rc;
8531 }
8532
8533 static void hpsa_perform_rescan(struct ctlr_info *h)
8534 {
8535         struct Scsi_Host *sh = NULL;
8536         unsigned long flags;
8537
8538         /*
8539          * Do the scan after the reset
8540          */
8541         spin_lock_irqsave(&h->reset_lock, flags);
8542         if (h->reset_in_progress) {
8543                 h->drv_req_rescan = 1;
8544                 spin_unlock_irqrestore(&h->reset_lock, flags);
8545                 return;
8546         }
8547         spin_unlock_irqrestore(&h->reset_lock, flags);
8548
8549         sh = scsi_host_get(h->scsi_host);
8550         if (sh != NULL) {
8551                 hpsa_scan_start(sh);
8552                 scsi_host_put(sh);
8553                 h->drv_req_rescan = 0;
8554         }
8555 }
8556
8557 /*
8558  * watch for controller events
8559  */
8560 static void hpsa_event_monitor_worker(struct work_struct *work)
8561 {
8562         struct ctlr_info *h = container_of(to_delayed_work(work),
8563                                         struct ctlr_info, event_monitor_work);
8564         unsigned long flags;
8565
8566         spin_lock_irqsave(&h->lock, flags);
8567         if (h->remove_in_progress) {
8568                 spin_unlock_irqrestore(&h->lock, flags);
8569                 return;
8570         }
8571         spin_unlock_irqrestore(&h->lock, flags);
8572
8573         if (hpsa_ctlr_needs_rescan(h)) {
8574                 hpsa_ack_ctlr_events(h);
8575                 hpsa_perform_rescan(h);
8576         }
8577
8578         spin_lock_irqsave(&h->lock, flags);
8579         if (!h->remove_in_progress)
8580                 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8581                                 HPSA_EVENT_MONITOR_INTERVAL);
8582         spin_unlock_irqrestore(&h->lock, flags);
8583 }
8584
8585 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8586 {
8587         unsigned long flags;
8588         struct ctlr_info *h = container_of(to_delayed_work(work),
8589                                         struct ctlr_info, rescan_ctlr_work);
8590
8591         spin_lock_irqsave(&h->lock, flags);
8592         if (h->remove_in_progress) {
8593                 spin_unlock_irqrestore(&h->lock, flags);
8594                 return;
8595         }
8596         spin_unlock_irqrestore(&h->lock, flags);
8597
8598         if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8599                 hpsa_perform_rescan(h);
8600         } else if (h->discovery_polling) {
8601                 if (hpsa_luns_changed(h)) {
8602                         dev_info(&h->pdev->dev,
8603                                 "driver discovery polling rescan.\n");
8604                         hpsa_perform_rescan(h);
8605                 }
8606         }
8607         spin_lock_irqsave(&h->lock, flags);
8608         if (!h->remove_in_progress)
8609                 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8610                                 h->heartbeat_sample_interval);
8611         spin_unlock_irqrestore(&h->lock, flags);
8612 }
8613
8614 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8615 {
8616         unsigned long flags;
8617         struct ctlr_info *h = container_of(to_delayed_work(work),
8618                                         struct ctlr_info, monitor_ctlr_work);
8619
8620         detect_controller_lockup(h);
8621         if (lockup_detected(h))
8622                 return;
8623
8624         spin_lock_irqsave(&h->lock, flags);
8625         if (!h->remove_in_progress)
8626                 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8627                                 h->heartbeat_sample_interval);
8628         spin_unlock_irqrestore(&h->lock, flags);
8629 }
8630
8631 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8632                                                 char *name)
8633 {
8634         struct workqueue_struct *wq = NULL;
8635
8636         wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8637         if (!wq)
8638                 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8639
8640         return wq;
8641 }
8642
8643 static void hpda_free_ctlr_info(struct ctlr_info *h)
8644 {
8645         kfree(h->reply_map);
8646         kfree(h);
8647 }
8648
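     /*
      * Allocate the controller bookkeeping structure plus the reply_map
      * array (one entry per possible CPU), used elsewhere in the driver to
      * map a submitting CPU to a reply queue.
      */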
8649 static struct ctlr_info *hpda_alloc_ctlr_info(void)
8650 {
8651         struct ctlr_info *h;
8652
8653         h = kzalloc(sizeof(*h), GFP_KERNEL);
8654         if (!h)
8655                 return NULL;
8656
8657         h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8658         if (!h->reply_map) {
8659                 kfree(h);
8660                 return NULL;
8661         }
8662         return h;
8663 }
8664
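     /*
      * PCI probe entry point.  If reset_devices (kdump) asked for a reset
      * that could only be done as a soft reset, we initialize just far
      * enough to issue that reset, tear everything down again via
      * hpsa_undo_allocations_after_kdump_soft_reset(), and restart from
      * reinit_after_soft_reset.  The clean1..clean8 labels unwind the
      * corresponding allocation steps on error.
      */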
8665 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8666 {
8667         int rc;
8668         struct ctlr_info *h;
8669         int try_soft_reset = 0;
8670         unsigned long flags;
8671         u32 board_id;
8672
8673         if (number_of_controllers == 0)
8674                 printk(KERN_INFO DRIVER_NAME "\n");
8675
8676         rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8677         if (rc < 0) {
8678                 dev_warn(&pdev->dev, "Board ID not found\n");
8679                 return rc;
8680         }
8681
8682         rc = hpsa_init_reset_devices(pdev, board_id);
8683         if (rc) {
8684                 if (rc != -ENOTSUPP)
8685                         return rc;
8686                 /* If the reset fails in a particular way (it has no way to do
8687                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
8688                  * a soft reset once we get the controller configured up to the
8689                  * point that it can accept a command.
8690                  */
8691                 try_soft_reset = 1;
8692                 rc = 0;
8693         }
8694
8695 reinit_after_soft_reset:
8696
8697         /* Command structures must be aligned on a 32-byte boundary because
8698          * the 5 lower bits of the address are used by the hardware and by
8699          * the driver.  See comments in hpsa.h for more info.
8700          */
8701         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8702         h = hpda_alloc_ctlr_info();
8703         if (!h) {
8704                 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8705                 return -ENOMEM;
8706         }
8707
8708         h->pdev = pdev;
8709
8710         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8711         INIT_LIST_HEAD(&h->offline_device_list);
8712         spin_lock_init(&h->lock);
8713         spin_lock_init(&h->offline_device_lock);
8714         spin_lock_init(&h->scan_lock);
8715         spin_lock_init(&h->reset_lock);
8716         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8717
8718         /* Allocate and clear per-cpu variable lockup_detected */
8719         h->lockup_detected = alloc_percpu(u32);
8720         if (!h->lockup_detected) {
8721                 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8722                 rc = -ENOMEM;
8723                 goto clean1;    /* aer/h */
8724         }
8725         set_lockup_detected_for_all_cpus(h, 0);
8726
8727         rc = hpsa_pci_init(h);
8728         if (rc)
8729                 goto clean2;    /* lu, aer/h */
8730
8731         /* relies on h-> settings made by hpsa_pci_init, including
8732          * interrupt_mode h->intr */
8733         rc = hpsa_scsi_host_alloc(h);
8734         if (rc)
8735                 goto clean2_5;  /* pci, lu, aer/h */
8736
8737         sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8738         h->ctlr = number_of_controllers;
8739         number_of_controllers++;
8740
8741         /* configure PCI DMA stuff */
8742         rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8743         if (rc != 0) {
8744                 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8745                 if (rc != 0) {
8746                         dev_err(&pdev->dev, "no suitable DMA available\n");
8747                         goto clean3;    /* shost, pci, lu, aer/h */
8748                 }
8749         }
8750
8751         /* make sure the board interrupts are off */
8752         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8753
8754         rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8755         if (rc)
8756                 goto clean3;    /* shost, pci, lu, aer/h */
8757         rc = hpsa_alloc_cmd_pool(h);
8758         if (rc)
8759                 goto clean4;    /* irq, shost, pci, lu, aer/h */
8760         rc = hpsa_alloc_sg_chain_blocks(h);
8761         if (rc)
8762                 goto clean5;    /* cmd, irq, shost, pci, lu, aer/h */
8763         init_waitqueue_head(&h->scan_wait_queue);
8764         init_waitqueue_head(&h->event_sync_wait_queue);
8765         mutex_init(&h->reset_mutex);
8766         h->scan_finished = 1; /* no scan currently in progress */
8767         h->scan_waiting = 0;
8768
8769         pci_set_drvdata(pdev, h);
8770         h->ndevices = 0;
8771
8772         spin_lock_init(&h->devlock);
8773         rc = hpsa_put_ctlr_into_performant_mode(h);
8774         if (rc)
8775                 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8776
8777         /* create the controller workqueues: rescan, resubmit, and monitor */
8778         h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8779         if (!h->rescan_ctlr_wq) {
8780                 rc = -ENOMEM;
8781                 goto clean7;
8782         }
8783
8784         h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8785         if (!h->resubmit_wq) {
8786                 rc = -ENOMEM;
8787                 goto clean7;    /* aer/h */
8788         }
8789
8790         h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8791         if (!h->monitor_ctlr_wq) {
8792                 rc = -ENOMEM;
8793                 goto clean7;
8794         }
8795
8796         /*
8797          * At this point, the controller is ready to take commands.
8798          * Now, if reset_devices and the hard reset didn't work, try
8799          * the soft reset and see if that works.
8800          */
8801         if (try_soft_reset) {
8802
8803                 /* This is kind of gross.  We may or may not get a completion
8804                  * from the soft reset command, and if we do, then the value
8805                  * from the fifo may or may not be valid.  So, we wait 10 secs
8806                  * after the reset throwing away any completions we get during
8807                  * that time.  Unregister the interrupt handler and register
8808                  * fake ones to scoop up any residual completions.
8809                  */
8810                 spin_lock_irqsave(&h->lock, flags);
8811                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8812                 spin_unlock_irqrestore(&h->lock, flags);
8813                 hpsa_free_irqs(h);
8814                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8815                                         hpsa_intx_discard_completions);
8816                 if (rc) {
8817                         dev_warn(&h->pdev->dev,
8818                                 "Failed to request_irq after soft reset.\n");
8819                         /*
8820                          * cannot goto clean7 or free_irqs will be called
8821                          * again. Instead, do its work
8822                          */
8823                         hpsa_free_performant_mode(h);   /* clean7 */
8824                         hpsa_free_sg_chain_blocks(h);   /* clean6 */
8825                         hpsa_free_cmd_pool(h);          /* clean5 */
8826                         /*
8827                          * skip hpsa_free_irqs(h) clean4 since that
8828                          * was just called before request_irqs failed
8829                          */
8830                         goto clean3;
8831                 }
8832
8833                 rc = hpsa_kdump_soft_reset(h);
8834                 if (rc)
8835                         /* Neither hard nor soft reset worked, we're hosed. */
8836                         goto clean7;
8837
8838                 dev_info(&h->pdev->dev, "Board READY.\n");
8839                 dev_info(&h->pdev->dev,
8840                         "Waiting for stale completions to drain.\n");
8841                 h->access.set_intr_mask(h, HPSA_INTR_ON);
8842                 msleep(10000);
8843                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8844
8845                 rc = controller_reset_failed(h->cfgtable);
8846                 if (rc)
8847                         dev_info(&h->pdev->dev,
8848                                 "Soft reset appears to have failed.\n");
8849
8850                 /* since the controller's reset, we have to go back and re-init
8851                  * everything.  Easiest to just forget what we've done and do it
8852                  * all over again.
8853                  */
8854                 hpsa_undo_allocations_after_kdump_soft_reset(h);
8855                 try_soft_reset = 0;
8856                 if (rc)
8857                         /* don't goto clean, we already unallocated */
8858                         return -ENODEV;
8859
8860                 goto reinit_after_soft_reset;
8861         }
8862
8863         /* Enable Accelerated IO path at driver layer */
8864         h->acciopath_status = 1;
8865         /* Disable discovery polling.*/
8866         /* Disable discovery polling. */
8867
8868
8869         /* Turn the interrupts on so we can service requests */
8870         h->access.set_intr_mask(h, HPSA_INTR_ON);
8871
8872         hpsa_hba_inquiry(h);
8873
8874         h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8875         if (!h->lastlogicals)
8876                 dev_info(&h->pdev->dev,
8877                         "Can't track change to report lun data\n");
8878
8879         /* hook into SCSI subsystem */
8880         rc = hpsa_scsi_add_host(h);
8881         if (rc)
8882                 goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8883
8884         /* Monitor the controller for firmware lockups */
8885         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8886         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8887         schedule_delayed_work(&h->monitor_ctlr_work,
8888                                 h->heartbeat_sample_interval);
8889         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8890         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8891                                 h->heartbeat_sample_interval);
8892         INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8893         schedule_delayed_work(&h->event_monitor_work,
8894                                 HPSA_EVENT_MONITOR_INTERVAL);
8895         return 0;
8896
8897 clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8898         kfree(h->lastlogicals);
8899 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8900         hpsa_free_performant_mode(h);
8901         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8902 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8903         hpsa_free_sg_chain_blocks(h);
8904 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8905         hpsa_free_cmd_pool(h);
8906 clean4: /* irq, shost, pci, lu, aer/h */
8907         hpsa_free_irqs(h);
8908 clean3: /* shost, pci, lu, aer/h */
8909         scsi_host_put(h->scsi_host);
8910         h->scsi_host = NULL;
8911 clean2_5: /* pci, lu, aer/h */
8912         hpsa_free_pci_init(h);
8913 clean2: /* lu, aer/h */
8914         if (h->lockup_detected) {
8915                 free_percpu(h->lockup_detected);
8916                 h->lockup_detected = NULL;
8917         }
8918 clean1: /* wq/aer/h */
8919         if (h->resubmit_wq) {
8920                 destroy_workqueue(h->resubmit_wq);
8921                 h->resubmit_wq = NULL;
8922         }
8923         if (h->rescan_ctlr_wq) {
8924                 destroy_workqueue(h->rescan_ctlr_wq);
8925                 h->rescan_ctlr_wq = NULL;
8926         }
8927         if (h->monitor_ctlr_wq) {
8928                 destroy_workqueue(h->monitor_ctlr_wq);
8929                 h->monitor_ctlr_wq = NULL;
8930         }
8931         kfree(h);
8932         return rc;
8933 }
8934
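     /*
      * Send a CISS cache-flush command to the controller so that any data
      * still sitting in its (possibly battery-backed) write cache reaches
      * the disks; called from the shutdown and remove paths.  Skipped when
      * the controller is already known to be locked up.
      */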
8935 static void hpsa_flush_cache(struct ctlr_info *h)
8936 {
8937         char *flush_buf;
8938         struct CommandList *c;
8939         int rc;
8940
8941         if (unlikely(lockup_detected(h)))
8942                 return;
8943         flush_buf = kzalloc(4, GFP_KERNEL);
8944         if (!flush_buf)
8945                 return;
8946
8947         c = cmd_alloc(h);
8948
8949         if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8950                 RAID_CTLR_LUNID, TYPE_CMD)) {
8951                 goto out;
8952         }
8953         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8954                         DEFAULT_TIMEOUT);
8955         if (rc)
8956                 goto out;
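             /*
              * Note: the "out" label below sits between the if () condition
              * and its dev_warn() body, so every error goto above lands on
              * the warning, while a successful flush (CommandStatus == 0)
              * skips it.
              */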
8957         if (c->err_info->CommandStatus != 0)
8958 out:
8959                 dev_warn(&h->pdev->dev,
8960                         "error flushing cache on controller\n");
8961         cmd_free(h, c);
8962         kfree(flush_buf);
8963 }
8964
8965 /* Make controller gather fresh report lun data each time we
8966  * send down a report luns request
8967  */
8968 static void hpsa_disable_rld_caching(struct ctlr_info *h)
8969 {
8970         u32 *options;
8971         struct CommandList *c;
8972         int rc;
8973
8974         /* Don't bother trying to set diag options if locked up */
8975         if (unlikely(h->lockup_detected))
8976                 return;
8977
8978         options = kzalloc(sizeof(*options), GFP_KERNEL);
8979         if (!options)
8980                 return;
8981
8982         c = cmd_alloc(h);
8983
8984         /* first, get the current diag options settings */
8985         if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8986                 RAID_CTLR_LUNID, TYPE_CMD))
8987                 goto errout;
8988
8989         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8990                         NO_TIMEOUT);
8991         if ((rc != 0) || (c->err_info->CommandStatus != 0))
8992                 goto errout;
8993
8994         /* Now, set the bit for disabling the RLD caching */
8995         *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8996
8997         if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8998                 RAID_CTLR_LUNID, TYPE_CMD))
8999                 goto errout;
9000
9001         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
9002                         NO_TIMEOUT);
9003         if ((rc != 0)  || (c->err_info->CommandStatus != 0))
9004                 goto errout;
9005
9006         /* Now verify that it got set: */
9007         if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9008                 RAID_CTLR_LUNID, TYPE_CMD))
9009                 goto errout;
9010
9011         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
9012                         NO_TIMEOUT);
9013         if ((rc != 0)  || (c->err_info->CommandStatus != 0))
9014                 goto errout;
9015
9016         if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
9017                 goto out;
9018
9019 errout:
9020         dev_err(&h->pdev->dev,
9021                         "Error: failed to disable report lun data caching.\n");
9022 out:
9023         cmd_free(h, c);
9024         kfree(options);
9025 }
9026
9027 static void __hpsa_shutdown(struct pci_dev *pdev)
9028 {
9029         struct ctlr_info *h;
9030
9031         h = pci_get_drvdata(pdev);
9032         /* Turn board interrupts off  and send the flush cache command
9033          * sendcmd will turn off interrupt, and send the flush...
9034          * To write all data in the battery backed cache to disks
9035          */
9036         hpsa_flush_cache(h);
9037         h->access.set_intr_mask(h, HPSA_INTR_OFF);
9038         hpsa_free_irqs(h);                      /* init_one 4 */
9039         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
9040 }
9041
9042 static void hpsa_shutdown(struct pci_dev *pdev)
9043 {
9044         __hpsa_shutdown(pdev);
9045         pci_disable_device(pdev);
9046 }
9047
9048 static void hpsa_free_device_info(struct ctlr_info *h)
9049 {
9050         int i;
9051
9052         for (i = 0; i < h->ndevices; i++) {
9053                 kfree(h->dev[i]);
9054                 h->dev[i] = NULL;
9055         }
9056 }
9057
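     /*
      * PCI remove: stop the monitor/rescan/resubmit work first so nothing
      * re-arms while we tear down, then undo hpsa_init_one() in roughly
      * reverse order -- the "init_one N" annotations below mark which setup
      * step each call is undoing.
      */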
9058 static void hpsa_remove_one(struct pci_dev *pdev)
9059 {
9060         struct ctlr_info *h;
9061         unsigned long flags;
9062
9063         if (pci_get_drvdata(pdev) == NULL) {
9064                 dev_err(&pdev->dev, "unable to remove device\n");
9065                 return;
9066         }
9067         h = pci_get_drvdata(pdev);
9068
9069         /* Get rid of any controller monitoring work items */
9070         spin_lock_irqsave(&h->lock, flags);
9071         h->remove_in_progress = 1;
9072         spin_unlock_irqrestore(&h->lock, flags);
9073         cancel_delayed_work_sync(&h->monitor_ctlr_work);
9074         cancel_delayed_work_sync(&h->rescan_ctlr_work);
9075         cancel_delayed_work_sync(&h->event_monitor_work);
9076         destroy_workqueue(h->rescan_ctlr_wq);
9077         destroy_workqueue(h->resubmit_wq);
9078         destroy_workqueue(h->monitor_ctlr_wq);
9079
9080         hpsa_delete_sas_host(h);
9081
9082         /*
9083          * Call before disabling interrupts.
9084          * scsi_remove_host can trigger I/O operations especially
9085          * when multipath is enabled. There can be SYNCHRONIZE CACHE
9086          * operations which cannot complete and will hang the system.
9087          */
9088         if (h->scsi_host)
9089                 scsi_remove_host(h->scsi_host);         /* init_one 8 */
9090         /* includes hpsa_free_irqs - init_one 4 */
9091         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9092         __hpsa_shutdown(pdev);
9093
9094         hpsa_free_device_info(h);               /* scan */
9095
9096         kfree(h->hba_inquiry_data);                     /* init_one 10 */
9097         h->hba_inquiry_data = NULL;                     /* init_one 10 */
9098         hpsa_free_ioaccel2_sg_chain_blocks(h);
9099         hpsa_free_performant_mode(h);                   /* init_one 7 */
9100         hpsa_free_sg_chain_blocks(h);                   /* init_one 6 */
9101         hpsa_free_cmd_pool(h);                          /* init_one 5 */
9102         kfree(h->lastlogicals);
9103
9104         /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
9105
9106         scsi_host_put(h->scsi_host);                    /* init_one 3 */
9107         h->scsi_host = NULL;                            /* init_one 3 */
9108
9109         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9110         hpsa_free_pci_init(h);                          /* init_one 2.5 */
9111
9112         free_percpu(h->lockup_detected);                /* init_one 2 */
9113         h->lockup_detected = NULL;                      /* init_one 2 */
9114         /* (void) pci_disable_pcie_error_reporting(pdev); */    /* init_one 1 */
9115
9116         hpda_free_ctlr_info(h);                         /* init_one 1 */
9117 }
9118
9119 static int __maybe_unused hpsa_suspend(
9120         __attribute__((unused)) struct device *dev)
9121 {
9122         return -ENOSYS;
9123 }
9124
9125 static int __maybe_unused hpsa_resume(
9126         __attribute__((unused)) struct device *dev)
9127 {
9128         return -ENOSYS;
9129 }
9130
9131 static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
9132
9133 static struct pci_driver hpsa_pci_driver = {
9134         .name = HPSA,
9135         .probe = hpsa_init_one,
9136         .remove = hpsa_remove_one,
9137         .id_table = hpsa_pci_device_id,
9138         .shutdown = hpsa_shutdown,
9139         .driver.pm = &hpsa_pm_ops,
9140 };
9141
9142 /* Fill in bucket_map[], given nsgs (the max number of
9143  * scatter gather elements supported) and bucket[],
9144  * which is an array of 8 integers.  The bucket[] array
9145  * contains 8 different DMA transfer sizes (in 16
9146  * byte increments) which the controller uses to fetch
9147  * commands.  This function fills in bucket_map[], which
9148  * maps a given number of scatter gather elements to one of
9149  * the 8 DMA transfer sizes.  The point of it is to allow the
9150  * controller to only do as much DMA as needed to fetch the
9151  * command, with the DMA transfer size encoded in the lower
9152  * bits of the command address.
9153  */
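     /*
      * For example, with the bft[] table defined in
      * hpsa_enter_performant_mode() below ({5, 6, 8, 10, 12, 20, 28, ...})
      * and min_blocks = 4, a command with 3 SG entries needs 3 + 4 = 7
      * sixteen-byte blocks; the smallest bucket that fits is bucket[2] = 8,
      * so bucket_map[3] = 2.
      */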
9154 static void  calc_bucket_map(int bucket[], int num_buckets,
9155         int nsgs, int min_blocks, u32 *bucket_map)
9156 {
9157         int i, j, b, size;
9158
9159         /* Note, bucket_map must have nsgs+1 entries. */
9160         for (i = 0; i <= nsgs; i++) {
9161                 /* Compute size of a command with i SG entries */
9162                 size = i + min_blocks;
9163                 b = num_buckets; /* Assume the biggest bucket */
9164                 /* Find the bucket that is just big enough */
9165                 for (j = 0; j < num_buckets; j++) {
9166                         if (bucket[j] >= size) {
9167                                 b = j;
9168                                 break;
9169                         }
9170                 }
9171                 /* for a command with i SG entries, use bucket b. */
9172                 bucket_map[i] = b;
9173         }
9174 }
9175
9176 /*
9177  * return -ENODEV on err, 0 on success (or no action)
9178  * allocates numerous items that must be freed later
9179  */
9180 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9181 {
9182         int i;
9183         unsigned long register_value;
9184         unsigned long transMethod = CFGTBL_Trans_Performant |
9185                         (trans_support & CFGTBL_Trans_use_short_tags) |
9186                                 CFGTBL_Trans_enable_directed_msix |
9187                         (trans_support & (CFGTBL_Trans_io_accel1 |
9188                                 CFGTBL_Trans_io_accel2));
9189         struct access_method access = SA5_performant_access;
9190
9191         /* This is a bit complicated.  There are 8 registers on
9192          * the controller which we write to in order to tell it 8 different
9193          * sizes of commands which there may be.  It's a way of
9194          * reducing the DMA done to fetch each command.  Encoded into
9195          * each command's tag are 3 bits which communicate to the controller
9196          * which of the eight sizes that command fits within.  The size of
9197          * each command depends on how many scatter gather entries there are.
9198          * Each SG entry requires 16 bytes.  The eight registers are programmed
9199          * with the number of 16-byte blocks a command of that size requires.
9200          * The smallest command possible requires 5 such 16 byte blocks.
9201          * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
9202          * blocks.  Note, this only extends to the SG entries contained
9203          * within the command block, and does not extend to chained blocks
9204          * of SG elements.   bft[] contains the eight values we write to
9205          * the registers.  They are not evenly distributed, but have more
9206          * sizes for small commands, and fewer sizes for larger commands.
9207          */
9208         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9209 #define MIN_IOACCEL2_BFT_ENTRY 5
9210 #define HPSA_IOACCEL2_HEADER_SZ 4
9211         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9212                         13, 14, 15, 16, 17, 18, 19,
9213                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9214         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9215         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9216         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9217                                  16 * MIN_IOACCEL2_BFT_ENTRY);
9218         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9219         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9220         /*  5 = 1 s/g entry or 4k
9221          *  6 = 2 s/g entry or 8k
9222          *  8 = 4 s/g entry or 16k
9223          * 10 = 6 s/g entry or 24k
9224          */
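     /*
      * The calc_bucket_map() call below turns bft[] into h->blockFetchTable,
      * which the command submission path (not shown here) consults to pick
      * which of these eight sizes to encode for a command with a given
      * number of SG entries, per the tag-bits scheme described above.
      */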
9225
9226         /* If the controller supports either ioaccel method then
9227          * we can also use the RAID stack submit path that does not
9228          * perform the superfluous readl() after each command submission.
9229          */
9230         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9231                 access = SA5_performant_access_no_read;
9232
9233         /* Controller spec: zero out this buffer. */
9234         for (i = 0; i < h->nreply_queues; i++)
9235                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9236
9237         bft[7] = SG_ENTRIES_IN_CMD + 4;
9238         calc_bucket_map(bft, ARRAY_SIZE(bft),
9239                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9240         for (i = 0; i < 8; i++)
9241                 writel(bft[i], &h->transtable->BlockFetch[i]);
9242
9243         /* size of controller ring buffer */
9244         writel(h->max_commands, &h->transtable->RepQSize);
9245         writel(h->nreply_queues, &h->transtable->RepQCount);
9246         writel(0, &h->transtable->RepQCtrAddrLow32);
9247         writel(0, &h->transtable->RepQCtrAddrHigh32);
9248
9249         for (i = 0; i < h->nreply_queues; i++) {
9250                 writel(0, &h->transtable->RepQAddr[i].upper);
9251                 writel(h->reply_queue[i].busaddr,
9252                         &h->transtable->RepQAddr[i].lower);
9253         }
9254
9255         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9256         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9257         /*
9258          * enable outbound interrupt coalescing in accelerator mode;
9259          */
9260         if (trans_support & CFGTBL_Trans_io_accel1) {
9261                 access = SA5_ioaccel_mode1_access;
9262                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9263                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9264         } else
9265                 if (trans_support & CFGTBL_Trans_io_accel2)
9266                         access = SA5_ioaccel_mode2_access;
9267         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9268         if (hpsa_wait_for_mode_change_ack(h)) {
9269                 dev_err(&h->pdev->dev,
9270                         "performant mode problem - doorbell timeout\n");
9271                 return -ENODEV;
9272         }
9273         register_value = readl(&(h->cfgtable->TransportActive));
9274         if (!(register_value & CFGTBL_Trans_Performant)) {
9275                 dev_err(&h->pdev->dev,
9276                         "performant mode problem - transport not active\n");
9277                 return -ENODEV;
9278         }
9279         /* Change the access methods to the performant access methods */
9280         h->access = access;
9281         h->transMethod = transMethod;
9282
9283         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9284                 (trans_support & CFGTBL_Trans_io_accel2)))
9285                 return 0;
9286
9287         if (trans_support & CFGTBL_Trans_io_accel1) {
9288                 /* Set up I/O accelerator mode */
9289                 for (i = 0; i < h->nreply_queues; i++) {
9290                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9291                         h->reply_queue[i].current_entry =
9292                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9293                 }
9294                 bft[7] = h->ioaccel_maxsg + 8;
9295                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9296                                 h->ioaccel1_blockFetchTable);
9297
9298                 /* initialize all reply queue entries to unused */
9299                 for (i = 0; i < h->nreply_queues; i++)
9300                         memset(h->reply_queue[i].head,
9301                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9302                                 h->reply_queue_size);
9303
9304                 /* set all the constant fields in the accelerator command
9305                  * frames once at init time to save CPU cycles later.
9306                  */
9307                 for (i = 0; i < h->nr_cmds; i++) {
9308                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9309
9310                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
9311                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
9312                                         (i * sizeof(struct ErrorInfo)));
9313                         cp->err_info_len = sizeof(struct ErrorInfo);
9314                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
9315                         cp->host_context_flags =
9316                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9317                         cp->timeout_sec = 0;
9318                         cp->ReplyQueue = 0;
9319                         cp->tag =
9320                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9321                         cp->host_addr =
9322                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9323                                         (i * sizeof(struct io_accel1_cmd)));
9324                 }
9325         } else if (trans_support & CFGTBL_Trans_io_accel2) {
9326                 u64 cfg_offset, cfg_base_addr_index;
9327                 u32 bft2_offset, cfg_base_addr;
9328
9329                 hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9330                                     &cfg_base_addr_index, &cfg_offset);
9331                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9332                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9333                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9334                                 4, h->ioaccel2_blockFetchTable);
9335                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9336                 BUILD_BUG_ON(offsetof(struct CfgTable,
9337                                 io_accel_request_size_offset) != 0xb8);
9338                 h->ioaccel2_bft2_regs =
9339                         remap_pci_mem(pci_resource_start(h->pdev,
9340                                         cfg_base_addr_index) +
9341                                         cfg_offset + bft2_offset,
9342                                         ARRAY_SIZE(bft2) *
9343                                         sizeof(*h->ioaccel2_bft2_regs));
9344                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9345                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9346         }
9347         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9348         if (hpsa_wait_for_mode_change_ack(h)) {
9349                 dev_err(&h->pdev->dev,
9350                         "performant mode problem - enabling ioaccel mode\n");
9351                 return -ENODEV;
9352         }
9353         return 0;
9354 }
9355
9356 /* Free ioaccel1 mode command blocks and block fetch table */
9357 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9358 {
9359         if (h->ioaccel_cmd_pool) {
9360                 dma_free_coherent(&h->pdev->dev,
9361                                   h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9362                                   h->ioaccel_cmd_pool,
9363                                   h->ioaccel_cmd_pool_dhandle);
9364                 h->ioaccel_cmd_pool = NULL;
9365                 h->ioaccel_cmd_pool_dhandle = 0;
9366         }
9367         kfree(h->ioaccel1_blockFetchTable);
9368         h->ioaccel1_blockFetchTable = NULL;
9369 }
9370
9371 /* Allocate ioaccel1 mode command blocks and block fetch table */
9372 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9373 {
9374         h->ioaccel_maxsg =
9375                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9376         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9377                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9378
9379         /* Command structures must be aligned on a 128-byte boundary
9380          * because the 7 lower bits of the address are used by the
9381          * hardware.
9382          */
9383         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9384                         IOACCEL1_COMMANDLIST_ALIGNMENT);
9385         h->ioaccel_cmd_pool =
9386                 dma_alloc_coherent(&h->pdev->dev,
9387                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9388                         &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9389
9390         h->ioaccel1_blockFetchTable =
9391                 kmalloc(((h->ioaccel_maxsg + 1) *
9392                                 sizeof(u32)), GFP_KERNEL);
9393
9394         if ((h->ioaccel_cmd_pool == NULL) ||
9395                 (h->ioaccel1_blockFetchTable == NULL))
9396                 goto clean_up;
9397
9398         memset(h->ioaccel_cmd_pool, 0,
9399                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9400         return 0;
9401
9402 clean_up:
9403         hpsa_free_ioaccel1_cmd_and_bft(h);
9404         return -ENOMEM;
9405 }
9406
9407 /* Free ioaccel2 mode command blocks and block fetch table */
9408 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9409 {
9410         hpsa_free_ioaccel2_sg_chain_blocks(h);
9411
9412         if (h->ioaccel2_cmd_pool) {
9413                 dma_free_coherent(&h->pdev->dev,
9414                                   h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9415                                   h->ioaccel2_cmd_pool,
9416                                   h->ioaccel2_cmd_pool_dhandle);
9417                 h->ioaccel2_cmd_pool = NULL;
9418                 h->ioaccel2_cmd_pool_dhandle = 0;
9419         }
9420         kfree(h->ioaccel2_blockFetchTable);
9421         h->ioaccel2_blockFetchTable = NULL;
9422 }
9423
9424 /* Allocate ioaccel2 mode command blocks and block fetch table */
9425 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9426 {
9427         int rc;
9428
9429         /* Allocate ioaccel2 mode command blocks and block fetch table */
9430
9431         h->ioaccel_maxsg =
9432                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9433         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9434                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9435
9436         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9437                         IOACCEL2_COMMANDLIST_ALIGNMENT);
9438         h->ioaccel2_cmd_pool =
9439                 dma_alloc_coherent(&h->pdev->dev,
9440                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9441                         &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9442
9443         h->ioaccel2_blockFetchTable =
9444                 kmalloc(((h->ioaccel_maxsg + 1) *
9445                                 sizeof(u32)), GFP_KERNEL);
9446
9447         if ((h->ioaccel2_cmd_pool == NULL) ||
9448                 (h->ioaccel2_blockFetchTable == NULL)) {
9449                 rc = -ENOMEM;
9450                 goto clean_up;
9451         }
9452
9453         rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9454         if (rc)
9455                 goto clean_up;
9456
9457         memset(h->ioaccel2_cmd_pool, 0,
9458                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9459         return 0;
9460
9461 clean_up:
9462         hpsa_free_ioaccel2_cmd_and_bft(h);
9463         return rc;
9464 }
9465
9466 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9467 static void hpsa_free_performant_mode(struct ctlr_info *h)
9468 {
9469         kfree(h->blockFetchTable);
9470         h->blockFetchTable = NULL;
9471         hpsa_free_reply_queues(h);
9472         hpsa_free_ioaccel1_cmd_and_bft(h);
9473         hpsa_free_ioaccel2_cmd_and_bft(h);
9474 }
9475
9476 /* Return a negative error code (-ENOMEM or -ENODEV) on error, 0 on success
9477  * (or no action).  Allocates numerous items that must be freed later.
9478  */
9479 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9480 {
9481         u32 trans_support;
9482         unsigned long transMethod = CFGTBL_Trans_Performant |
9483                                         CFGTBL_Trans_use_short_tags;
9484         int i, rc;
9485
9486         if (hpsa_simple_mode)
9487                 return 0;
9488
9489         trans_support = readl(&(h->cfgtable->TransportSupport));
9490         if (!(trans_support & PERFORMANT_MODE))
9491                 return 0;
9492
9493         /* Check for I/O accelerator mode support */
9494         if (trans_support & CFGTBL_Trans_io_accel1) {
9495                 transMethod |= CFGTBL_Trans_io_accel1 |
9496                                 CFGTBL_Trans_enable_directed_msix;
9497                 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9498                 if (rc)
9499                         return rc;
9500         } else if (trans_support & CFGTBL_Trans_io_accel2) {
9501                 transMethod |= CFGTBL_Trans_io_accel2 |
9502                                 CFGTBL_Trans_enable_directed_msix;
9503                 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9504                 if (rc)
9505                         return rc;
9506         }
9507
9508         h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9509         hpsa_get_max_perf_mode_cmds(h);
9510         /* Performant mode ring buffer and supporting data structures */
9511         h->reply_queue_size = h->max_commands * sizeof(u64);
9512
9513         for (i = 0; i < h->nreply_queues; i++) {
9514                 h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9515                                                 h->reply_queue_size,
9516                                                 &h->reply_queue[i].busaddr,
9517                                                 GFP_KERNEL);
9518                 if (!h->reply_queue[i].head) {
9519                         rc = -ENOMEM;
9520                         goto clean1;    /* rq, ioaccel */
9521                 }
9522                 h->reply_queue[i].size = h->max_commands;
9523                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
9524                 h->reply_queue[i].current_entry = 0;
9525         }
9526
9527         /* Need a block fetch table for performant mode */
9528         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9529                                 sizeof(u32)), GFP_KERNEL);
9530         if (!h->blockFetchTable) {
9531                 rc = -ENOMEM;
9532                 goto clean1;    /* rq, ioaccel */
9533         }
9534
9535         rc = hpsa_enter_performant_mode(h, trans_support);
9536         if (rc)
9537                 goto clean2;    /* bft, rq, ioaccel */
9538         return 0;
9539
9540 clean2: /* bft, rq, ioaccel */
9541         kfree(h->blockFetchTable);
9542         h->blockFetchTable = NULL;
9543 clean1: /* rq, ioaccel */
9544         hpsa_free_reply_queues(h);
9545         hpsa_free_ioaccel1_cmd_and_bft(h);
9546         hpsa_free_ioaccel2_cmd_and_bft(h);
9547         return rc;
9548 }
9549
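/* Return nonzero if @c was submitted via one of the ioaccel (I/O accelerator) paths. */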
9550 static int is_accelerated_cmd(struct CommandList *c)
9551 {
9552         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9553 }
9554
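/*
 * Wait for any outstanding ioaccel commands to complete.  The command pool is
 * scanned repeatedly, sleeping 100 ms between passes, until no allocated
 * command has cmd_type CMD_IOACCEL1 or CMD_IOACCEL2.
 */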
9555 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9556 {
9557         struct CommandList *c = NULL;
9558         int i, accel_cmds_out;
9559         int refcount;
9560
9561         do { /* wait for all outstanding ioaccel commands to drain out */
9562                 accel_cmds_out = 0;
9563                 for (i = 0; i < h->nr_cmds; i++) {
9564                         c = h->cmd_pool + i;
9565                         refcount = atomic_inc_return(&c->refcount);
9566                         if (refcount > 1) /* Command is allocated */
9567                                 accel_cmds_out += is_accelerated_cmd(c);
9568                         cmd_free(h, c);
9569                 }
9570                 if (accel_cmds_out <= 0)
9571                         break;
9572                 msleep(100);
9573         } while (1);
9574 }
9575
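/* Allocate an hpsa_sas_phy and its underlying sas_phy object for @hpsa_sas_port. */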
9576 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9577                                 struct hpsa_sas_port *hpsa_sas_port)
9578 {
9579         struct hpsa_sas_phy *hpsa_sas_phy;
9580         struct sas_phy *phy;
9581
9582         hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9583         if (!hpsa_sas_phy)
9584                 return NULL;
9585
9586         phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9587                 hpsa_sas_port->next_phy_index);
9588         if (!phy) {
9589                 kfree(hpsa_sas_phy);
9590                 return NULL;
9591         }
9592
9593         hpsa_sas_port->next_phy_index++;
9594         hpsa_sas_phy->phy = phy;
9595         hpsa_sas_phy->parent_port = hpsa_sas_port;
9596
9597         return hpsa_sas_phy;
9598 }
9599
9600 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9601 {
9602         struct sas_phy *phy = hpsa_sas_phy->phy;
9603
9604         sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9605         if (hpsa_sas_phy->added_to_port)
9606                 list_del(&hpsa_sas_phy->phy_list_entry);
9607         sas_phy_delete(phy);
9608         kfree(hpsa_sas_phy);
9609 }
9610
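/*
 * Fill in the phy identify data, register the phy with the SAS transport
 * layer, and attach it to its parent port's phy list.
 */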
9611 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9612 {
9613         int rc;
9614         struct hpsa_sas_port *hpsa_sas_port;
9615         struct sas_phy *phy;
9616         struct sas_identify *identify;
9617
9618         hpsa_sas_port = hpsa_sas_phy->parent_port;
9619         phy = hpsa_sas_phy->phy;
9620
9621         identify = &phy->identify;
9622         memset(identify, 0, sizeof(*identify));
9623         identify->sas_address = hpsa_sas_port->sas_address;
9624         identify->device_type = SAS_END_DEVICE;
9625         identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9626         identify->target_port_protocols = SAS_PROTOCOL_STP;
9627         phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9628         phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9629         phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9630         phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9631         phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9632
9633         rc = sas_phy_add(hpsa_sas_phy->phy);
9634         if (rc)
9635                 return rc;
9636
9637         sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9638         list_add_tail(&hpsa_sas_phy->phy_list_entry,
9639                         &hpsa_sas_port->phy_list_head);
9640         hpsa_sas_phy->added_to_port = true;
9641
9642         return 0;
9643 }
9644
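/* Fill in the remote phy identify data and register the rphy with the SAS transport layer. */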
9645 static int
9646         hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9647                                 struct sas_rphy *rphy)
9648 {
9649         struct sas_identify *identify;
9650
9651         identify = &rphy->identify;
9652         identify->sas_address = hpsa_sas_port->sas_address;
9653         identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9654         identify->target_port_protocols = SAS_PROTOCOL_STP;
9655
9656         return sas_rphy_add(rphy);
9657 }
9658
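/*
 * Allocate an hpsa_sas_port for @sas_address, register a sas_port with the
 * SAS transport layer, and link it into @hpsa_sas_node's port list.
 */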
9659 static struct hpsa_sas_port
9660         *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9661                                 u64 sas_address)
9662 {
9663         int rc;
9664         struct hpsa_sas_port *hpsa_sas_port;
9665         struct sas_port *port;
9666
9667         hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9668         if (!hpsa_sas_port)
9669                 return NULL;
9670
9671         INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9672         hpsa_sas_port->parent_node = hpsa_sas_node;
9673
9674         port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9675         if (!port)
9676                 goto free_hpsa_port;
9677
9678         rc = sas_port_add(port);
9679         if (rc)
9680                 goto free_sas_port;
9681
9682         hpsa_sas_port->port = port;
9683         hpsa_sas_port->sas_address = sas_address;
9684         list_add_tail(&hpsa_sas_port->port_list_entry,
9685                         &hpsa_sas_node->port_list_head);
9686
9687         return hpsa_sas_port;
9688
9689 free_sas_port:
9690         sas_port_free(port);
9691 free_hpsa_port:
9692         kfree(hpsa_sas_port);
9693
9694         return NULL;
9695 }
9696
9697 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9698 {
9699         struct hpsa_sas_phy *hpsa_sas_phy;
9700         struct hpsa_sas_phy *next;
9701
9702         list_for_each_entry_safe(hpsa_sas_phy, next,
9703                         &hpsa_sas_port->phy_list_head, phy_list_entry)
9704                 hpsa_free_sas_phy(hpsa_sas_phy);
9705
9706         sas_port_delete(hpsa_sas_port->port);
9707         list_del(&hpsa_sas_port->port_list_entry);
9708         kfree(hpsa_sas_port);
9709 }
9710
9711 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9712 {
9713         struct hpsa_sas_node *hpsa_sas_node;
9714
9715         hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9716         if (hpsa_sas_node) {
9717                 hpsa_sas_node->parent_dev = parent_dev;
9718                 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9719         }
9720
9721         return hpsa_sas_node;
9722 }
9723
9724 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9725 {
9726         struct hpsa_sas_port *hpsa_sas_port;
9727         struct hpsa_sas_port *next;
9728
9729         if (!hpsa_sas_node)
9730                 return;
9731
9732         list_for_each_entry_safe(hpsa_sas_port, next,
9733                         &hpsa_sas_node->port_list_head, port_list_entry)
9734                 hpsa_free_sas_port(hpsa_sas_port);
9735
9736         kfree(hpsa_sas_node);
9737 }
9738
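/* Find the hpsa_scsi_dev_t whose SAS port owns @rphy, or NULL if none does. */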
9739 static struct hpsa_scsi_dev_t
9740         *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9741                                         struct sas_rphy *rphy)
9742 {
9743         int i;
9744         struct hpsa_scsi_dev_t *device;
9745
9746         for (i = 0; i < h->ndevices; i++) {
9747                 device = h->dev[i];
9748                 if (!device->sas_port)
9749                         continue;
9750                 if (device->sas_port->rphy == rphy)
9751                         return device;
9752         }
9753
9754         return NULL;
9755 }
9756
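/*
 * Create the SAS node, port, and phy objects that represent the controller
 * itself and register them with the SAS transport layer.
 */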
9757 static int hpsa_add_sas_host(struct ctlr_info *h)
9758 {
9759         int rc;
9760         struct device *parent_dev;
9761         struct hpsa_sas_node *hpsa_sas_node;
9762         struct hpsa_sas_port *hpsa_sas_port;
9763         struct hpsa_sas_phy *hpsa_sas_phy;
9764
9765         parent_dev = &h->scsi_host->shost_dev;
9766
9767         hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9768         if (!hpsa_sas_node)
9769                 return -ENOMEM;
9770
9771         hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9772         if (!hpsa_sas_port) {
9773                 rc = -ENODEV;
9774                 goto free_sas_node;
9775         }
9776
9777         hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9778         if (!hpsa_sas_phy) {
9779                 rc = -ENODEV;
9780                 goto free_sas_port;
9781         }
9782
9783         rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9784         if (rc)
9785                 goto free_sas_phy;
9786
9787         h->sas_host = hpsa_sas_node;
9788
9789         return 0;
9790
9791 free_sas_phy:
9792         hpsa_free_sas_phy(hpsa_sas_phy);
9793 free_sas_port:
9794         hpsa_free_sas_port(hpsa_sas_port);
9795 free_sas_node:
9796         hpsa_free_sas_node(hpsa_sas_node);
9797
9798         return rc;
9799 }
9800
9801 static void hpsa_delete_sas_host(struct ctlr_info *h)
9802 {
9803         hpsa_free_sas_node(h->sas_host);
9804 }
9805
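/* Create a SAS port and end-device rphy for @device under @hpsa_sas_node. */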
9806 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9807                                 struct hpsa_scsi_dev_t *device)
9808 {
9809         int rc;
9810         struct hpsa_sas_port *hpsa_sas_port;
9811         struct sas_rphy *rphy;
9812
9813         hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9814         if (!hpsa_sas_port)
9815                 return -ENOMEM;
9816
9817         rphy = sas_end_device_alloc(hpsa_sas_port->port);
9818         if (!rphy) {
9819                 rc = -ENODEV;
9820                 goto free_sas_port;
9821         }
9822
9823         hpsa_sas_port->rphy = rphy;
9824         device->sas_port = hpsa_sas_port;
9825
9826         rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9827         if (rc)
9828                 goto free_sas_port;
9829
9830         return 0;
9831
9832 free_sas_port:
9833         hpsa_free_sas_port(hpsa_sas_port);
9834         device->sas_port = NULL;
9835
9836         return rc;
9837 }
9838
9839 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9840 {
9841         if (device->sas_port) {
9842                 hpsa_free_sas_port(device->sas_port);
9843                 device->sas_port = NULL;
9844         }
9845 }
9846
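/*
 * SAS transport callbacks.  Most of these are stubs: link error counters and
 * phy control are not exposed by this driver, so the handlers below simply
 * report success or a suitable error code.
 */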
9847 static int
9848 hpsa_sas_get_linkerrors(struct sas_phy *phy)
9849 {
9850         return 0;
9851 }
9852
9853 static int
9854 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9855 {
9856         struct Scsi_Host *shost = phy_to_shost(rphy);
9857         struct ctlr_info *h;
9858         struct hpsa_scsi_dev_t *sd;
9859
9860         if (!shost)
9861                 return -ENXIO;
9862
9863         h = shost_to_hba(shost);
9864
9865         if (!h)
9866                 return -ENXIO;
9867
9868         sd = hpsa_find_device_by_sas_rphy(h, rphy);
9869         if (!sd)
9870                 return -ENXIO;
9871
9872         *identifier = sd->eli;
9873
9874         return 0;
9875 }
9876
9877 static int
9878 hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9879 {
9880         return -ENXIO;
9881 }
9882
9883 static int
9884 hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9885 {
9886         return 0;
9887 }
9888
9889 static int
9890 hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9891 {
9892         return 0;
9893 }
9894
9895 static int
9896 hpsa_sas_phy_setup(struct sas_phy *phy)
9897 {
9898         return 0;
9899 }
9900
9901 static void
9902 hpsa_sas_phy_release(struct sas_phy *phy)
9903 {
9904 }
9905
9906 static int
9907 hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9908 {
9909         return -EINVAL;
9910 }
9911
9912 static struct sas_function_template hpsa_sas_transport_functions = {
9913         .get_linkerrors = hpsa_sas_get_linkerrors,
9914         .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9915         .get_bay_identifier = hpsa_sas_get_bay_identifier,
9916         .phy_reset = hpsa_sas_phy_reset,
9917         .phy_enable = hpsa_sas_phy_enable,
9918         .phy_setup = hpsa_sas_phy_setup,
9919         .phy_release = hpsa_sas_phy_release,
9920         .set_phy_speed = hpsa_sas_phy_speed,
9921 };
9922
9923 /*
9924  *  This is it.  Register the PCI driver information for the cards we control;
9925  *  the OS will call our registered routines when it finds one of our cards.
9926  */
9927 static int __init hpsa_init(void)
9928 {
9929         int rc;
9930
9931         hpsa_sas_transport_template =
9932                 sas_attach_transport(&hpsa_sas_transport_functions);
9933         if (!hpsa_sas_transport_template)
9934                 return -ENODEV;
9935
9936         rc = pci_register_driver(&hpsa_pci_driver);
9937
9938         if (rc)
9939                 sas_release_transport(hpsa_sas_transport_template);
9940
9941         return rc;
9942 }
9943
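/* Undo hpsa_init(): unregister the PCI driver and release the SAS transport template. */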
9944 static void __exit hpsa_cleanup(void)
9945 {
9946         pci_unregister_driver(&hpsa_pci_driver);
9947         sas_release_transport(hpsa_sas_transport_template);
9948 }
9949
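/*
 * Compile-time layout checks.  verify_offsets() is never called; it exists
 * only so the BUILD_BUG_ON()s fail the build if the raid_map_data,
 * io_accel2_cmd, or io_accel1_cmd structures drift from the offsets the
 * controller interface expects.
 */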
9950 static void __attribute__((unused)) verify_offsets(void)
9951 {
9952 #define VERIFY_OFFSET(member, offset) \
9953         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9954
9955         VERIFY_OFFSET(structure_size, 0);
9956         VERIFY_OFFSET(volume_blk_size, 4);
9957         VERIFY_OFFSET(volume_blk_cnt, 8);
9958         VERIFY_OFFSET(phys_blk_shift, 16);
9959         VERIFY_OFFSET(parity_rotation_shift, 17);
9960         VERIFY_OFFSET(strip_size, 18);
9961         VERIFY_OFFSET(disk_starting_blk, 20);
9962         VERIFY_OFFSET(disk_blk_cnt, 28);
9963         VERIFY_OFFSET(data_disks_per_row, 36);
9964         VERIFY_OFFSET(metadata_disks_per_row, 38);
9965         VERIFY_OFFSET(row_cnt, 40);
9966         VERIFY_OFFSET(layout_map_count, 42);
9967         VERIFY_OFFSET(flags, 44);
9968         VERIFY_OFFSET(dekindex, 46);
9969         /* VERIFY_OFFSET(reserved, 48); */
9970         VERIFY_OFFSET(data, 64);
9971
9972 #undef VERIFY_OFFSET
9973
9974 #define VERIFY_OFFSET(member, offset) \
9975         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9976
9977         VERIFY_OFFSET(IU_type, 0);
9978         VERIFY_OFFSET(direction, 1);
9979         VERIFY_OFFSET(reply_queue, 2);
9980         /* VERIFY_OFFSET(reserved1, 3);  */
9981         VERIFY_OFFSET(scsi_nexus, 4);
9982         VERIFY_OFFSET(Tag, 8);
9983         VERIFY_OFFSET(cdb, 16);
9984         VERIFY_OFFSET(cciss_lun, 32);
9985         VERIFY_OFFSET(data_len, 40);
9986         VERIFY_OFFSET(cmd_priority_task_attr, 44);
9987         VERIFY_OFFSET(sg_count, 45);
9988         /* VERIFY_OFFSET(reserved3 */
9989         VERIFY_OFFSET(err_ptr, 48);
9990         VERIFY_OFFSET(err_len, 56);
9991         /* VERIFY_OFFSET(reserved4  */
9992         VERIFY_OFFSET(sg, 64);
9993
9994 #undef VERIFY_OFFSET
9995
9996 #define VERIFY_OFFSET(member, offset) \
9997         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9998
9999         VERIFY_OFFSET(dev_handle, 0x00);
10000         VERIFY_OFFSET(reserved1, 0x02);
10001         VERIFY_OFFSET(function, 0x03);
10002         VERIFY_OFFSET(reserved2, 0x04);
10003         VERIFY_OFFSET(err_info, 0x0C);
10004         VERIFY_OFFSET(reserved3, 0x10);
10005         VERIFY_OFFSET(err_info_len, 0x12);
10006         VERIFY_OFFSET(reserved4, 0x13);
10007         VERIFY_OFFSET(sgl_offset, 0x14);
10008         VERIFY_OFFSET(reserved5, 0x15);
10009         VERIFY_OFFSET(transfer_len, 0x1C);
10010         VERIFY_OFFSET(reserved6, 0x20);
10011         VERIFY_OFFSET(io_flags, 0x24);
10012         VERIFY_OFFSET(reserved7, 0x26);
10013         VERIFY_OFFSET(LUN, 0x34);
10014         VERIFY_OFFSET(control, 0x3C);
10015         VERIFY_OFFSET(CDB, 0x40);
10016         VERIFY_OFFSET(reserved8, 0x50);
10017         VERIFY_OFFSET(host_context_flags, 0x60);
10018         VERIFY_OFFSET(timeout_sec, 0x62);
10019         VERIFY_OFFSET(ReplyQueue, 0x64);
10020         VERIFY_OFFSET(reserved9, 0x65);
10021         VERIFY_OFFSET(tag, 0x68);
10022         VERIFY_OFFSET(host_addr, 0x70);
10023         VERIFY_OFFSET(CISS_LUN, 0x78);
10024         VERIFY_OFFSET(SG, 0x78 + 8);
10025 #undef VERIFY_OFFSET
10026 }
10027
10028 module_init(hpsa_init);
10029 module_exit(hpsa_cleanup);