2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
22 #include <linux/module.h>
23 #include <linux/interrupt.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/delay.h>
30 #include <linux/timer.h>
31 #include <linux/seq_file.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/compat.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/uaccess.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/completion.h>
40 #include <linux/moduleparam.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <linux/cciss_ioctl.h>
47 #include <linux/string.h>
48 #include <linux/bitmap.h>
49 #include <asm/atomic.h>
50 #include <linux/kthread.h>
54 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55 #define HPSA_DRIVER_VERSION "2.0.2-1"
56 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
58 /* How long to wait (in milliseconds) for board to go into simple mode */
59 #define MAX_CONFIG_WAIT 30000
60 #define MAX_IOCTL_CONFIG_WAIT 1000
62 /* define how many times we will try a command because of bus resets */
63 #define MAX_CMD_RETRIES 3
65 /* Embedded module documentation macros - see modules.h */
66 MODULE_AUTHOR("Hewlett-Packard Company");
67 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
68 HPSA_DRIVER_VERSION);
69 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
70 MODULE_VERSION(HPSA_DRIVER_VERSION);
71 MODULE_LICENSE("GPL");
73 static int hpsa_allow_any;
74 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(hpsa_allow_any,
76 "Allow hpsa driver to access unknown HP Smart Array hardware");
77 static int hpsa_simple_mode;
78 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(hpsa_simple_mode,
80 "Use 'simple mode' rather than 'performant mode'");
82 /* define the PCI info for the cards we can control */
83 static const struct pci_device_id hpsa_pci_device_id[] = {
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
99 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
100 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
101 {0,}
102 };
104 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
106 /* board_id = Subsystem Device ID & Vendor ID
107 * product = Marketing Name for the board
108 * access = Address of the struct of function pointers
109 */
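/*
 * Illustrative example: each board_id packs the PCI subsystem device ID
 * into the upper 16 bits and the HP subsystem vendor ID (0x103C) into the
 * lower 16 bits, e.g. for the P212: (0x3241 << 16) | 0x103C == 0x3241103C.
 */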
110 static struct board_type products[] = {
111 {0x3241103C, "Smart Array P212", &SA5_access},
112 {0x3243103C, "Smart Array P410", &SA5_access},
113 {0x3245103C, "Smart Array P410i", &SA5_access},
114 {0x3247103C, "Smart Array P411", &SA5_access},
115 {0x3249103C, "Smart Array P812", &SA5_access},
116 {0x324a103C, "Smart Array P712m", &SA5_access},
117 {0x324b103C, "Smart Array P711m", &SA5_access},
118 {0x3350103C, "Smart Array", &SA5_access},
119 {0x3351103C, "Smart Array", &SA5_access},
120 {0x3352103C, "Smart Array", &SA5_access},
121 {0x3353103C, "Smart Array", &SA5_access},
122 {0x3354103C, "Smart Array", &SA5_access},
123 {0x3355103C, "Smart Array", &SA5_access},
124 {0x3356103C, "Smart Array", &SA5_access},
125 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
126 };
128 static int number_of_controllers;
130 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
131 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
132 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
133 static void start_io(struct ctlr_info *h);
136 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
139 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
140 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
141 static struct CommandList *cmd_alloc(struct ctlr_info *h);
142 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
143 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
144 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
147 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
148 static void hpsa_scan_start(struct Scsi_Host *);
149 static int hpsa_scan_finished(struct Scsi_Host *sh,
150 unsigned long elapsed_time);
151 static int hpsa_change_queue_depth(struct scsi_device *sdev,
152 int qdepth, int reason);
154 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
155 static int hpsa_slave_alloc(struct scsi_device *sdev);
156 static void hpsa_slave_destroy(struct scsi_device *sdev);
158 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
159 static int check_for_unit_attention(struct ctlr_info *h,
160 struct CommandList *c);
161 static void check_ioctl_unit_attention(struct ctlr_info *h,
162 struct CommandList *c);
163 /* performant mode helper functions */
164 static void calc_bucket_map(int *bucket, int num_buckets,
165 int nsgs, int *bucket_map);
166 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
167 static inline u32 next_command(struct ctlr_info *h);
168 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
169 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
171 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
172 unsigned long *memory_bar);
173 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
174 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
175 void __iomem *vaddr, int wait_for_ready);
176 #define BOARD_NOT_READY 0
177 #define BOARD_READY 1
179 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
181 unsigned long *priv = shost_priv(sdev->host);
182 return (struct ctlr_info *) *priv;
185 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
187 unsigned long *priv = shost_priv(sh);
188 return (struct ctlr_info *) *priv;
191 static int check_for_unit_attention(struct ctlr_info *h,
192 struct CommandList *c)
194 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
197 switch (c->err_info->SenseInfo[12]) {
199 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
200 "detected, command retried\n", h->ctlr);
203 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
204 "detected, action required\n", h->ctlr);
206 case REPORT_LUNS_CHANGED:
207 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
208 "changed, action required\n", h->ctlr);
210 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
214 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
215 "or device reset detected\n", h->ctlr);
217 case UNIT_ATTENTION_CLEARED:
218 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
219 "cleared by another initiator\n", h->ctlr);
222 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
223 "unit attention detected\n", h->ctlr);
229 static ssize_t host_store_rescan(struct device *dev,
230 struct device_attribute *attr,
231 const char *buf, size_t count)
234 struct Scsi_Host *shost = class_to_shost(dev);
235 h = shost_to_hba(shost);
236 hpsa_scan_start(h->scsi_host);
240 static ssize_t host_show_firmware_revision(struct device *dev,
241 struct device_attribute *attr, char *buf)
244 struct Scsi_Host *shost = class_to_shost(dev);
245 unsigned char *fwrev;
247 h = shost_to_hba(shost);
248 if (!h->hba_inquiry_data)
250 fwrev = &h->hba_inquiry_data[32];
251 return snprintf(buf, 20, "%c%c%c%c\n",
252 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
255 static ssize_t host_show_commands_outstanding(struct device *dev,
256 struct device_attribute *attr, char *buf)
258 struct Scsi_Host *shost = class_to_shost(dev);
259 struct ctlr_info *h = shost_to_hba(shost);
261 return snprintf(buf, 20, "%d\n", h->commands_outstanding);
264 static ssize_t host_show_transport_mode(struct device *dev,
265 struct device_attribute *attr, char *buf)
268 struct Scsi_Host *shost = class_to_shost(dev);
270 h = shost_to_hba(shost);
271 return snprintf(buf, 20, "%s\n",
272 h->transMethod & CFGTBL_Trans_Performant ?
273 "performant" : "simple");
276 /* List of controllers which cannot be reset on kexec with reset_devices */
277 static u32 unresettable_controller[] = {
278 0x324a103C, /* Smart Array P712m */
279 0x324b103C, /* Smart Array P711m */
280 0x3223103C, /* Smart Array P800 */
281 0x3234103C, /* Smart Array P400 */
282 0x3235103C, /* Smart Array P400i */
283 0x3211103C, /* Smart Array E200i */
284 0x3212103C, /* Smart Array E200 */
285 0x3213103C, /* Smart Array E200i */
286 0x3214103C, /* Smart Array E200i */
287 0x3215103C, /* Smart Array E200i */
288 0x3237103C, /* Smart Array E500 */
289 0x323D103C, /* Smart Array P700m */
290 0x409C0E11, /* Smart Array 6400 */
291 0x409D0E11, /* Smart Array 6400 EM */
294 static int ctlr_is_resettable(struct ctlr_info *h)
298 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
299 if (unresettable_controller[i] == h->board_id)
304 static ssize_t host_show_resettable(struct device *dev,
305 struct device_attribute *attr, char *buf)
308 struct Scsi_Host *shost = class_to_shost(dev);
310 h = shost_to_hba(shost);
311 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
314 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
316 return (scsi3addr[3] & 0xC0) == 0x40;
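/*
 * Illustrative note (interpretation of the check above): bits 7:6 of byte 3
 * of the 8-byte CISS LUN encode the address mode, and 0x40 marks a logical
 * volume; e.g. an address beginning 00 00 00 40 ... is a logical drive,
 * while 00 00 00 00 ... addresses a physical device.
 */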
319 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
320 "UNKNOWN"
321 };
322 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
324 static ssize_t raid_level_show(struct device *dev,
325 struct device_attribute *attr, char *buf)
328 unsigned char rlevel;
330 struct scsi_device *sdev;
331 struct hpsa_scsi_dev_t *hdev;
334 sdev = to_scsi_device(dev);
335 h = sdev_to_hba(sdev);
336 spin_lock_irqsave(&h->lock, flags);
337 hdev = sdev->hostdata;
339 spin_unlock_irqrestore(&h->lock, flags);
343 /* Is this even a logical drive? */
344 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
345 spin_unlock_irqrestore(&h->lock, flags);
346 l = snprintf(buf, PAGE_SIZE, "N/A\n");
350 rlevel = hdev->raid_level;
351 spin_unlock_irqrestore(&h->lock, flags);
352 if (rlevel > RAID_UNKNOWN)
353 rlevel = RAID_UNKNOWN;
354 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
358 static ssize_t lunid_show(struct device *dev,
359 struct device_attribute *attr, char *buf)
362 struct scsi_device *sdev;
363 struct hpsa_scsi_dev_t *hdev;
365 unsigned char lunid[8];
367 sdev = to_scsi_device(dev);
368 h = sdev_to_hba(sdev);
369 spin_lock_irqsave(&h->lock, flags);
370 hdev = sdev->hostdata;
372 spin_unlock_irqrestore(&h->lock, flags);
375 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
376 spin_unlock_irqrestore(&h->lock, flags);
377 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
378 lunid[0], lunid[1], lunid[2], lunid[3],
379 lunid[4], lunid[5], lunid[6], lunid[7]);
382 static ssize_t unique_id_show(struct device *dev,
383 struct device_attribute *attr, char *buf)
386 struct scsi_device *sdev;
387 struct hpsa_scsi_dev_t *hdev;
389 unsigned char sn[16];
391 sdev = to_scsi_device(dev);
392 h = sdev_to_hba(sdev);
393 spin_lock_irqsave(&h->lock, flags);
394 hdev = sdev->hostdata;
396 spin_unlock_irqrestore(&h->lock, flags);
399 memcpy(sn, hdev->device_id, sizeof(sn));
400 spin_unlock_irqrestore(&h->lock, flags);
401 return snprintf(buf, 16 * 2 + 2,
402 "%02X%02X%02X%02X%02X%02X%02X%02X"
403 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
404 sn[0], sn[1], sn[2], sn[3],
405 sn[4], sn[5], sn[6], sn[7],
406 sn[8], sn[9], sn[10], sn[11],
407 sn[12], sn[13], sn[14], sn[15]);
410 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
411 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
412 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
413 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
414 static DEVICE_ATTR(firmware_revision, S_IRUGO,
415 host_show_firmware_revision, NULL);
416 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
417 host_show_commands_outstanding, NULL);
418 static DEVICE_ATTR(transport_mode, S_IRUGO,
419 host_show_transport_mode, NULL);
420 static DEVICE_ATTR(resettable, S_IRUGO,
421 host_show_resettable, NULL);
423 static struct device_attribute *hpsa_sdev_attrs[] = {
424 &dev_attr_raid_level,
430 static struct device_attribute *hpsa_shost_attrs[] = {
432 &dev_attr_firmware_revision,
433 &dev_attr_commands_outstanding,
434 &dev_attr_transport_mode,
435 &dev_attr_resettable,
439 static struct scsi_host_template hpsa_driver_template = {
440 .module = THIS_MODULE,
443 .queuecommand = hpsa_scsi_queue_command,
444 .scan_start = hpsa_scan_start,
445 .scan_finished = hpsa_scan_finished,
446 .change_queue_depth = hpsa_change_queue_depth,
448 .use_clustering = ENABLE_CLUSTERING,
449 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
451 .slave_alloc = hpsa_slave_alloc,
452 .slave_destroy = hpsa_slave_destroy,
454 .compat_ioctl = hpsa_compat_ioctl,
456 .sdev_attrs = hpsa_sdev_attrs,
457 .shost_attrs = hpsa_shost_attrs,
461 /* Enqueuing and dequeuing functions for cmdlists. */
462 static inline void addQ(struct list_head *list, struct CommandList *c)
464 list_add_tail(&c->list, list);
467 static inline u32 next_command(struct ctlr_info *h)
471 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
472 return h->access.command_completed(h);
474 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
475 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
476 (h->reply_pool_head)++;
477 h->commands_outstanding--;
481 /* Check for wraparound */
482 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
483 h->reply_pool_head = h->reply_pool;
484 h->reply_pool_wraparound ^= 1;
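/*
 * Note (interpretation of the logic above): bit 0 of each reply word acts
 * as a producer "cycle" bit. It matches reply_pool_wraparound only for
 * entries the controller has written during the current pass over the ring,
 * so toggling reply_pool_wraparound at each wrap makes stale entries from
 * the previous pass compare unequal and be skipped until overwritten.
 */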
489 /* set_performant_mode: Modify the tag for cciss performant
490 * set bit 0 for pull model, bits 3-1 for block fetch
493 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
495 if (likely(h->transMethod & CFGTBL_Trans_Performant))
496 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
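/*
 * Worked example (illustrative): if blockFetchTable[c->Header.SGList] is 3,
 * the statement above ORs 0x7 into the low bits of c->busaddr -- bit 0
 * selects the performant ("pull") completion path and bits 3-1 carry the
 * block fetch count the controller uses to decide how much of the command
 * to DMA up front for that SG count.
 */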
499 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
500 struct CommandList *c)
504 set_performant_mode(h, c);
505 spin_lock_irqsave(&h->lock, flags);
509 spin_unlock_irqrestore(&h->lock, flags);
512 static inline void removeQ(struct CommandList *c)
514 if (WARN_ON(list_empty(&c->list)))
516 list_del_init(&c->list);
519 static inline int is_hba_lunid(unsigned char scsi3addr[])
521 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
524 static inline int is_scsi_rev_5(struct ctlr_info *h)
526 if (!h->hba_inquiry_data)
528 if ((h->hba_inquiry_data[2] & 0x07) == 5)
533 static int hpsa_find_target_lun(struct ctlr_info *h,
534 unsigned char scsi3addr[], int bus, int *target, int *lun)
536 /* finds an unused bus, target, lun for a new physical device
537 * assumes h->devlock is held
538 */
540 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
542 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
544 for (i = 0; i < h->ndevices; i++) {
545 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
546 set_bit(h->dev[i]->target, lun_taken);
549 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
550 if (!test_bit(i, lun_taken)) {
561 /* Add an entry into h->dev[] array. */
562 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
563 struct hpsa_scsi_dev_t *device,
564 struct hpsa_scsi_dev_t *added[], int *nadded)
566 /* assumes h->devlock is held */
569 unsigned char addr1[8], addr2[8];
570 struct hpsa_scsi_dev_t *sd;
572 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
573 dev_err(&h->pdev->dev, "too many devices, some will be "
578 /* physical devices do not have lun or target assigned until now. */
579 if (device->lun != -1)
580 /* Logical device, lun is already assigned. */
583 /* If this device is a non-zero lun of a multi-lun device,
584 * byte 4 of the 8-byte LUN addr will contain the logical
585 * unit number, zero otherwise.
586 */
587 if (device->scsi3addr[4] == 0) {
588 /* This is not a non-zero lun of a multi-lun device */
589 if (hpsa_find_target_lun(h, device->scsi3addr,
590 device->bus, &device->target, &device->lun) != 0)
595 /* This is a non-zero lun of a multi-lun device.
596 * Search through our list and find the device which
597 * has the same 8 byte LUN address, excepting byte 4.
598 * Assign the same bus and target for this new LUN.
599 * Use the logical unit number from the firmware.
600 */
601 memcpy(addr1, device->scsi3addr, 8);
603 for (i = 0; i < n; i++) {
605 memcpy(addr2, sd->scsi3addr, 8);
607 /* differ only in byte 4? */
608 if (memcmp(addr1, addr2, 8) == 0) {
609 device->bus = sd->bus;
610 device->target = sd->target;
611 device->lun = device->scsi3addr[4];
615 if (device->lun == -1) {
616 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
617 " suspect firmware bug or unsupported hardware "
626 added[*nadded] = device;
629 /* initially, (before registering with scsi layer) we don't
630 * know our hostno and we don't want to print anything first
631 * time anyway (the scsi layer's inquiries will show that info)
632 */
633 /* if (hostno != -1) */
634 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
635 scsi_device_type(device->devtype), hostno,
636 device->bus, device->target, device->lun);
640 /* Replace an entry from h->dev[] array. */
641 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
642 int entry, struct hpsa_scsi_dev_t *new_entry,
643 struct hpsa_scsi_dev_t *added[], int *nadded,
644 struct hpsa_scsi_dev_t *removed[], int *nremoved)
646 /* assumes h->devlock is held */
647 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
648 removed[*nremoved] = h->dev[entry];
650 h->dev[entry] = new_entry;
651 added[*nadded] = new_entry;
653 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
654 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
655 new_entry->target, new_entry->lun);
658 /* Remove an entry from h->dev[] array. */
659 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
660 struct hpsa_scsi_dev_t *removed[], int *nremoved)
662 /* assumes h->devlock is held */
664 struct hpsa_scsi_dev_t *sd;
666 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
669 removed[*nremoved] = h->dev[entry];
672 for (i = entry; i < h->ndevices-1; i++)
673 h->dev[i] = h->dev[i+1];
675 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
676 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
680 #define SCSI3ADDR_EQ(a, b) ( \
681 (a)[7] == (b)[7] && \
682 (a)[6] == (b)[6] && \
683 (a)[5] == (b)[5] && \
684 (a)[4] == (b)[4] && \
685 (a)[3] == (b)[3] && \
686 (a)[2] == (b)[2] && \
687 (a)[1] == (b)[1] && \
688 (a)[0] == (b)[0])
690 static void fixup_botched_add(struct ctlr_info *h,
691 struct hpsa_scsi_dev_t *added)
693 /* called when scsi_add_device fails in order to re-adjust
694 * h->dev[] to match the mid layer's view.
695 */
699 spin_lock_irqsave(&h->lock, flags);
700 for (i = 0; i < h->ndevices; i++) {
701 if (h->dev[i] == added) {
702 for (j = i; j < h->ndevices-1; j++)
703 h->dev[j] = h->dev[j+1];
708 spin_unlock_irqrestore(&h->lock, flags);
712 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
713 struct hpsa_scsi_dev_t *dev2)
715 /* we compare everything except lun and target as these
716 * are not yet assigned. Compare parts likely
717 * to differ first. */
719 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
720 sizeof(dev1->scsi3addr)) != 0)
722 if (memcmp(dev1->device_id, dev2->device_id,
723 sizeof(dev1->device_id)) != 0)
725 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
727 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
729 if (dev1->devtype != dev2->devtype)
731 if (dev1->bus != dev2->bus)
736 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
737 * and return needle location in *index. If scsi3addr matches, but not
738 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
739 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
740 */
741 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
742 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
746 #define DEVICE_NOT_FOUND 0
747 #define DEVICE_CHANGED 1
748 #define DEVICE_SAME 2
749 for (i = 0; i < haystack_size; i++) {
750 if (haystack[i] == NULL) /* previously removed. */
752 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
754 if (device_is_the_same(needle, haystack[i]))
757 return DEVICE_CHANGED;
761 return DEVICE_NOT_FOUND;
764 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
765 struct hpsa_scsi_dev_t *sd[], int nsds)
767 /* sd contains scsi3 addresses and devtypes, and inquiry
768 * data. This function takes what's in sd to be the current
769 * reality and updates h->dev[] to reflect that reality.
770 */
771 int i, entry, device_change, changes = 0;
772 struct hpsa_scsi_dev_t *csd;
774 struct hpsa_scsi_dev_t **added, **removed;
775 int nadded, nremoved;
776 struct Scsi_Host *sh = NULL;
778 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
780 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
783 if (!added || !removed) {
784 dev_warn(&h->pdev->dev, "out of memory in "
785 "adjust_hpsa_scsi_table\n");
789 spin_lock_irqsave(&h->devlock, flags);
791 /* find any devices in h->dev[] that are not in
792 * sd[] and remove them from h->dev[], and for any
793 * devices which have changed, remove the old device
794 * info and add the new device info.
795 */
799 while (i < h->ndevices) {
801 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
802 if (device_change == DEVICE_NOT_FOUND) {
804 hpsa_scsi_remove_entry(h, hostno, i,
806 continue; /* remove ^^^, hence i not incremented */
807 } else if (device_change == DEVICE_CHANGED) {
809 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
810 added, &nadded, removed, &nremoved);
811 /* Set it to NULL to prevent it from being freed
812 * at the bottom of hpsa_update_scsi_devices()
813 */
819 /* Now, make sure every device listed in sd[] is also
820 * listed in h->dev[], adding them if they aren't found
823 for (i = 0; i < nsds; i++) {
824 if (!sd[i]) /* if already added above. */
826 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
827 h->ndevices, &entry);
828 if (device_change == DEVICE_NOT_FOUND) {
830 if (hpsa_scsi_add_entry(h, hostno, sd[i],
831 added, &nadded) != 0)
833 sd[i] = NULL; /* prevent from being freed later. */
834 } else if (device_change == DEVICE_CHANGED) {
835 /* should never happen... */
837 dev_warn(&h->pdev->dev,
838 "device unexpectedly changed.\n");
839 /* but if it does happen, we just ignore that device */
842 spin_unlock_irqrestore(&h->devlock, flags);
844 /* Don't notify scsi mid layer of any changes the first time through
845 * (or if there are no changes) scsi_scan_host will do it later the
846 * first time through.
847 */
848 if (hostno == -1 || !changes)
852 /* Notify scsi mid layer of any removed devices */
853 for (i = 0; i < nremoved; i++) {
854 struct scsi_device *sdev =
855 scsi_device_lookup(sh, removed[i]->bus,
856 removed[i]->target, removed[i]->lun);
858 scsi_remove_device(sdev);
859 scsi_device_put(sdev);
861 /* We don't expect to get here.
862 * future cmds to this device will get selection
863 * timeout as if the device was gone.
864 */
865 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
866 " for removal.", hostno, removed[i]->bus,
867 removed[i]->target, removed[i]->lun);
873 /* Notify scsi mid layer of any added devices */
874 for (i = 0; i < nadded; i++) {
875 if (scsi_add_device(sh, added[i]->bus,
876 added[i]->target, added[i]->lun) == 0)
878 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
879 "device not added.\n", hostno, added[i]->bus,
880 added[i]->target, added[i]->lun);
881 /* now we have to remove it from h->dev,
882 * since it didn't get added to scsi mid layer
883 */
884 fixup_botched_add(h, added[i]);
892 /*
893 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
894 * Assumes h->devlock is held.
895 */
896 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
897 int bus, int target, int lun)
900 struct hpsa_scsi_dev_t *sd;
902 for (i = 0; i < h->ndevices; i++) {
904 if (sd->bus == bus && sd->target == target && sd->lun == lun)
910 /* link sdev->hostdata to our per-device structure. */
911 static int hpsa_slave_alloc(struct scsi_device *sdev)
913 struct hpsa_scsi_dev_t *sd;
917 h = sdev_to_hba(sdev);
918 spin_lock_irqsave(&h->devlock, flags);
919 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
920 sdev_id(sdev), sdev->lun);
923 spin_unlock_irqrestore(&h->devlock, flags);
927 static void hpsa_slave_destroy(struct scsi_device *sdev)
932 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
938 for (i = 0; i < h->nr_cmds; i++) {
939 kfree(h->cmd_sg_list[i]);
940 h->cmd_sg_list[i] = NULL;
942 kfree(h->cmd_sg_list);
943 h->cmd_sg_list = NULL;
946 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
950 if (h->chainsize <= 0)
953 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
957 for (i = 0; i < h->nr_cmds; i++) {
958 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
959 h->chainsize, GFP_KERNEL);
960 if (!h->cmd_sg_list[i])
966 hpsa_free_sg_chain_blocks(h);
970 static void hpsa_map_sg_chain_block(struct ctlr_info *h,
971 struct CommandList *c)
973 struct SGDescriptor *chain_sg, *chain_block;
976 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
977 chain_block = h->cmd_sg_list[c->cmdindex];
978 chain_sg->Ext = HPSA_SG_CHAIN;
979 chain_sg->Len = sizeof(*chain_sg) *
980 (c->Header.SGTotal - h->max_cmd_sg_entries);
981 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
983 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
984 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
987 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
988 struct CommandList *c)
990 struct SGDescriptor *chain_sg;
993 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
996 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
997 temp64.val32.lower = chain_sg->Addr.lower;
998 temp64.val32.upper = chain_sg->Addr.upper;
999 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1002 static void complete_scsi_command(struct CommandList *cp)
1004 struct scsi_cmnd *cmd;
1005 struct ctlr_info *h;
1006 struct ErrorInfo *ei;
1008 unsigned char sense_key;
1009 unsigned char asc; /* additional sense code */
1010 unsigned char ascq; /* additional sense code qualifier */
1013 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1016 scsi_dma_unmap(cmd); /* undo the DMA mappings */
1017 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
1018 hpsa_unmap_sg_chain_block(h, cp);
1020 cmd->result = (DID_OK << 16); /* host byte */
1021 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1022 cmd->result |= ei->ScsiStatus;
1024 /* copy the sense data whether we need to or not. */
1025 memcpy(cmd->sense_buffer, ei->SenseInfo,
1026 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
1027 SCSI_SENSE_BUFFERSIZE :
1029 scsi_set_resid(cmd, ei->ResidualCnt);
1031 if (ei->CommandStatus == 0) {
1032 cmd->scsi_done(cmd);
1037 /* an error has occurred */
1038 switch (ei->CommandStatus) {
1040 case CMD_TARGET_STATUS:
1041 if (ei->ScsiStatus) {
1043 sense_key = 0xf & ei->SenseInfo[2];
1044 /* Get additional sense code */
1045 asc = ei->SenseInfo[12];
1046 /* Get additional sense code qualifier */
1047 ascq = ei->SenseInfo[13];
1050 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1051 if (check_for_unit_attention(h, cp)) {
1052 cmd->result = DID_SOFT_ERROR << 16;
1055 if (sense_key == ILLEGAL_REQUEST) {
1057 * SCSI REPORT_LUNS is commonly unsupported on
1058 * Smart Array. Suppress noisy complaint.
1060 if (cp->Request.CDB[0] == REPORT_LUNS)
1063 /* If ASC/ASCQ indicate Logical Unit
1064 * Not Supported condition,
1066 if ((asc == 0x25) && (ascq == 0x0)) {
1067 dev_warn(&h->pdev->dev, "cp %p "
1068 "has check condition\n", cp);
1073 if (sense_key == NOT_READY) {
1074 /* If Sense is Not Ready, Logical Unit
1075 * Not ready, Manual Intervention
1078 if ((asc == 0x04) && (ascq == 0x03)) {
1079 dev_warn(&h->pdev->dev, "cp %p "
1080 "has check condition: unit "
1081 "not ready, manual "
1082 "intervention required\n", cp);
1086 if (sense_key == ABORTED_COMMAND) {
1087 /* Aborted command is retryable */
1088 dev_warn(&h->pdev->dev, "cp %p "
1089 "has check condition: aborted command: "
1090 "ASC: 0x%x, ASCQ: 0x%x\n",
1092 cmd->result = DID_SOFT_ERROR << 16;
1095 /* Must be some other type of check condition */
1096 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1098 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1099 "Returning result: 0x%x, "
1100 "cmd=[%02x %02x %02x %02x %02x "
1101 "%02x %02x %02x %02x %02x %02x "
1102 "%02x %02x %02x %02x %02x]\n",
1103 cp, sense_key, asc, ascq,
1105 cmd->cmnd[0], cmd->cmnd[1],
1106 cmd->cmnd[2], cmd->cmnd[3],
1107 cmd->cmnd[4], cmd->cmnd[5],
1108 cmd->cmnd[6], cmd->cmnd[7],
1109 cmd->cmnd[8], cmd->cmnd[9],
1110 cmd->cmnd[10], cmd->cmnd[11],
1111 cmd->cmnd[12], cmd->cmnd[13],
1112 cmd->cmnd[14], cmd->cmnd[15]);
1117 /* Problem was not a check condition
1118 * Pass it up to the upper layers...
1120 if (ei->ScsiStatus) {
1121 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1122 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1123 "Returning result: 0x%x\n",
1125 sense_key, asc, ascq,
1127 } else { /* scsi status is zero??? How??? */
1128 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1129 "Returning no connection.\n", cp),
1131 /* Ordinarily, this case should never happen,
1132 * but there is a bug in some released firmware
1133 * revisions that allows it to happen if, for
1134 * example, a 4100 backplane loses power and
1135 * the tape drive is in it. We assume that
1136 * it's a fatal error of some kind because we
1137 * can't show that it wasn't. We will make it
1138 * look like selection timeout since that is
1139 * the most common reason for this to occur,
1140 * and it's severe enough.
1143 cmd->result = DID_NO_CONNECT << 16;
1147 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1149 case CMD_DATA_OVERRUN:
1150 dev_warn(&h->pdev->dev, "cp %p has"
1151 " completed with data overrun "
1155 /* print_bytes(cp, sizeof(*cp), 1, 0);
1157 /* We get CMD_INVALID if you address a non-existent device
1158 * instead of a selection timeout (no response). You will
1159 * see this if you yank out a drive, then try to access it.
1160 * This is kind of a shame because it means that any other
1161 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1162 * missing target. */
1163 cmd->result = DID_NO_CONNECT << 16;
1166 case CMD_PROTOCOL_ERR:
1167 dev_warn(&h->pdev->dev, "cp %p has "
1168 "protocol error \n", cp);
1170 case CMD_HARDWARE_ERR:
1171 cmd->result = DID_ERROR << 16;
1172 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1174 case CMD_CONNECTION_LOST:
1175 cmd->result = DID_ERROR << 16;
1176 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1179 cmd->result = DID_ABORT << 16;
1180 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1181 cp, ei->ScsiStatus);
1183 case CMD_ABORT_FAILED:
1184 cmd->result = DID_ERROR << 16;
1185 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1187 case CMD_UNSOLICITED_ABORT:
1188 cmd->result = DID_RESET << 16;
1189 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1193 cmd->result = DID_TIME_OUT << 16;
1194 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1196 case CMD_UNABORTABLE:
1197 cmd->result = DID_ERROR << 16;
1198 dev_warn(&h->pdev->dev, "Command unabortable\n");
1201 cmd->result = DID_ERROR << 16;
1202 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1203 cp, ei->CommandStatus);
1205 cmd->scsi_done(cmd);
1209 static int hpsa_scsi_detect(struct ctlr_info *h)
1211 struct Scsi_Host *sh;
1214 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1221 sh->max_channel = 3;
1222 sh->max_cmd_len = MAX_COMMAND_SIZE;
1223 sh->max_lun = HPSA_MAX_LUN;
1224 sh->max_id = HPSA_MAX_LUN;
1225 sh->can_queue = h->nr_cmds;
1226 sh->cmd_per_lun = h->nr_cmds;
1227 sh->sg_tablesize = h->maxsgentries;
1229 sh->hostdata[0] = (unsigned long) h;
1230 sh->irq = h->intr[h->intr_mode];
1231 sh->unique_id = sh->irq;
1232 error = scsi_add_host(sh, &h->pdev->dev);
1239 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1240 " failed for controller %d\n", h->ctlr);
1244 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1245 " failed for controller %d\n", h->ctlr);
1249 static void hpsa_pci_unmap(struct pci_dev *pdev,
1250 struct CommandList *c, int sg_used, int data_direction)
1253 union u64bit addr64;
1255 for (i = 0; i < sg_used; i++) {
1256 addr64.val32.lower = c->SG[i].Addr.lower;
1257 addr64.val32.upper = c->SG[i].Addr.upper;
1258 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1263 static void hpsa_map_one(struct pci_dev *pdev,
1264 struct CommandList *cp,
1271 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1272 cp->Header.SGList = 0;
1273 cp->Header.SGTotal = 0;
1277 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1278 cp->SG[0].Addr.lower =
1279 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1280 cp->SG[0].Addr.upper =
1281 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1282 cp->SG[0].Len = buflen;
1283 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1284 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1287 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1288 struct CommandList *c)
1290 DECLARE_COMPLETION_ONSTACK(wait);
1293 enqueue_cmd_and_start_io(h, c);
1294 wait_for_completion(&wait);
1297 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1298 struct CommandList *c, int data_direction)
1300 int retry_count = 0;
1303 memset(c->err_info, 0, sizeof(c->err_info));
1304 hpsa_scsi_do_simple_cmd_core(h, c);
1306 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1307 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1310 static void hpsa_scsi_interpret_error(struct CommandList *cp)
1312 struct ErrorInfo *ei;
1313 struct device *d = &cp->h->pdev->dev;
1316 switch (ei->CommandStatus) {
1317 case CMD_TARGET_STATUS:
1318 dev_warn(d, "cmd %p has completed with errors\n", cp);
1319 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1321 if (ei->ScsiStatus == 0)
1322 dev_warn(d, "SCSI status is abnormally zero. "
1323 "(probably indicates selection timeout "
1324 "reported incorrectly due to a known "
1325 "firmware bug, circa July, 2001.)\n");
1327 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1328 dev_info(d, "UNDERRUN\n");
1330 case CMD_DATA_OVERRUN:
1331 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1334 /* controller unfortunately reports SCSI passthru's
1335 * to non-existent targets as invalid commands.
1337 dev_warn(d, "cp %p is reported invalid (probably means "
1338 "target device no longer present)\n", cp);
1339 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1343 case CMD_PROTOCOL_ERR:
1344 dev_warn(d, "cp %p has protocol error\n", cp);
1346 case CMD_HARDWARE_ERR:
1347 /* cmd->result = DID_ERROR << 16; */
1348 dev_warn(d, "cp %p had hardware error\n", cp);
1350 case CMD_CONNECTION_LOST:
1351 dev_warn(d, "cp %p had connection lost\n", cp);
1354 dev_warn(d, "cp %p was aborted\n", cp);
1356 case CMD_ABORT_FAILED:
1357 dev_warn(d, "cp %p reports abort failed\n", cp);
1359 case CMD_UNSOLICITED_ABORT:
1360 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1363 dev_warn(d, "cp %p timed out\n", cp);
1365 case CMD_UNABORTABLE:
1366 dev_warn(d, "Command unabortable\n");
1369 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1374 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1375 unsigned char page, unsigned char *buf,
1376 unsigned char bufsize)
1379 struct CommandList *c;
1380 struct ErrorInfo *ei;
1382 c = cmd_special_alloc(h);
1384 if (c == NULL) { /* trouble... */
1385 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1389 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1390 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1392 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1393 hpsa_scsi_interpret_error(c);
1396 cmd_special_free(h, c);
1400 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1403 struct CommandList *c;
1404 struct ErrorInfo *ei;
1406 c = cmd_special_alloc(h);
1408 if (c == NULL) { /* trouble... */
1409 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1413 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1414 hpsa_scsi_do_simple_cmd_core(h, c);
1415 /* no unmap needed here because no data xfer. */
1418 if (ei->CommandStatus != 0) {
1419 hpsa_scsi_interpret_error(c);
1422 cmd_special_free(h, c);
1426 static void hpsa_get_raid_level(struct ctlr_info *h,
1427 unsigned char *scsi3addr, unsigned char *raid_level)
1432 *raid_level = RAID_UNKNOWN;
1433 buf = kzalloc(64, GFP_KERNEL);
1436 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1438 *raid_level = buf[8];
1439 if (*raid_level > RAID_UNKNOWN)
1440 *raid_level = RAID_UNKNOWN;
1445 /* Get the device id from inquiry page 0x83 */
1446 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1447 unsigned char *device_id, int buflen)
1454 buf = kzalloc(64, GFP_KERNEL);
1457 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1459 memcpy(device_id, &buf[8], buflen);
1464 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1465 struct ReportLUNdata *buf, int bufsize,
1466 int extended_response)
1469 struct CommandList *c;
1470 unsigned char scsi3addr[8];
1471 struct ErrorInfo *ei;
1473 c = cmd_special_alloc(h);
1474 if (c == NULL) { /* trouble... */
1475 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1478 /* address the controller */
1479 memset(scsi3addr, 0, sizeof(scsi3addr));
1480 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1481 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1482 if (extended_response)
1483 c->Request.CDB[1] = extended_response;
1484 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1486 if (ei->CommandStatus != 0 &&
1487 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1488 hpsa_scsi_interpret_error(c);
1491 cmd_special_free(h, c);
1495 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1496 struct ReportLUNdata *buf,
1497 int bufsize, int extended_response)
1499 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1502 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1503 struct ReportLUNdata *buf, int bufsize)
1505 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1508 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1509 int bus, int target, int lun)
1512 device->target = target;
1516 static int hpsa_update_device_info(struct ctlr_info *h,
1517 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1519 #define OBDR_TAPE_INQ_SIZE 49
1520 unsigned char *inq_buff;
1522 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1526 /* Do an inquiry to the device to see what it is. */
1527 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1528 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1529 /* Inquiry failed (msg printed already) */
1530 dev_err(&h->pdev->dev,
1531 "hpsa_update_device_info: inquiry failed\n");
1535 this_device->devtype = (inq_buff[0] & 0x1f);
1536 memcpy(this_device->scsi3addr, scsi3addr, 8);
1537 memcpy(this_device->vendor, &inq_buff[8],
1538 sizeof(this_device->vendor));
1539 memcpy(this_device->model, &inq_buff[16],
1540 sizeof(this_device->model));
1541 memset(this_device->device_id, 0,
1542 sizeof(this_device->device_id));
1543 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1544 sizeof(this_device->device_id));
1546 if (this_device->devtype == TYPE_DISK &&
1547 is_logical_dev_addr_mode(scsi3addr))
1548 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1550 this_device->raid_level = RAID_UNKNOWN;
1560 static unsigned char *msa2xxx_model[] = {
1568 static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1572 for (i = 0; msa2xxx_model[i]; i++)
1573 if (strncmp(device->model, msa2xxx_model[i],
1574 strlen(msa2xxx_model[i])) == 0)
1579 /* Helper function to assign bus, target, lun mapping of devices.
1580 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1581 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
1582 * Logical drive target and lun are assigned at this time, but
1583 * physical device lun and target assignment are deferred (assigned
1584 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1585 */
1586 static void figure_bus_target_lun(struct ctlr_info *h,
1587 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1588 struct hpsa_scsi_dev_t *device)
1592 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1593 /* logical device */
1594 if (unlikely(is_scsi_rev_5(h))) {
1595 /* p1210m, logical drives lun assignments
1596 * match SCSI REPORT LUNS data.
1598 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1601 *lun = (lunid & 0x3fff) + 1;
1604 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1605 if (is_msa2xxx(h, device)) {
1606 /* msa2xxx way, put logicals on bus 1
1607 * and match target/lun numbers box
1608 * reports.
1609 */
1611 *target = (lunid >> 16) & 0x3fff;
1612 *lun = lunid & 0x00ff;
1614 /* Traditional smart array way. */
1617 *target = lunid & 0x3fff;
1621 /* physical device */
1622 if (is_hba_lunid(lunaddrbytes))
1623 if (unlikely(is_scsi_rev_5(h))) {
1624 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1629 *bus = 3; /* traditional smartarray */
1631 *bus = 2; /* physical disk */
1633 *lun = -1; /* we will fill these in later. */
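/*
 * Worked example (illustrative): an MSA2xxx logical volume reported with
 * lunid 0x00020005 decodes above to target = (lunid >> 16) & 0x3fff = 2 and
 * lun = lunid & 0x00ff = 5 on bus 1, while a traditional Smart Array volume
 * with lunid 0x00000003 ends up at target 3 on bus 0.
 */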
1637 /*
1638 * If there is no lun 0 on a target, linux won't find any devices.
1639 * For the MSA2xxx boxes, we have to manually detect the enclosure
1640 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1641 * it for some reason. *tmpdevice is the target we're adding,
1642 * this_device is a pointer into the current element of currentsd[]
1643 * that we're building up in update_scsi_devices(), below.
1644 * lunzerobits is a bitmap that tracks which targets already have a
1645 * lun 0 assigned.
1646 * Returns 1 if an enclosure was added, 0 if not.
1647 */
1648 static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1649 struct hpsa_scsi_dev_t *tmpdevice,
1650 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1651 int bus, int target, int lun, unsigned long lunzerobits[],
1652 int *nmsa2xxx_enclosures)
1654 unsigned char scsi3addr[8];
1656 if (test_bit(target, lunzerobits))
1657 return 0; /* There is already a lun 0 on this target. */
1659 if (!is_logical_dev_addr_mode(lunaddrbytes))
1660 return 0; /* It's the logical targets that may lack lun 0. */
1662 if (!is_msa2xxx(h, tmpdevice))
1663 return 0; /* It's only the MSA2xxx that have this problem. */
1665 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1668 memset(scsi3addr, 0, 8);
1669 scsi3addr[3] = target;
1670 if (is_hba_lunid(scsi3addr))
1671 return 0; /* Don't add the RAID controller here. */
1673 if (is_scsi_rev_5(h))
1674 return 0; /* p1210m doesn't need to do this. */
1676 #define MAX_MSA2XXX_ENCLOSURES 32
1677 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1678 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1679 "enclosures exceeded. Check your hardware "
1684 if (hpsa_update_device_info(h, scsi3addr, this_device))
1686 (*nmsa2xxx_enclosures)++;
1687 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1688 set_bit(target, lunzerobits);
1692 /*
1693 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1694 * logdev. The number of luns in physdev and logdev are returned in
1695 * *nphysicals and *nlogicals, respectively.
1696 * Returns 0 on success, -1 otherwise.
1697 */
1698 static int hpsa_gather_lun_info(struct ctlr_info *h,
1700 struct ReportLUNdata *physdev, u32 *nphysicals,
1701 struct ReportLUNdata *logdev, u32 *nlogicals)
1703 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1704 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1707 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1708 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1709 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1710 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1711 *nphysicals - HPSA_MAX_PHYS_LUN);
1712 *nphysicals = HPSA_MAX_PHYS_LUN;
1714 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1715 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1718 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1719 /* Reject Logicals in excess of our max capability. */
1720 if (*nlogicals > HPSA_MAX_LUN) {
1721 dev_warn(&h->pdev->dev,
1722 "maximum logical LUNs (%d) exceeded. "
1723 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1724 *nlogicals - HPSA_MAX_LUN);
1725 *nlogicals = HPSA_MAX_LUN;
1727 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1728 dev_warn(&h->pdev->dev,
1729 "maximum logical + physical LUNs (%d) exceeded. "
1730 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1731 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1732 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
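/*
 * Note (illustrative): LUNListLength is a big-endian byte count, so dividing
 * the be32 value by 8 -- the size of one CISS LUN address -- gives the
 * number of reported entries; a length of 0x18 means three LUNs.
 */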
1737 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1738 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1739 struct ReportLUNdata *logdev_list)
1741 /* Helper function, figure out where the LUN ID info is coming from
1742 * given index i, lists of physical and logical devices, where in
1743 * the list the raid controller is supposed to appear (first or last)
1744 */
1746 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1747 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1749 if (i == raid_ctlr_position)
1750 return RAID_CTLR_LUNID;
1752 if (i < logicals_start)
1753 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1755 if (i < last_device)
1756 return &logdev_list->LUN[i - nphysicals -
1757 (raid_ctlr_position == 0)][0];
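/*
 * Worked example (illustrative): with nphysicals = 2, nlogicals = 3 and
 * raid_ctlr_position = 5 (controller listed last), indices 0-1 map into
 * physdev_list, 2-4 map into logdev_list, and index 5 returns
 * RAID_CTLR_LUNID; with raid_ctlr_position = 0 every other index shifts up
 * by one, which is what the "(raid_ctlr_position == 0)" terms account for.
 */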
1762 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1764 /* the idea here is we could get notified
1765 * that some devices have changed, so we do a report
1766 * physical luns and report logical luns cmd, and adjust
1767 * our list of devices accordingly.
1769 * The scsi3addr's of devices won't change so long as the
1770 * adapter is not reset. That means we can rescan and
1771 * tell which devices we already know about, vs. new
1772 * devices, vs. disappearing devices.
1773 */
1774 struct ReportLUNdata *physdev_list = NULL;
1775 struct ReportLUNdata *logdev_list = NULL;
1776 unsigned char *inq_buff = NULL;
1779 u32 ndev_allocated = 0;
1780 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1782 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1783 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1784 int bus, target, lun;
1785 int raid_ctlr_position;
1786 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1788 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1790 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1791 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1792 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1793 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1795 if (!currentsd || !physdev_list || !logdev_list ||
1796 !inq_buff || !tmpdevice) {
1797 dev_err(&h->pdev->dev, "out of memory\n");
1800 memset(lunzerobits, 0, sizeof(lunzerobits));
1802 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1803 logdev_list, &nlogicals))
1806 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them
1807 * but each of them 4 times through different paths. The plus 1
1808 * is for the RAID controller.
1809 */
1810 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1812 /* Allocate the per device structures */
1813 for (i = 0; i < ndevs_to_allocate; i++) {
1814 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1815 if (!currentsd[i]) {
1816 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1817 __FILE__, __LINE__);
1823 if (unlikely(is_scsi_rev_5(h)))
1824 raid_ctlr_position = 0;
1826 raid_ctlr_position = nphysicals + nlogicals;
1828 /* adjust our table of devices */
1829 nmsa2xxx_enclosures = 0;
1830 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1833 /* Figure out where the LUN ID info is coming from */
1834 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1835 i, nphysicals, nlogicals, physdev_list, logdev_list);
1836 /* skip masked physical devices. */
1837 if (lunaddrbytes[3] & 0xC0 &&
1838 i < nphysicals + (raid_ctlr_position == 0))
1841 /* Get device type, vendor, model, device id */
1842 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1843 continue; /* skip it if we can't talk to it. */
1844 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1846 this_device = currentsd[ncurrent];
1848 /*
1849 * For the msa2xxx boxes, we have to insert a LUN 0 which
1850 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1851 * is nonetheless an enclosure device there. We have to
1852 * present that otherwise linux won't find anything if
1853 * there is no lun 0.
1854 */
1855 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1856 lunaddrbytes, bus, target, lun, lunzerobits,
1857 &nmsa2xxx_enclosures)) {
1859 this_device = currentsd[ncurrent];
1862 *this_device = *tmpdevice;
1863 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1865 switch (this_device->devtype) {
1867 /* We don't *really* support actual CD-ROM devices,
1868 * just "One Button Disaster Recovery" tape drive
1869 * which temporarily pretends to be a CD-ROM drive.
1870 * So we check that the device is really an OBDR tape
1871 * device by checking for "$DR-10" in bytes 43-48 of
1872 * the inquiry data.
1873 */
1875 #define OBDR_TAPE_SIG "$DR-10"
1876 strncpy(obdr_sig, &inq_buff[43], 6);
1878 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1879 /* Not OBDR device, ignore it. */
1890 case TYPE_MEDIUM_CHANGER:
1894 /* Only present the Smartarray HBA as a RAID controller.
1895 * If it's a RAID controller other than the HBA itself
1896 * (an external RAID controller, MSA500 or similar)
1897 * don't present it.
1898 */
1899 if (!is_hba_lunid(lunaddrbytes))
1906 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1909 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1912 for (i = 0; i < ndev_allocated; i++)
1913 kfree(currentsd[i]);
1916 kfree(physdev_list);
1920 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1921 * dma mapping and fills in the scatter gather entries of the
1922 * hpsa command, cp.
1923 */
1924 static int hpsa_scatter_gather(struct ctlr_info *h,
1925 struct CommandList *cp,
1926 struct scsi_cmnd *cmd)
1929 struct scatterlist *sg;
1931 int use_sg, i, sg_index, chained;
1932 struct SGDescriptor *curr_sg;
1934 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1936 use_sg = scsi_dma_map(cmd);
1941 goto sglist_finished;
1946 scsi_for_each_sg(cmd, sg, use_sg, i) {
1947 if (i == h->max_cmd_sg_entries - 1 &&
1948 use_sg > h->max_cmd_sg_entries) {
1950 curr_sg = h->cmd_sg_list[cp->cmdindex];
1953 addr64 = (u64) sg_dma_address(sg);
1954 len = sg_dma_len(sg);
1955 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1956 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1958 curr_sg->Ext = 0; /* we are not chaining */
1962 if (use_sg + chained > h->maxSG)
1963 h->maxSG = use_sg + chained;
1966 cp->Header.SGList = h->max_cmd_sg_entries;
1967 cp->Header.SGTotal = (u16) (use_sg + 1);
1968 hpsa_map_sg_chain_block(h, cp);
1974 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
1975 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
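/*
 * Worked example (illustrative, chained path above): with
 * max_cmd_sg_entries = 32 and a request mapping to use_sg = 40 segments,
 * entries 0-30 are filled inline, SG[31] becomes the chain descriptor
 * pointing at the per-command chain block holding the remaining 9 entries,
 * and the header reports SGList = 32 with SGTotal = 41 (40 data entries
 * plus the chain entry).
 */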
1980 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
1981 void (*done)(struct scsi_cmnd *))
1983 struct ctlr_info *h;
1984 struct hpsa_scsi_dev_t *dev;
1985 unsigned char scsi3addr[8];
1986 struct CommandList *c;
1987 unsigned long flags;
1989 /* Get the ptr to our adapter structure out of cmd->host. */
1990 h = sdev_to_hba(cmd->device);
1991 dev = cmd->device->hostdata;
1993 cmd->result = DID_NO_CONNECT << 16;
1997 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1999 /* Need a lock as this is being allocated from the pool */
2000 spin_lock_irqsave(&h->lock, flags);
2002 spin_unlock_irqrestore(&h->lock, flags);
2003 if (c == NULL) { /* trouble... */
2004 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2005 return SCSI_MLQUEUE_HOST_BUSY;
2008 /* Fill in the command list header */
2010 cmd->scsi_done = done; /* save this for use by completion code */
2012 /* save c in case we have to abort it */
2013 cmd->host_scribble = (unsigned char *) c;
2015 c->cmd_type = CMD_SCSI;
2017 c->Header.ReplyQueue = 0; /* unused in simple mode */
2018 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2019 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2020 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
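/*
 * Note (interpretation): packing the command's pool index into the upper
 * bits of Tag.lower and setting DIRECT_LOOKUP_BIT lets the completion path
 * turn a finished tag straight back into its CommandList slot without
 * scanning the list of outstanding commands.
 */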
2022 /* Fill in the request block... */
2024 c->Request.Timeout = 0;
2025 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2026 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2027 c->Request.CDBLen = cmd->cmd_len;
2028 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2029 c->Request.Type.Type = TYPE_CMD;
2030 c->Request.Type.Attribute = ATTR_SIMPLE;
2031 switch (cmd->sc_data_direction) {
2033 c->Request.Type.Direction = XFER_WRITE;
2035 case DMA_FROM_DEVICE:
2036 c->Request.Type.Direction = XFER_READ;
2039 c->Request.Type.Direction = XFER_NONE;
2041 case DMA_BIDIRECTIONAL:
2042 /* This can happen if a buggy application does a scsi passthru
2043 * and sets both inlen and outlen to non-zero. ( see
2044 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2047 c->Request.Type.Direction = XFER_RSVD;
2048 /* This is technically wrong, and hpsa controllers should
2049 * reject it with CMD_INVALID, which is the most correct
2050 * response, but non-fibre backends appear to let it
2051 * slide by, and give the same results as if this field
2052 * were set correctly. Either way is acceptable for
2053 * our purposes here.
2059 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2060 cmd->sc_data_direction);
2065 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2067 return SCSI_MLQUEUE_HOST_BUSY;
2069 enqueue_cmd_and_start_io(h, c);
2070 /* the cmd'll come back via intr handler in complete_scsi_command() */
2074 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
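/*
 * Note (assumption about the midlayer macro): DEF_SCSI_QCMD() generates
 * hpsa_scsi_queue_command() as a wrapper that takes the Scsi_Host lock and
 * calls hpsa_scsi_queue_command_lck() above, matching the .queuecommand
 * entry in hpsa_driver_template.
 */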
2076 static void hpsa_scan_start(struct Scsi_Host *sh)
2078 struct ctlr_info *h = shost_to_hba(sh);
2079 unsigned long flags;
2081 /* wait until any scan already in progress is finished. */
2083 spin_lock_irqsave(&h->scan_lock, flags);
2084 if (h->scan_finished)
2086 spin_unlock_irqrestore(&h->scan_lock, flags);
2087 wait_event(h->scan_wait_queue, h->scan_finished);
2088 /* Note: We don't need to worry about a race between this
2089 * thread and driver unload because the midlayer will
2090 * have incremented the reference count, so unload won't
2091 * happen if we're in here.
2094 h->scan_finished = 0; /* mark scan as in progress */
2095 spin_unlock_irqrestore(&h->scan_lock, flags);
2097 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2099 spin_lock_irqsave(&h->scan_lock, flags);
2100 h->scan_finished = 1; /* mark scan as finished. */
2101 wake_up_all(&h->scan_wait_queue);
2102 spin_unlock_irqrestore(&h->scan_lock, flags);
2105 static int hpsa_scan_finished(struct Scsi_Host *sh,
2106 unsigned long elapsed_time)
2108 struct ctlr_info *h = shost_to_hba(sh);
2109 unsigned long flags;
2112 spin_lock_irqsave(&h->scan_lock, flags);
2113 finished = h->scan_finished;
2114 spin_unlock_irqrestore(&h->scan_lock, flags);
2118 static int hpsa_change_queue_depth(struct scsi_device *sdev,
2119 int qdepth, int reason)
2121 struct ctlr_info *h = sdev_to_hba(sdev);
2123 if (reason != SCSI_QDEPTH_DEFAULT)
2129 if (qdepth > h->nr_cmds)
2130 qdepth = h->nr_cmds;
2131 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2132 return sdev->queue_depth;
2135 static void hpsa_unregister_scsi(struct ctlr_info *h)
2137 /* we are being forcibly unloaded, and may not refuse. */
2138 scsi_remove_host(h->scsi_host);
2139 scsi_host_put(h->scsi_host);
2140 h->scsi_host = NULL;
2143 static int hpsa_register_scsi(struct ctlr_info *h)
2147 rc = hpsa_scsi_detect(h);
2149 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2150 " hpsa_scsi_detect(), rc is %d\n", rc);
2154 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2155 unsigned char lunaddr[])
2159 int waittime = 1; /* seconds */
2160 struct CommandList *c;
2162 c = cmd_special_alloc(h);
2164 dev_warn(&h->pdev->dev, "out of memory in "
2165 "wait_for_device_to_become_ready.\n");
2169 /* Send test unit ready until device ready, or give up. */
2170 while (count < HPSA_TUR_RETRY_LIMIT) {
2172 /* Wait for a bit. do this first, because if we send
2173 * the TUR right away, the reset will just abort it.
2175 msleep(1000 * waittime);
2178 /* Increase wait time with each try, up to a point. */
2179 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2180 waittime = waittime * 2;
2182 /* Send the Test Unit Ready */
2183 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2184 hpsa_scsi_do_simple_cmd_core(h, c);
2185 /* no unmap needed here because no data xfer. */
2187 if (c->err_info->CommandStatus == CMD_SUCCESS)
2190 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2191 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2192 (c->err_info->SenseInfo[2] == NO_SENSE ||
2193 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2196 dev_warn(&h->pdev->dev, "waiting %d secs "
2197 "for device to become ready.\n", waittime);
2198 rc = 1; /* device not ready. */
2202 dev_warn(&h->pdev->dev, "giving up on device.\n");
2204 dev_warn(&h->pdev->dev, "device is ready.\n");
2206 cmd_special_free(h, c);
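/* Timing note for the loop above: the delay before each Test Unit Ready
 * starts at one second and doubles on every retry (1 s, 2 s, 4 s, ...)
 * until it reaches HPSA_MAX_WAIT_INTERVAL_SECS, and the whole exchange
 * is bounded by HPSA_TUR_RETRY_LIMIT attempts before the driver gives
 * up on the device.
 */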
2210 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2211 * complaining. Doing a host- or bus-reset can't do anything good here.
2213 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2216 struct ctlr_info *h;
2217 struct hpsa_scsi_dev_t *dev;
2219 /* find the controller to which the command to be aborted was sent */
2220 h = sdev_to_hba(scsicmd->device);
2221 if (h == NULL) /* paranoia */
2223 dev = scsicmd->device->hostdata;
2225 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2226 "device lookup failed.\n");
2229 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2230 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2231 /* send a reset to the SCSI LUN which the command was sent to */
2232 rc = hpsa_send_reset(h, dev->scsi3addr);
2233 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2236 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2241 * For operations that cannot sleep, a command block is allocated at init,
2242 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2243 * which ones are free or in use. Lock must be held when calling this.
2244 * cmd_free() is the complement.
2246 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2248 struct CommandList *c;
2250 union u64bit temp64;
2251 dma_addr_t cmd_dma_handle, err_dma_handle;
2254 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2255 if (i == h->nr_cmds)
2257 } while (test_and_set_bit
2258 (i & (BITS_PER_LONG - 1),
2259 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2260 c = h->cmd_pool + i;
2261 memset(c, 0, sizeof(*c));
2262 cmd_dma_handle = h->cmd_pool_dhandle
2263 + i * sizeof(*c);
2264 c->err_info = h->errinfo_pool + i;
2265 memset(c->err_info, 0, sizeof(*c->err_info));
2266 err_dma_handle = h->errinfo_pool_dhandle
2267 + i * sizeof(*c->err_info);
2272 INIT_LIST_HEAD(&c->list);
2273 c->busaddr = (u32) cmd_dma_handle;
2274 temp64.val = (u64) err_dma_handle;
2275 c->ErrDesc.Addr.lower = temp64.val32.lower;
2276 c->ErrDesc.Addr.upper = temp64.val32.upper;
2277 c->ErrDesc.Len = sizeof(*c->err_info);
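/* Example of the bitmap bookkeeping above: with BITS_PER_LONG == 64,
 * command index i == 70 lives in word cmd_pool_bits[70 / 64], i.e.
 * cmd_pool_bits[1], at bit (70 & 63) == 6.  find_first_zero_bit() picks
 * the lowest free index, test_and_set_bit() claims that bit, and
 * cmd_free() clears it again after recomputing i as c - h->cmd_pool.
 */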
2283 /* For operations that can wait for kmalloc to possibly sleep,
2284 * this routine can be called. Lock need not be held to call
2285 * cmd_special_alloc. cmd_special_free() is the complement.
2287 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2289 struct CommandList *c;
2290 union u64bit temp64;
2291 dma_addr_t cmd_dma_handle, err_dma_handle;
2293 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2296 memset(c, 0, sizeof(*c));
2300 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2303 if (c->err_info == NULL) {
2304 pci_free_consistent(h->pdev,
2305 sizeof(*c), c, cmd_dma_handle);
2308 memset(c->err_info, 0, sizeof(*c->err_info));
2310 INIT_LIST_HEAD(&c->list);
2311 c->busaddr = (u32) cmd_dma_handle;
2312 temp64.val = (u64) err_dma_handle;
2313 c->ErrDesc.Addr.lower = temp64.val32.lower;
2314 c->ErrDesc.Addr.upper = temp64.val32.upper;
2315 c->ErrDesc.Len = sizeof(*c->err_info);
2321 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2325 i = c - h->cmd_pool;
2326 clear_bit(i & (BITS_PER_LONG - 1),
2327 h->cmd_pool_bits + (i / BITS_PER_LONG));
2331 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2333 union u64bit temp64;
2335 temp64.val32.lower = c->ErrDesc.Addr.lower;
2336 temp64.val32.upper = c->ErrDesc.Addr.upper;
2337 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2338 c->err_info, (dma_addr_t) temp64.val);
2339 pci_free_consistent(h->pdev, sizeof(*c),
2340 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
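/* The two allocator pairs above serve different callers: cmd_alloc()/
 * cmd_free() hand out entries from the preallocated cmd_pool using the
 * bitmap, so they are usable where sleeping is not allowed but require
 * h->lock, while cmd_special_alloc()/cmd_special_free() do a fresh
 * pci_alloc_consistent() per command and may sleep, so they are used
 * from process context (ioctls, device reset, cache flush) without the
 * lock.
 */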
2343 #ifdef CONFIG_COMPAT
2345 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2347 IOCTL32_Command_struct __user *arg32 =
2348 (IOCTL32_Command_struct __user *) arg;
2349 IOCTL_Command_struct arg64;
2350 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2354 memset(&arg64, 0, sizeof(arg64));
2356 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2357 sizeof(arg64.LUN_info));
2358 err |= copy_from_user(&arg64.Request, &arg32->Request,
2359 sizeof(arg64.Request));
2360 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2361 sizeof(arg64.error_info));
2362 err |= get_user(arg64.buf_size, &arg32->buf_size);
2363 err |= get_user(cp, &arg32->buf);
2364 arg64.buf = compat_ptr(cp);
2365 err |= copy_to_user(p, &arg64, sizeof(arg64));
2370 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2373 err |= copy_in_user(&arg32->error_info, &p->error_info,
2374 sizeof(arg32->error_info));
2380 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2383 BIG_IOCTL32_Command_struct __user *arg32 =
2384 (BIG_IOCTL32_Command_struct __user *) arg;
2385 BIG_IOCTL_Command_struct arg64;
2386 BIG_IOCTL_Command_struct __user *p =
2387 compat_alloc_user_space(sizeof(arg64));
2391 memset(&arg64, 0, sizeof(arg64));
2393 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2394 sizeof(arg64.LUN_info));
2395 err |= copy_from_user(&arg64.Request, &arg32->Request,
2396 sizeof(arg64.Request));
2397 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2398 sizeof(arg64.error_info));
2399 err |= get_user(arg64.buf_size, &arg32->buf_size);
2400 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2401 err |= get_user(cp, &arg32->buf);
2402 arg64.buf = compat_ptr(cp);
2403 err |= copy_to_user(p, &arg64, sizeof(arg64));
2408 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2411 err |= copy_in_user(&arg32->error_info, &p->error_info,
2412 sizeof(arg32->error_info));
2418 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2421 case CCISS_GETPCIINFO:
2422 case CCISS_GETINTINFO:
2423 case CCISS_SETINTINFO:
2424 case CCISS_GETNODENAME:
2425 case CCISS_SETNODENAME:
2426 case CCISS_GETHEARTBEAT:
2427 case CCISS_GETBUSTYPES:
2428 case CCISS_GETFIRMVER:
2429 case CCISS_GETDRIVVER:
2430 case CCISS_REVALIDVOLS:
2431 case CCISS_DEREGDISK:
2432 case CCISS_REGNEWDISK:
2434 case CCISS_RESCANDISK:
2435 case CCISS_GETLUNINFO:
2436 return hpsa_ioctl(dev, cmd, arg);
2438 case CCISS_PASSTHRU32:
2439 return hpsa_ioctl32_passthru(dev, cmd, arg);
2440 case CCISS_BIG_PASSTHRU32:
2441 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2444 return -ENOIOCTLCMD;
2449 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2451 struct hpsa_pci_info pciinfo;
2455 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2456 pciinfo.bus = h->pdev->bus->number;
2457 pciinfo.dev_fn = h->pdev->devfn;
2458 pciinfo.board_id = h->board_id;
2459 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2464 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2466 DriverVer_type DriverVer;
2467 unsigned char vmaj, vmin, vsubmin;
2470 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2471 &vmaj, &vmin, &vsubmin);
2473 dev_info(&h->pdev->dev, "driver version string '%s' "
2474 "unrecognized.", HPSA_DRIVER_VERSION);
2479 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2482 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
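/* Example of the version packing above: a driver version string of
 * "a.b.c" yields DriverVer == (a << 16) | (b << 8) | c, so "2.0.2"
 * packs to 0x020002.
 */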
2487 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2489 IOCTL_Command_struct iocommand;
2490 struct CommandList *c;
2492 union u64bit temp64;
2496 if (!capable(CAP_SYS_RAWIO))
2498 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2500 if ((iocommand.buf_size < 1) &&
2501 (iocommand.Request.Type.Direction != XFER_NONE)) {
2504 if (iocommand.buf_size > 0) {
2505 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2508 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2509 /* Copy the data into the buffer we created */
2510 if (copy_from_user(buff, iocommand.buf,
2511 iocommand.buf_size)) {
2516 memset(buff, 0, iocommand.buf_size);
2519 c = cmd_special_alloc(h);
2524 /* Fill in the command type */
2525 c->cmd_type = CMD_IOCTL_PEND;
2526 /* Fill in Command Header */
2527 c->Header.ReplyQueue = 0; /* unused in simple mode */
2528 if (iocommand.buf_size > 0) { /* buffer to fill */
2529 c->Header.SGList = 1;
2530 c->Header.SGTotal = 1;
2531 } else { /* no buffers to fill */
2532 c->Header.SGList = 0;
2533 c->Header.SGTotal = 0;
2535 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2536 /* use the kernel address of the cmd block for the tag */
2537 c->Header.Tag.lower = c->busaddr;
2539 /* Fill in Request block */
2540 memcpy(&c->Request, &iocommand.Request,
2541 sizeof(c->Request));
2543 /* Fill in the scatter gather information */
2544 if (iocommand.buf_size > 0) {
2545 temp64.val = pci_map_single(h->pdev, buff,
2546 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2547 c->SG[0].Addr.lower = temp64.val32.lower;
2548 c->SG[0].Addr.upper = temp64.val32.upper;
2549 c->SG[0].Len = iocommand.buf_size;
2550 c->SG[0].Ext = 0; /* we are not chaining */
2552 hpsa_scsi_do_simple_cmd_core(h, c);
2553 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2554 check_ioctl_unit_attention(h, c);
2556 /* Copy the error information out */
2557 memcpy(&iocommand.error_info, c->err_info,
2558 sizeof(iocommand.error_info));
2559 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2561 cmd_special_free(h, c);
2564 if (iocommand.Request.Type.Direction == XFER_READ &&
2565 iocommand.buf_size > 0) {
2566 /* Copy the data out of the buffer we created */
2567 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2569 cmd_special_free(h, c);
2574 cmd_special_free(h, c);
2578 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2580 BIG_IOCTL_Command_struct *ioc;
2581 struct CommandList *c;
2582 unsigned char **buff = NULL;
2583 int *buff_size = NULL;
2584 union u64bit temp64;
2590 BYTE __user *data_ptr;
2594 if (!capable(CAP_SYS_RAWIO))
2596 ioc = (BIG_IOCTL_Command_struct *)
2597 kmalloc(sizeof(*ioc), GFP_KERNEL);
2602 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2606 if ((ioc->buf_size < 1) &&
2607 (ioc->Request.Type.Direction != XFER_NONE)) {
2611 /* Check kmalloc limits using all SGs */
2612 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2616 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2620 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2625 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2630 left = ioc->buf_size;
2631 data_ptr = ioc->buf;
2633 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2634 buff_size[sg_used] = sz;
2635 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2636 if (buff[sg_used] == NULL) {
2640 if (ioc->Request.Type.Direction == XFER_WRITE) {
2641 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2646 memset(buff[sg_used], 0, sz);
2651 c = cmd_special_alloc(h);
2656 c->cmd_type = CMD_IOCTL_PEND;
2657 c->Header.ReplyQueue = 0;
2658 c->Header.SGList = c->Header.SGTotal = sg_used;
2659 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2660 c->Header.Tag.lower = c->busaddr;
2661 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2662 if (ioc->buf_size > 0) {
2664 for (i = 0; i < sg_used; i++) {
2665 temp64.val = pci_map_single(h->pdev, buff[i],
2666 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2667 c->SG[i].Addr.lower = temp64.val32.lower;
2668 c->SG[i].Addr.upper = temp64.val32.upper;
2669 c->SG[i].Len = buff_size[i];
2670 /* we are not chaining */
2674 hpsa_scsi_do_simple_cmd_core(h, c);
2676 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2677 check_ioctl_unit_attention(h, c);
2678 /* Copy the error information out */
2679 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2680 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2681 cmd_special_free(h, c);
2685 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
2686 /* Copy the data out of the buffer we created */
2687 BYTE __user *ptr = ioc->buf;
2688 for (i = 0; i < sg_used; i++) {
2689 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2690 cmd_special_free(h, c);
2694 ptr += buff_size[i];
2697 cmd_special_free(h, c);
2701 for (i = 0; i < sg_used; i++)
2710 static void check_ioctl_unit_attention(struct ctlr_info *h,
2711 struct CommandList *c)
2713 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2714 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2715 (void) check_for_unit_attention(h, c);
2720 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2722 struct ctlr_info *h;
2723 void __user *argp = (void __user *)arg;
2725 h = sdev_to_hba(dev);
2728 case CCISS_DEREGDISK:
2729 case CCISS_REGNEWDISK:
2731 hpsa_scan_start(h->scsi_host);
2733 case CCISS_GETPCIINFO:
2734 return hpsa_getpciinfo_ioctl(h, argp);
2735 case CCISS_GETDRIVVER:
2736 return hpsa_getdrivver_ioctl(h, argp);
2737 case CCISS_PASSTHRU:
2738 return hpsa_passthru_ioctl(h, argp);
2739 case CCISS_BIG_PASSTHRU:
2740 return hpsa_big_passthru_ioctl(h, argp);
2746 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2747 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2750 int pci_dir = XFER_NONE;
2752 c->cmd_type = CMD_IOCTL_PEND;
2753 c->Header.ReplyQueue = 0;
2754 if (buff != NULL && size > 0) {
2755 c->Header.SGList = 1;
2756 c->Header.SGTotal = 1;
2758 c->Header.SGList = 0;
2759 c->Header.SGTotal = 0;
2761 c->Header.Tag.lower = c->busaddr;
2762 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2764 c->Request.Type.Type = cmd_type;
2765 if (cmd_type == TYPE_CMD) {
2768 /* are we trying to read a vital product data (VPD) page */
2769 if (page_code != 0) {
2770 c->Request.CDB[1] = 0x01;
2771 c->Request.CDB[2] = page_code;
2773 c->Request.CDBLen = 6;
2774 c->Request.Type.Attribute = ATTR_SIMPLE;
2775 c->Request.Type.Direction = XFER_READ;
2776 c->Request.Timeout = 0;
2777 c->Request.CDB[0] = HPSA_INQUIRY;
2778 c->Request.CDB[4] = size & 0xFF;
2780 case HPSA_REPORT_LOG:
2781 case HPSA_REPORT_PHYS:
2782 /* Talking to the controller, so it's a physical command:
2783 mode = 00, target = 0. Nothing to write.
2784 */
2785 c->Request.CDBLen = 12;
2786 c->Request.Type.Attribute = ATTR_SIMPLE;
2787 c->Request.Type.Direction = XFER_READ;
2788 c->Request.Timeout = 0;
2789 c->Request.CDB[0] = cmd;
2790 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2791 c->Request.CDB[7] = (size >> 16) & 0xFF;
2792 c->Request.CDB[8] = (size >> 8) & 0xFF;
2793 c->Request.CDB[9] = size & 0xFF;
2795 case HPSA_CACHE_FLUSH:
2796 c->Request.CDBLen = 12;
2797 c->Request.Type.Attribute = ATTR_SIMPLE;
2798 c->Request.Type.Direction = XFER_WRITE;
2799 c->Request.Timeout = 0;
2800 c->Request.CDB[0] = BMIC_WRITE;
2801 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2803 case TEST_UNIT_READY:
2804 c->Request.CDBLen = 6;
2805 c->Request.Type.Attribute = ATTR_SIMPLE;
2806 c->Request.Type.Direction = XFER_NONE;
2807 c->Request.Timeout = 0;
2810 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2814 } else if (cmd_type == TYPE_MSG) {
2817 case HPSA_DEVICE_RESET_MSG:
2818 c->Request.CDBLen = 16;
2819 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2820 c->Request.Type.Attribute = ATTR_SIMPLE;
2821 c->Request.Type.Direction = XFER_NONE;
2822 c->Request.Timeout = 0; /* Don't time out */
2823 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2824 c->Request.CDB[1] = 0x03; /* Reset target above */
2825 /* If bytes 4-7 are zero, it means reset the */
2826 /* LunID device */
2827 c->Request.CDB[4] = 0x00;
2828 c->Request.CDB[5] = 0x00;
2829 c->Request.CDB[6] = 0x00;
2830 c->Request.CDB[7] = 0x00;
2834 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2839 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2843 switch (c->Request.Type.Direction) {
2845 pci_dir = PCI_DMA_FROMDEVICE;
2848 pci_dir = PCI_DMA_TODEVICE;
2851 pci_dir = PCI_DMA_NONE;
2854 pci_dir = PCI_DMA_BIDIRECTIONAL;
2857 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2863 * Map (physical) PCI mem into (virtual) kernel space
2865 static void __iomem *remap_pci_mem(ulong base, ulong size)
2867 ulong page_base = ((ulong) base) & PAGE_MASK;
2868 ulong page_offs = ((ulong) base) - page_base;
2869 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2871 return page_remapped ? (page_remapped + page_offs) : NULL;
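/* Example of the arithmetic above, assuming 4 KB pages: for
 * base == 0xfebf0250 and size == 0x100, page_base is 0xfebf0000 and
 * page_offs is 0x250, so ioremap() maps 0x350 bytes starting at the
 * page boundary and the caller gets back page_remapped + 0x250, i.e. a
 * pointer to the requested registers rather than to the page start.
 */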
2874 /* Takes cmds off the submission queue and sends them to the hardware,
2875 * then puts them on the queue of cmds waiting for completion.
2877 static void start_io(struct ctlr_info *h)
2879 struct CommandList *c;
2881 while (!list_empty(&h->reqQ)) {
2882 c = list_entry(h->reqQ.next, struct CommandList, list);
2883 /* can't do anything if fifo is full */
2884 if ((h->access.fifo_full(h))) {
2885 dev_warn(&h->pdev->dev, "fifo full\n");
2889 /* Get the first entry from the Request Q */
2893 /* Tell the controller execute command */
2894 h->access.submit_command(h, c);
2896 /* Put job onto the completed Q */
2901 static inline unsigned long get_next_completion(struct ctlr_info *h)
2903 return h->access.command_completed(h);
2906 static inline bool interrupt_pending(struct ctlr_info *h)
2908 return h->access.intr_pending(h);
2911 static inline long interrupt_not_for_us(struct ctlr_info *h)
2913 return (h->access.intr_pending(h) == 0) ||
2914 (h->interrupts_enabled == 0);
2917 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2920 if (unlikely(tag_index >= h->nr_cmds)) {
2921 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2927 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2930 if (likely(c->cmd_type == CMD_SCSI))
2931 complete_scsi_command(c);
2932 else if (c->cmd_type == CMD_IOCTL_PEND)
2933 complete(c->waiting);
2936 static inline u32 hpsa_tag_contains_index(u32 tag)
2938 return tag & DIRECT_LOOKUP_BIT;
2941 static inline u32 hpsa_tag_to_index(u32 tag)
2943 return tag >> DIRECT_LOOKUP_SHIFT;
2947 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
2949 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
2950 #define HPSA_SIMPLE_ERROR_BITS 0x03
2951 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
2952 return tag & ~HPSA_SIMPLE_ERROR_BITS;
2953 return tag & ~HPSA_PERF_ERROR_BITS;
2956 /* process completion of an indexed ("direct lookup") command */
2957 static inline u32 process_indexed_cmd(struct ctlr_info *h,
2961 struct CommandList *c;
2963 tag_index = hpsa_tag_to_index(raw_tag);
2964 if (bad_tag(h, tag_index, raw_tag))
2965 return next_command(h);
2966 c = h->cmd_pool + tag_index;
2967 finish_cmd(c, raw_tag);
2968 return next_command(h);
2971 /* process completion of a non-indexed command */
2972 static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2976 struct CommandList *c = NULL;
2978 tag = hpsa_tag_discard_error_bits(h, raw_tag);
2979 list_for_each_entry(c, &h->cmpQ, list) {
2980 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2981 finish_cmd(c, raw_tag);
2982 return next_command(h);
2985 bad_tag(h, h->nr_cmds + 1, raw_tag);
2986 return next_command(h);
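/* The two helpers above cover both completion styles: tags with
 * DIRECT_LOOKUP_BIT set carry their cmd_pool index and are resolved
 * arithmetically in process_indexed_cmd(), while anything else is
 * matched against the completion queue by bus address with the
 * low-order error bits masked off (the 0xFFFFFFE0 comparison above).
 */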
2989 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
2991 struct ctlr_info *h = dev_id;
2992 unsigned long flags;
2995 if (interrupt_not_for_us(h))
2997 spin_lock_irqsave(&h->lock, flags);
2998 while (interrupt_pending(h)) {
2999 raw_tag = get_next_completion(h);
3000 while (raw_tag != FIFO_EMPTY) {
3001 if (hpsa_tag_contains_index(raw_tag))
3002 raw_tag = process_indexed_cmd(h, raw_tag);
3004 raw_tag = process_nonindexed_cmd(h, raw_tag);
3007 spin_unlock_irqrestore(&h->lock, flags);
3011 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
3013 struct ctlr_info *h = dev_id;
3014 unsigned long flags;
3017 spin_lock_irqsave(&h->lock, flags);
3018 raw_tag = get_next_completion(h);
3019 while (raw_tag != FIFO_EMPTY) {
3020 if (hpsa_tag_contains_index(raw_tag))
3021 raw_tag = process_indexed_cmd(h, raw_tag);
3023 raw_tag = process_nonindexed_cmd(h, raw_tag);
3025 spin_unlock_irqrestore(&h->lock, flags);
3029 /* Send a message CDB to the firmware. Careful, this only works
3030 * in simple mode, not performant mode due to the tag lookup.
3031 * We only ever use this immediately after a controller reset.
3033 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3037 struct CommandListHeader CommandHeader;
3038 struct RequestBlock Request;
3039 struct ErrDescriptor ErrorDescriptor;
3041 struct Command *cmd;
3042 static const size_t cmd_sz = sizeof(*cmd) +
3043 sizeof(cmd->ErrorDescriptor);
3045 uint32_t paddr32, tag;
3046 void __iomem *vaddr;
3049 vaddr = pci_ioremap_bar(pdev, 0);
3053 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3054 * CCISS commands, so they must be allocated from the lower 4GiB of
3055 * memory.
3057 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3063 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3069 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3070 * although there's no guarantee, we assume that the address is at
3071 * least 4-byte aligned (most likely, it's page-aligned).
3075 cmd->CommandHeader.ReplyQueue = 0;
3076 cmd->CommandHeader.SGList = 0;
3077 cmd->CommandHeader.SGTotal = 0;
3078 cmd->CommandHeader.Tag.lower = paddr32;
3079 cmd->CommandHeader.Tag.upper = 0;
3080 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3082 cmd->Request.CDBLen = 16;
3083 cmd->Request.Type.Type = TYPE_MSG;
3084 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3085 cmd->Request.Type.Direction = XFER_NONE;
3086 cmd->Request.Timeout = 0; /* Don't time out */
3087 cmd->Request.CDB[0] = opcode;
3088 cmd->Request.CDB[1] = type;
3089 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3090 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3091 cmd->ErrorDescriptor.Addr.upper = 0;
3092 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3094 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3096 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3097 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3098 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3100 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3105 /* we leak the DMA buffer here ... no choice since the controller could
3106 * still complete the command.
3108 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3109 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3114 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3116 if (tag & HPSA_ERROR_BIT) {
3117 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3122 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3127 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3128 #define hpsa_noop(p) hpsa_message(p, 3, 0)
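/* Usage note: hpsa_soft_reset_controller() and hpsa_noop() are thin
 * wrappers around hpsa_message() with message opcodes 1 and 3
 * respectively; hpsa_init_reset_devices() below uses the no-op variant
 * to poll whether a freshly reset controller is answering commands
 * again.
 */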
3130 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3131 void __iomem *vaddr, bool use_doorbell)
3137 /* For everything after the P600, the PCI power state method
3138 * of resetting the controller doesn't work, so we have this
3139 * other way using the doorbell register.
3141 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3142 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
3144 } else { /* Try to do it the PCI power state way */
3146 /* Quoting from the Open CISS Specification: "The Power
3147 * Management Control/Status Register (CSR) controls the power
3148 * state of the device. The normal operating state is D0,
3149 * CSR=00h. The software off state is D3, CSR=03h. To reset
3150 * the controller, place the interface device in D3 then to D0,
3151 * this causes a secondary PCI reset which will reset the
3154 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3157 "hpsa_reset_controller: "
3158 "PCI PM not supported\n");
3161 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3162 /* enter the D3hot power management state */
3163 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3164 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3166 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3170 /* enter the D0 power management state */
3171 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3173 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3180 static __devinit void init_driver_version(char *driver_version, int len)
3182 memset(driver_version, 0, len);
3183 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
3186 static __devinit int write_driver_ver_to_cfgtable(
3187 struct CfgTable __iomem *cfgtable)
3189 char *driver_version;
3190 int i, size = sizeof(cfgtable->driver_version);
3192 driver_version = kmalloc(size, GFP_KERNEL);
3193 if (!driver_version)
3196 init_driver_version(driver_version, size);
3197 for (i = 0; i < size; i++)
3198 writeb(driver_version[i], &cfgtable->driver_version[i]);
3199 kfree(driver_version);
3203 static __devinit void read_driver_ver_from_cfgtable(
3204 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3208 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3209 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3212 static __devinit int controller_reset_failed(
3213 struct CfgTable __iomem *cfgtable)
3216 char *driver_ver, *old_driver_ver;
3217 int rc, size = sizeof(cfgtable->driver_version);
3219 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3220 if (!old_driver_ver)
3222 driver_ver = old_driver_ver + size;
3224 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3225 * should have been changed, otherwise we know the reset failed.
3227 init_driver_version(old_driver_ver, size);
3228 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3229 rc = !memcmp(driver_ver, old_driver_ver, size);
3230 kfree(old_driver_ver);
3233 /* This does a hard reset of the controller using PCI power management
3234 * states or the doorbell register.
3236 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3240 u64 cfg_base_addr_index;
3241 void __iomem *vaddr;
3242 unsigned long paddr;
3243 u32 misc_fw_support;
3245 struct CfgTable __iomem *cfgtable;
3248 u16 command_register;
3250 /* For controllers as old as the P600, this is very nearly
3253 * pci_save_state(pci_dev);
3254 * pci_set_power_state(pci_dev, PCI_D3hot);
3255 * pci_set_power_state(pci_dev, PCI_D0);
3256 * pci_restore_state(pci_dev);
3258 * For controllers newer than the P600, the pci power state
3259 * method of resetting doesn't work so we have another way
3260 * using the doorbell register.
3263 /* Exclude 640x boards. These are two pci devices in one slot
3264 * which share a battery backed cache module. One controls the
3265 * cache, the other accesses the cache through the one that controls
3266 * it. If we reset the one controlling the cache, the other will
3267 * likely not be happy. Just forbid resetting this conjoined mess.
3268 * The 640x isn't really supported by hpsa anyway.
3270 rc = hpsa_lookup_board_id(pdev, &board_id);
3272 dev_warn(&pdev->dev, "Not resetting device.\n");
3275 if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
3278 /* Save the PCI command register */
3279 pci_read_config_word(pdev, 4, &command_register);
3280 /* Turn the board off. This is so that later pci_restore_state()
3281 * won't turn the board on before the rest of config space is ready.
3283 pci_disable_device(pdev);
3284 pci_save_state(pdev);
3286 /* find the first memory BAR, so we can find the cfg table */
3287 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3290 vaddr = remap_pci_mem(paddr, 0x250);
3294 /* find cfgtable in order to check if reset via doorbell is supported */
3295 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3296 &cfg_base_addr_index, &cfg_offset);
3299 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3300 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3305 rc = write_driver_ver_to_cfgtable(cfgtable);
3309 /* If reset via doorbell register is supported, use that. */
3310 misc_fw_support = readl(&cfgtable->misc_fw_support);
3311 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3313 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3315 goto unmap_cfgtable;
3317 pci_restore_state(pdev);
3318 rc = pci_enable_device(pdev);
3320 dev_warn(&pdev->dev, "failed to enable device.\n");
3321 goto unmap_cfgtable;
3323 pci_write_config_word(pdev, 4, command_register);
3325 /* Some devices (notably the HP Smart Array 5i Controller)
3326 need a little pause here */
3327 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3329 /* Wait for board to become not ready, then ready. */
3330 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3331 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3333 dev_warn(&pdev->dev,
3334 "failed waiting for board to reset\n");
3335 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3337 dev_warn(&pdev->dev,
3338 "failed waiting for board to become ready\n");
3339 goto unmap_cfgtable;
3342 rc = controller_reset_failed(cfgtable);
3344 goto unmap_cfgtable;
3346 dev_warn(&pdev->dev, "Unable to successfully reset controller,"
3347 " Ignoring controller.\n");
3350 dev_info(&pdev->dev, "board ready.\n");
3362 * We cannot read the structure directly; for portability we must use
3363 * the readb/readl io accessors instead.
3364 * This is for debug only.
3366 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3372 dev_info(dev, "Controller Configuration information\n");
3373 dev_info(dev, "------------------------------------\n");
3374 for (i = 0; i < 4; i++)
3375 temp_name[i] = readb(&(tb->Signature[i]));
3376 temp_name[4] = '\0';
3377 dev_info(dev, " Signature = %s\n", temp_name);
3378 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3379 dev_info(dev, " Transport methods supported = 0x%x\n",
3380 readl(&(tb->TransportSupport)));
3381 dev_info(dev, " Transport methods active = 0x%x\n",
3382 readl(&(tb->TransportActive)));
3383 dev_info(dev, " Requested transport Method = 0x%x\n",
3384 readl(&(tb->HostWrite.TransportRequest)));
3385 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3386 readl(&(tb->HostWrite.CoalIntDelay)));
3387 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3388 readl(&(tb->HostWrite.CoalIntCount)));
3389 dev_info(dev, " Max outstanding commands = %d\n",
3390 readl(&(tb->CmdsOutMax)));
3391 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3392 for (i = 0; i < 16; i++)
3393 temp_name[i] = readb(&(tb->ServerName[i]));
3394 temp_name[16] = '\0';
3395 dev_info(dev, " Server Name = %s\n", temp_name);
3396 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3397 readl(&(tb->HeartBeat)));
3398 #endif /* HPSA_DEBUG */
3401 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3403 int i, offset, mem_type, bar_type;
3405 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3408 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3409 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3410 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3413 mem_type = pci_resource_flags(pdev, i) &
3414 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3416 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3417 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3418 offset += 4; /* 32 bit */
3420 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3423 default: /* reserved in PCI 2.2 */
3424 dev_warn(&pdev->dev,
3425 "base address is invalid\n");
3430 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3436 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3437 * controllers that are capable. If not, we use IO-APIC mode.
3440 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3442 #ifdef CONFIG_PCI_MSI
3444 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3448 /* Some boards advertise MSI but don't really support it */
3449 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3450 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3451 goto default_int_mode;
3452 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3453 dev_info(&h->pdev->dev, "MSIX\n");
3454 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3456 h->intr[0] = hpsa_msix_entries[0].vector;
3457 h->intr[1] = hpsa_msix_entries[1].vector;
3458 h->intr[2] = hpsa_msix_entries[2].vector;
3459 h->intr[3] = hpsa_msix_entries[3].vector;
3464 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3465 "available\n", err);
3466 goto default_int_mode;
3468 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3470 goto default_int_mode;
3473 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3474 dev_info(&h->pdev->dev, "MSI\n");
3475 if (!pci_enable_msi(h->pdev))
3478 dev_warn(&h->pdev->dev, "MSI init failed\n");
3481 #endif /* CONFIG_PCI_MSI */
3482 /* if we get here we're going to use the default interrupt mode */
3483 h->intr[h->intr_mode] = h->pdev->irq;
3486 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3489 u32 subsystem_vendor_id, subsystem_device_id;
3491 subsystem_vendor_id = pdev->subsystem_vendor;
3492 subsystem_device_id = pdev->subsystem_device;
3493 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3494 subsystem_vendor_id;
3496 for (i = 0; i < ARRAY_SIZE(products); i++)
3497 if (*board_id == products[i].board_id)
3500 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3501 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3503 dev_warn(&pdev->dev, "unrecognized board ID: "
3504 "0x%08x, ignoring.\n", *board_id);
3507 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
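/* Example of the board_id composition above: subsystem vendor 0x103C
 * with subsystem device 0x3225 yields board_id 0x3225103C, which is the
 * value tested by the P600 quirk in hpsa_p600_dma_prefetch_quirk()
 * below.
 */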
3510 static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3514 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3515 return ((command & PCI_COMMAND_MEMORY) == 0);
3518 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3519 unsigned long *memory_bar)
3523 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3524 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3525 /* addressing mode bits already removed */
3526 *memory_bar = pci_resource_start(pdev, i);
3527 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3531 dev_warn(&pdev->dev, "no memory BAR found\n");
3535 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
3536 void __iomem *vaddr, int wait_for_ready)
3541 iterations = HPSA_BOARD_READY_ITERATIONS;
3543 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
3545 for (i = 0; i < iterations; i++) {
3546 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3547 if (wait_for_ready) {
3548 if (scratchpad == HPSA_FIRMWARE_READY)
3551 if (scratchpad != HPSA_FIRMWARE_READY)
3554 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3556 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3560 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3561 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3564 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3565 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3566 *cfg_base_addr &= (u32) 0x0000ffff;
3567 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3568 if (*cfg_base_addr_index == -1) {
3569 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3575 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3579 u64 cfg_base_addr_index;
3583 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3584 &cfg_base_addr_index, &cfg_offset);
3587 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3588 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3591 rc = write_driver_ver_to_cfgtable(h->cfgtable);
3594 /* Find performant mode table. */
3595 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3596 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3597 cfg_base_addr_index)+cfg_offset+trans_offset,
3598 sizeof(*h->transtable));
3604 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3606 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3608 /* Limit commands in memory limited kdump scenario. */
3609 if (reset_devices && h->max_commands > 32)
3610 h->max_commands = 32;
3612 if (h->max_commands < 16) {
3613 dev_warn(&h->pdev->dev, "Controller reports "
3614 "max supported commands of %d, an obvious lie. "
3615 "Using 16. Ensure that firmware is up to date.\n",
3617 h->max_commands = 16;
3621 /* Interrogate the hardware for some limits:
3622 * max commands, max SG elements without chaining, and with chaining,
3623 * SG chain block size, etc.
3625 static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3627 hpsa_get_max_perf_mode_cmds(h);
3628 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3629 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3631 * Limit in-command s/g elements to 32 to save DMA'able memory.
3632 * However, the spec says if 0, use 31.
3634 h->max_cmd_sg_entries = 31;
3635 if (h->maxsgentries > 512) {
3636 h->max_cmd_sg_entries = 32;
3637 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3638 h->maxsgentries--; /* save one for chain pointer */
3640 h->maxsgentries = 31; /* default to traditional values */
3645 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3647 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3648 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3649 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3650 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3651 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3657 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3658 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3663 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3665 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3669 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
3670 * in a prefetch beyond physical memory.
3672 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3676 if (h->board_id != 0x3225103C)
3678 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3679 dma_prefetch |= 0x8000;
3680 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3683 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3687 unsigned long flags;
3689 /* under certain very rare conditions, this can take a while.
3690 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3691 * as we enter this code.)
3693 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3694 spin_lock_irqsave(&h->lock, flags);
3695 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
3696 spin_unlock_irqrestore(&h->lock, flags);
3697 if (!(doorbell_value & CFGTBL_ChangeReq))
3699 /* delay and try again */
3700 usleep_range(10000, 20000);
3704 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3708 trans_support = readl(&(h->cfgtable->TransportSupport));
3709 if (!(trans_support & SIMPLE_MODE))
3712 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3713 /* Update the field, and then ring the doorbell */
3714 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3715 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3716 hpsa_wait_for_mode_change_ack(h);
3717 print_cfg_table(&h->pdev->dev, h->cfgtable);
3718 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3719 dev_warn(&h->pdev->dev,
3720 "unable to get board into simple mode\n");
3723 h->transMethod = CFGTBL_Trans_Simple;
3727 static int __devinit hpsa_pci_init(struct ctlr_info *h)
3729 int prod_index, err;
3731 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3734 h->product_name = products[prod_index].product_name;
3735 h->access = *(products[prod_index].access);
3737 if (hpsa_board_disabled(h->pdev)) {
3738 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3741 err = pci_enable_device(h->pdev);
3743 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3747 err = pci_request_regions(h->pdev, "hpsa");
3749 dev_err(&h->pdev->dev,
3750 "cannot obtain PCI resources, aborting\n");
3753 hpsa_interrupt_mode(h);
3754 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3756 goto err_out_free_res;
3757 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3760 goto err_out_free_res;
3762 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
3764 goto err_out_free_res;
3765 err = hpsa_find_cfgtables(h);
3767 goto err_out_free_res;
3768 hpsa_find_board_params(h);
3770 if (!hpsa_CISS_signature_present(h)) {
3772 goto err_out_free_res;
3774 hpsa_enable_scsi_prefetch(h);
3775 hpsa_p600_dma_prefetch_quirk(h);
3776 err = hpsa_enter_simple_mode(h);
3778 goto err_out_free_res;
3783 iounmap(h->transtable);
3785 iounmap(h->cfgtable);
3789 * Deliberately omit pci_disable_device(): it does something nasty to
3790 * Smart Array controllers that pci_enable_device does not undo
3792 pci_release_regions(h->pdev);
3796 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3800 #define HBA_INQUIRY_BYTE_COUNT 64
3801 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3802 if (!h->hba_inquiry_data)
3804 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3805 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3807 kfree(h->hba_inquiry_data);
3808 h->hba_inquiry_data = NULL;
3812 static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3819 /* Reset the controller with a PCI power-cycle or via doorbell */
3820 rc = hpsa_kdump_hard_reset_controller(pdev);
3822 /* -ENOTSUPP here means we cannot reset the controller
3823 * but it's already (and still) up and running in
3824 * "performant mode". Or, it might be 640x, which can't reset
3825 * due to concerns about shared bbwc between 6402/6404 pair.
3827 if (rc == -ENOTSUPP)
3828 return 0; /* just try to do the kdump anyhow. */
3832 /* Now try to get the controller to respond to a no-op */
3833 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
3834 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3835 if (hpsa_noop(pdev) == 0)
3838 dev_warn(&pdev->dev, "no-op failed%s\n",
3839 (i < 11 ? "; re-trying" : ""));
3844 static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
3846 h->cmd_pool_bits = kzalloc(
3847 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
3848 sizeof(unsigned long), GFP_KERNEL);
3849 h->cmd_pool = pci_alloc_consistent(h->pdev,
3850 h->nr_cmds * sizeof(*h->cmd_pool),
3851 &(h->cmd_pool_dhandle));
3852 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3853 h->nr_cmds * sizeof(*h->errinfo_pool),
3854 &(h->errinfo_pool_dhandle));
3855 if ((h->cmd_pool_bits == NULL)
3856 || (h->cmd_pool == NULL)
3857 || (h->errinfo_pool == NULL)) {
3858 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
3864 static void hpsa_free_cmd_pool(struct ctlr_info *h)
3866 kfree(h->cmd_pool_bits);
3868 pci_free_consistent(h->pdev,
3869 h->nr_cmds * sizeof(struct CommandList),
3870 h->cmd_pool, h->cmd_pool_dhandle);
3871 if (h->errinfo_pool)
3872 pci_free_consistent(h->pdev,
3873 h->nr_cmds * sizeof(struct ErrorInfo),
3875 h->errinfo_pool_dhandle);
3878 static int hpsa_request_irq(struct ctlr_info *h,
3879 irqreturn_t (*msixhandler)(int, void *),
3880 irqreturn_t (*intxhandler)(int, void *))
3884 if (h->msix_vector || h->msi_vector)
3885 rc = request_irq(h->intr[h->intr_mode], msixhandler,
3886 IRQF_DISABLED, h->devname, h);
3888 rc = request_irq(h->intr[h->intr_mode], intxhandler,
3889 IRQF_DISABLED, h->devname, h);
3891 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
3892 h->intr[h->intr_mode], h->devname);
3898 static int __devinit hpsa_init_one(struct pci_dev *pdev,
3899 const struct pci_device_id *ent)
3902 struct ctlr_info *h;
3904 if (number_of_controllers == 0)
3905 printk(KERN_INFO DRIVER_NAME "\n");
3907 rc = hpsa_init_reset_devices(pdev);
3911 /* Command structures must be aligned on a 32-byte boundary because
3912 * the 5 lower bits of the address are used by the hardware and by
3913 * the driver. See comments in hpsa.h for more info.
3915 #define COMMANDLIST_ALIGNMENT 32
3916 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3917 h = kzalloc(sizeof(*h), GFP_KERNEL);
3922 h->busy_initializing = 1;
3923 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
3924 INIT_LIST_HEAD(&h->cmpQ);
3925 INIT_LIST_HEAD(&h->reqQ);
3926 spin_lock_init(&h->lock);
3927 spin_lock_init(&h->scan_lock);
3928 rc = hpsa_pci_init(h);
3932 sprintf(h->devname, "hpsa%d", number_of_controllers);
3933 h->ctlr = number_of_controllers;
3934 number_of_controllers++;
3936 /* configure PCI DMA stuff */
3937 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3941 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3945 dev_err(&pdev->dev, "no suitable DMA available\n");
3950 /* make sure the board interrupts are off */
3951 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3953 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
3955 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3956 h->devname, pdev->device,
3957 h->intr[h->intr_mode], dac ? "" : " not");
3958 if (hpsa_allocate_cmd_pool(h))
3960 if (hpsa_allocate_sg_chain_blocks(h))
3962 init_waitqueue_head(&h->scan_wait_queue);
3963 h->scan_finished = 1; /* no scan currently in progress */
3965 pci_set_drvdata(pdev, h);
3967 h->scsi_host = NULL;
3968 spin_lock_init(&h->devlock);
3970 /* Turn the interrupts on so we can service requests */
3971 h->access.set_intr_mask(h, HPSA_INTR_ON);
3973 hpsa_put_ctlr_into_performant_mode(h);
3974 hpsa_hba_inquiry(h);
3975 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3976 h->busy_initializing = 0;
3980 hpsa_free_sg_chain_blocks(h);
3981 hpsa_free_cmd_pool(h);
3982 free_irq(h->intr[h->intr_mode], h);
3985 h->busy_initializing = 0;
3990 static void hpsa_flush_cache(struct ctlr_info *h)
3993 struct CommandList *c;
3995 flush_buf = kzalloc(4, GFP_KERNEL);
3999 c = cmd_special_alloc(h);
4001 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4004 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
4005 RAID_CTLR_LUNID, TYPE_CMD);
4006 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
4007 if (c->err_info->CommandStatus != 0)
4008 dev_warn(&h->pdev->dev,
4009 "error flushing cache on controller\n");
4010 cmd_special_free(h, c);
4015 static void hpsa_shutdown(struct pci_dev *pdev)
4017 struct ctlr_info *h;
4019 h = pci_get_drvdata(pdev);
4020 /* Send the flush cache command and turn board interrupts off:
4021 * the flush writes all data held in the battery backed cache
4022 * out to the disks before the controller is shut down.
4024 hpsa_flush_cache(h);
4025 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4026 free_irq(h->intr[h->intr_mode], h);
4027 #ifdef CONFIG_PCI_MSI
4029 pci_disable_msix(h->pdev);
4030 else if (h->msi_vector)
4031 pci_disable_msi(h->pdev);
4032 #endif /* CONFIG_PCI_MSI */
4035 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4037 struct ctlr_info *h;
4039 if (pci_get_drvdata(pdev) == NULL) {
4040 dev_err(&pdev->dev, "unable to remove device\n");
4043 h = pci_get_drvdata(pdev);
4044 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
4045 hpsa_shutdown(pdev);
4047 iounmap(h->transtable);
4048 iounmap(h->cfgtable);
4049 hpsa_free_sg_chain_blocks(h);
4050 pci_free_consistent(h->pdev,
4051 h->nr_cmds * sizeof(struct CommandList),
4052 h->cmd_pool, h->cmd_pool_dhandle);
4053 pci_free_consistent(h->pdev,
4054 h->nr_cmds * sizeof(struct ErrorInfo),
4055 h->errinfo_pool, h->errinfo_pool_dhandle);
4056 pci_free_consistent(h->pdev, h->reply_pool_size,
4057 h->reply_pool, h->reply_pool_dhandle);
4058 kfree(h->cmd_pool_bits);
4059 kfree(h->blockFetchTable);
4060 kfree(h->hba_inquiry_data);
4062 * Deliberately omit pci_disable_device(): it does something nasty to
4063 * Smart Array controllers that pci_enable_device does not undo
4065 pci_release_regions(pdev);
4066 pci_set_drvdata(pdev, NULL);
4070 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
4071 __attribute__((unused)) pm_message_t state)
4076 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
4081 static struct pci_driver hpsa_pci_driver = {
4083 .probe = hpsa_init_one,
4084 .remove = __devexit_p(hpsa_remove_one),
4085 .id_table = hpsa_pci_device_id, /* id_table */
4086 .shutdown = hpsa_shutdown,
4087 .suspend = hpsa_suspend,
4088 .resume = hpsa_resume,
4091 /* Fill in bucket_map[], given nsgs (the max number of
4092 * scatter gather elements supported) and bucket[],
4093 * which is an array of 8 integers. The bucket[] array
4094 * contains 8 different DMA transfer sizes (in 16
4095 * byte increments) which the controller uses to fetch
4096 * commands. This function fills in bucket_map[], which
4097 * maps a given number of scatter gather elements to one of
4098 * the 8 DMA transfer sizes. The point of it is to allow the
4099 * controller to only do as much DMA as needed to fetch the
4100 * command, with the DMA transfer size encoded in the lower
4101 * bits of the command address.
4103 static void calc_bucket_map(int bucket[], int num_buckets,
4104 int nsgs, int *bucket_map)
4108 /* even a command with 0 SGs requires 4 blocks */
4109 #define MINIMUM_TRANSFER_BLOCKS 4
4110 #define NUM_BUCKETS 8
4111 /* Note, bucket_map must have nsgs+1 entries. */
4112 for (i = 0; i <= nsgs; i++) {
4113 /* Compute size of a command with i SG entries */
4114 size = i + MINIMUM_TRANSFER_BLOCKS;
4115 b = num_buckets; /* Assume the biggest bucket */
4116 /* Find the bucket that is just big enough */
4117 for (j = 0; j < 8; j++) {
4118 if (bucket[j] >= size) {
4123 /* for a command with i SG entries, use bucket b. */
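/* Worked example of the mapping above: with the bft[] table used below
 * ({5, 6, 8, 10, 12, 20, 28, ...}), a command carrying 7 SG entries
 * needs 7 + MINIMUM_TRANSFER_BLOCKS == 11 sixteen-byte blocks; the
 * smallest bucket that fits is 12, at index 4, so bucket_map[7] == 4
 * and the controller fetches 12 * 16 bytes for that command.
 */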
4128 static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4132 unsigned long register_value;
4134 /* This is a bit complicated. There are 8 registers on
4135 * the controller which we write to to tell it 8 different
4136 * sizes of commands which there may be. It's a way of
4137 * reducing the DMA done to fetch each command. Encoded into
4138 * each command's tag are 3 bits which communicate to the controller
4139 * which of the eight sizes that command fits within. The size of
4140 * each command depends on how many scatter gather entries there are.
4141 * Each SG entry requires 16 bytes. The eight registers are programmed
4142 * with the number of 16-byte blocks a command of that size requires.
4143 * The smallest command possible requires 5 such 16 byte blocks.
4144 * the largest command possible requires MAXSGENTRIES + 4 16-byte
4145 * blocks. Note, this only extends to the SG entries contained
4146 * within the command block, and does not extend to chained blocks
4147 * of SG elements. bft[] contains the eight values we write to
4148 * the registers. They are not evenly distributed, but have more
4149 * sizes for small commands, and fewer sizes for larger commands.
4151 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
4152 BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
4153 /* 5 = 1 s/g entry or 4k
4154 * 6 = 2 s/g entry or 8k
4155 * 8 = 4 s/g entry or 16k
4156 * 10 = 6 s/g entry or 24k
4159 h->reply_pool_wraparound = 1; /* spec: init to 1 */
4161 /* Controller spec: zero out this buffer. */
4162 memset(h->reply_pool, 0, h->reply_pool_size);
4163 h->reply_pool_head = h->reply_pool;
4165 bft[7] = h->max_sg_entries + 4;
4166 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
4167 for (i = 0; i < 8; i++)
4168 writel(bft[i], &h->transtable->BlockFetch[i]);
4170 /* size of controller ring buffer */
4171 writel(h->max_commands, &h->transtable->RepQSize);
4172 writel(1, &h->transtable->RepQCount);
4173 writel(0, &h->transtable->RepQCtrAddrLow32);
4174 writel(0, &h->transtable->RepQCtrAddrHigh32);
4175 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
4176 writel(0, &h->transtable->RepQAddr0High32);
4177 writel(CFGTBL_Trans_Performant | use_short_tags,
4178 &(h->cfgtable->HostWrite.TransportRequest));
4179 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4180 hpsa_wait_for_mode_change_ack(h);
4181 register_value = readl(&(h->cfgtable->TransportActive));
4182 if (!(register_value & CFGTBL_Trans_Performant)) {
4183 dev_warn(&h->pdev->dev, "unable to get board into"
4184 " performant mode\n");
4187 /* Change the access methods to the performant access methods */
4188 h->access = SA5_performant_access;
4189 h->transMethod = CFGTBL_Trans_Performant;
4192 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4196 if (hpsa_simple_mode)
4199 trans_support = readl(&(h->cfgtable->TransportSupport));
4200 if (!(trans_support & PERFORMANT_MODE))
4203 hpsa_get_max_perf_mode_cmds(h);
4204 h->max_sg_entries = 32;
4205 /* Performant mode ring buffer and supporting data structures */
4206 h->reply_pool_size = h->max_commands * sizeof(u64);
4207 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4208 &(h->reply_pool_dhandle));
4210 /* Need a block fetch table for performant mode */
4211 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
4212 sizeof(u32)), GFP_KERNEL);
4214 if ((h->reply_pool == NULL)
4215 || (h->blockFetchTable == NULL))
4218 hpsa_enter_performant_mode(h,
4219 trans_support & CFGTBL_Trans_use_short_tags);
4225 pci_free_consistent(h->pdev, h->reply_pool_size,
4226 h->reply_pool, h->reply_pool_dhandle);
4227 kfree(h->blockFetchTable);
4231 * This is it. Register the PCI driver information for the cards we control
4232 * the OS will call our registered routines when it finds one of our cards.
4234 static int __init hpsa_init(void)
4236 return pci_register_driver(&hpsa_pci_driver);
4239 static void __exit hpsa_cleanup(void)
4241 pci_unregister_driver(&hpsa_pci_driver);
4244 module_init(hpsa_init);
4245 module_exit(hpsa_cleanup);