/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 */
#include <scsi/scsicam.h>

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
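
/*
 * Illustrative note (not from the original header): the driver core talks to
 * every controller generation through this table, roughly
 *
 *	h->access.submit_command(h, c);
 *	if (h->access.intr_pending(h))
 *		tag = h->access.command_completed(h, q);
 *
 * so the SA5_*_access tables defined near the bottom of this file can swap in
 * per-family register protocols without touching the core I/O path.
 */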

struct hpsa_scsi_dev_t {
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* Count of commands to-be affected */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices
					 * counts commands sent to physical
					 * device via "ioaccel" path.
					 */
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks. We need these pointers for counting i/o's out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
};
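
/*
 * Sketch (an assumption, not code from this driver): phys_disk[] together
 * with queue_depth and ioaccel_cmds_out is what makes per-physical-drive
 * throttling possible for ioaccel requests, along the lines of
 *
 *	struct hpsa_scsi_dev_t *pd = logical_drive->phys_disk[map_index];
 *
 *	if (atomic_inc_return(&pd->ioaccel_cmds_out) > pd->queue_depth) {
 *		atomic_dec(&pd->ioaccel_cmds_out);
 *		// fall back to the normal RAID path instead
 *	}
 *
 * where map_index is a hypothetical name for the RAID-map entry in use.
 */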

struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};
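
/*
 * Illustrative note (assumption based on the completion routines below): the
 * controller produces completion tags into head[] and the driver walks the
 * ring with current_entry; in performant mode the low bit of each entry
 * toggles on every pass through the ring, so comparing it with wraparound
 * tells the consumer whether an entry is new or stale without a separate
 * producer index.
 */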

struct bmic_controller_parameters {
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 snapshot_priority;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
};

struct ctlr_info {
	struct pci_dev *pdev;
	void __iomem *vaddr;	/* mapped controller registers */
	int	nr_cmds; /* Number of commands allowed on this controller */
	int	max_commands;	/* number of entries in each reply queue */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	u8 max_cmd_sg_entries;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;
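	/*
	 * Sketch (assumption, not necessarily the exact driver code): the
	 * ioctl passthru path is expected to gate itself on this counter, e.g.
	 *
	 *	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
	 *		return -EAGAIN;
	 *	...
	 *	atomic_inc(&h->passthru_cmds_avail);
	 */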

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;	/* number of reply queues in use */
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u64 last_intr_timestamp;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
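	/*
	 * Illustrative only (assumption): the event-monitor work is expected
	 * to compare the controller's reported event bits against this mask,
	 * e.g.
	 *
	 *	if (events & RESCAN_REQUIRED_EVENT_BITS)
	 *		queue_delayed_work(h->rescan_ctlr_wq,
	 *				&h->rescan_ctlr_work, 0);
	 */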
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	raid_offload_debug;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t abort_cmd_wait_queue;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
};

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
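
/*
 * Sketch (assumption, not the driver's actual wait routine): the derived
 * iteration counts are meant to be consumed by a scratchpad polling loop
 * roughly like
 *
 *	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
 *		if (readl(vaddr + SA5_SCRATCHPAD_OFFSET) == HPSA_FIRMWARE_READY)
 *			break;
 *		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 *	}
 */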

#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C

#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			3

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
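
/*
 * Added note (assumption): the trailing readl above flushes the posted
 * doorbell write before the caller proceeds; the _no_read variant below
 * skips that flush for configurations that do not require it, trading
 * strict ordering for one less MMIO read per submitted command.
 */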

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller. */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 *  returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}

/*
 *	Returns true if an interrupt is pending..
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_intr_pending,
	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
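
/*
 * Illustrative only (assumption): during controller init the driver is
 * expected to bind one of the tables above to the controller, e.g.
 *
 *	h->access = SA5_performant_access;	(or SA5_access in simple mode)
 *
 * after which all submission, interrupt, and completion handling goes
 * through the access_method pointers.
 */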

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};