1 // SPDX-License-Identifier: GPL-2.0
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
7 * Based on the original DAC960 driver,
8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
27 static struct raid_template *myrb_raid_template;
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
34 return shost->max_channel - 1;
37 static struct myrb_devstate_name_entry {
38 enum myrb_devstate state;
40 } myrb_devstate_name_list[] = {
41 { MYRB_DEVICE_DEAD, "Dead" },
42 { MYRB_DEVICE_WO, "WriteOnly" },
43 { MYRB_DEVICE_ONLINE, "Online" },
44 { MYRB_DEVICE_CRITICAL, "Critical" },
45 { MYRB_DEVICE_STANDBY, "Standby" },
46 { MYRB_DEVICE_OFFLINE, "Offline" },
49 static const char *myrb_devstate_name(enum myrb_devstate state)
51 struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
54 for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 if (entry[i].state == state)
61 static struct myrb_raidlevel_name_entry {
62 enum myrb_raidlevel level;
64 } myrb_raidlevel_name_list[] = {
65 { MYRB_RAID_LEVEL0, "RAID0" },
66 { MYRB_RAID_LEVEL1, "RAID1" },
67 { MYRB_RAID_LEVEL3, "RAID3" },
68 { MYRB_RAID_LEVEL5, "RAID5" },
69 { MYRB_RAID_LEVEL6, "RAID6" },
70 { MYRB_RAID_JBOD, "JBOD" },
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
75 struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
78 for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 if (entry[i].level == level)
86 * myrb_create_mempools - allocates auxiliary data structures
88 * Return: true on success, false otherwise.
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
92 size_t elem_size, elem_align;
94 elem_align = sizeof(struct myrb_sge);
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 elem_size, elem_align, 0);
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
100 "Failed to allocate SG pool\n");
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 sizeof(struct myrb_dcdb),
106 sizeof(unsigned int), 0);
107 if (!cb->dcdb_pool) {
108 dma_pool_destroy(cb->sg_pool);
110 shost_printk(KERN_ERR, cb->host,
111 "Failed to allocate DCDB pool\n");
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
123 shost_printk(KERN_ERR, cb->host,
124 "Failed to create workqueue\n");
129 * Initialize the Monitoring Timer.
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
138 * myrb_destroy_mempools - tears down the memory pools for the controller
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
150 * myrb_reset_cmd - reset command block
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
154 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
156 memset(mbox, 0, sizeof(union myrb_cmd_mbox));
161 * myrb_qcmd - queues command block for execution
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
165 void __iomem *base = cb->io_base;
166 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
169 cb->write_cmd_mbox(next_mbox, mbox);
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
181 * myrb_exec_cmd - executes command block and waits for completion.
183 * Return: command status
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 struct myrb_cmdblk *cmd_blk)
188 DECLARE_COMPLETION_ONSTACK(cmpl);
191 cmd_blk->completion = &cmpl;
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
197 wait_for_completion(&cmpl);
198 return cmd_blk->status;
202 * myrb_exec_type3 - executes a type 3 command and waits for completion.
204 * Return: command status
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
207 enum myrb_cmd_opcode op, dma_addr_t addr)
209 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
210 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
211 unsigned short status;
213 mutex_lock(&cb->dcmd_mutex);
214 myrb_reset_cmd(cmd_blk);
215 mbox->type3.id = MYRB_DCMD_TAG;
216 mbox->type3.opcode = op;
217 mbox->type3.addr = addr;
218 status = myrb_exec_cmd(cb, cmd_blk);
219 mutex_unlock(&cb->dcmd_mutex);
224 * myrb_exec_type3D - executes a type 3D command and waits for completion.
226 * Return: command status
228 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
229 enum myrb_cmd_opcode op, struct scsi_device *sdev,
230 struct myrb_pdev_state *pdev_info)
232 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
233 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
234 unsigned short status;
235 dma_addr_t pdev_info_addr;
237 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
238 sizeof(struct myrb_pdev_state),
240 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
241 return MYRB_STATUS_SUBSYS_FAILED;
243 mutex_lock(&cb->dcmd_mutex);
244 myrb_reset_cmd(cmd_blk);
245 mbox->type3D.id = MYRB_DCMD_TAG;
246 mbox->type3D.opcode = op;
247 mbox->type3D.channel = sdev->channel;
248 mbox->type3D.target = sdev->id;
249 mbox->type3D.addr = pdev_info_addr;
250 status = myrb_exec_cmd(cb, cmd_blk);
251 mutex_unlock(&cb->dcmd_mutex);
252 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
253 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
254 if (status == MYRB_STATUS_SUCCESS &&
255 mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
256 myrb_translate_devstate(pdev_info);
261 static char *myrb_event_msg[] = {
262 "killed because write recovery failed",
263 "killed because of SCSI bus reset failure",
264 "killed because of double check condition",
265 "killed because it was removed",
266 "killed because of gross error on SCSI chip",
267 "killed because of bad tag returned from drive",
268 "killed because of timeout on SCSI command",
269 "killed because of reset SCSI command issued from system",
270 "killed because busy or parity error count exceeded limit",
271 "killed because of 'kill drive' command from system",
272 "killed because of selection timeout",
273 "killed due to SCSI phase sequence error",
274 "killed due to unknown status",
278 * myrb_get_event - get event log from HBA
279 * @cb: pointer to the hba structure
280 * @event: number of the event
282 * Execute a type 3E command and logs the event message
284 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
286 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
287 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
288 struct myrb_log_entry *ev_buf;
290 unsigned short status;
292 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
293 sizeof(struct myrb_log_entry),
294 &ev_addr, GFP_KERNEL);
298 myrb_reset_cmd(cmd_blk);
299 mbox->type3E.id = MYRB_MCMD_TAG;
300 mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
301 mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
302 mbox->type3E.opqual = 1;
303 mbox->type3E.ev_seq = event;
304 mbox->type3E.addr = ev_addr;
305 status = myrb_exec_cmd(cb, cmd_blk);
306 if (status != MYRB_STATUS_SUCCESS)
307 shost_printk(KERN_INFO, cb->host,
308 "Failed to get event log %d, status %04x\n",
311 else if (ev_buf->seq_num == event) {
312 struct scsi_sense_hdr sshdr;
314 memset(&sshdr, 0, sizeof(sshdr));
315 scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
317 if (sshdr.sense_key == VENDOR_SPECIFIC &&
319 sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
320 shost_printk(KERN_CRIT, cb->host,
321 "Physical drive %d:%d: %s\n",
322 ev_buf->channel, ev_buf->target,
323 myrb_event_msg[sshdr.ascq]);
325 shost_printk(KERN_CRIT, cb->host,
326 "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
327 ev_buf->channel, ev_buf->target,
328 sshdr.sense_key, sshdr.asc, sshdr.ascq);
331 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
336 * myrb_get_errtable - retrieves the error table from the controller
338 * Executes a type 3 command and logs the error table from the controller.
340 static void myrb_get_errtable(struct myrb_hba *cb)
342 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
343 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
344 unsigned short status;
345 struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
347 memcpy(&old_table, cb->err_table, sizeof(old_table));
349 myrb_reset_cmd(cmd_blk);
350 mbox->type3.id = MYRB_MCMD_TAG;
351 mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
352 mbox->type3.addr = cb->err_table_addr;
353 status = myrb_exec_cmd(cb, cmd_blk);
354 if (status == MYRB_STATUS_SUCCESS) {
355 struct myrb_error_entry *table = cb->err_table;
356 struct myrb_error_entry *new, *old;
357 size_t err_table_offset;
358 struct scsi_device *sdev;
360 shost_for_each_device(sdev, cb->host) {
361 if (sdev->channel >= myrb_logical_channel(cb->host))
363 err_table_offset = sdev->channel * MYRB_MAX_TARGETS
365 new = table + err_table_offset;
366 old = &old_table[err_table_offset];
367 if (new->parity_err == old->parity_err &&
368 new->soft_err == old->soft_err &&
369 new->hard_err == old->hard_err &&
370 new->misc_err == old->misc_err)
372 sdev_printk(KERN_CRIT, sdev,
373 "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
374 new->parity_err, new->soft_err,
375 new->hard_err, new->misc_err);
381 * myrb_get_ldev_info - retrieves the logical device table from the controller
383 * Executes a type 3 command and updates the logical device table.
385 * Return: command status
387 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
389 unsigned short status;
390 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
391 struct Scsi_Host *shost = cb->host;
393 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
395 if (status != MYRB_STATUS_SUCCESS)
398 for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
399 struct myrb_ldev_info *old = NULL;
400 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
401 struct scsi_device *sdev;
403 sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
406 if (new->state == MYRB_DEVICE_OFFLINE)
408 shost_printk(KERN_INFO, shost,
409 "Adding Logical Drive %d in state %s\n",
410 ldev_num, myrb_devstate_name(new->state));
411 scsi_add_device(shost, myrb_logical_channel(shost),
415 old = sdev->hostdata;
416 if (new->state != old->state)
417 shost_printk(KERN_INFO, shost,
418 "Logical Drive %d is now %s\n",
419 ldev_num, myrb_devstate_name(new->state));
420 if (new->wb_enabled != old->wb_enabled)
421 sdev_printk(KERN_INFO, sdev,
422 "Logical Drive is now WRITE %s\n",
423 (new->wb_enabled ? "BACK" : "THRU"));
424 memcpy(old, new, sizeof(*new));
425 scsi_device_put(sdev);
431 * myrb_get_rbld_progress - get rebuild progress information
433 * Executes a type 3 command and returns the rebuild progress
436 * Return: command status
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
439 struct myrb_rbld_progress *rbld)
441 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
442 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
443 struct myrb_rbld_progress *rbld_buf;
444 dma_addr_t rbld_addr;
445 unsigned short status;
447 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
448 sizeof(struct myrb_rbld_progress),
449 &rbld_addr, GFP_KERNEL);
451 return MYRB_STATUS_RBLD_NOT_CHECKED;
453 myrb_reset_cmd(cmd_blk);
454 mbox->type3.id = MYRB_MCMD_TAG;
455 mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
456 mbox->type3.addr = rbld_addr;
457 status = myrb_exec_cmd(cb, cmd_blk);
459 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
460 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
461 rbld_buf, rbld_addr);
466 * myrb_update_rbld_progress - updates the rebuild status
468 * Updates the rebuild status for the attached logical devices.
470 static void myrb_update_rbld_progress(struct myrb_hba *cb)
472 struct myrb_rbld_progress rbld_buf;
473 unsigned short status;
475 status = myrb_get_rbld_progress(cb, &rbld_buf);
476 if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
477 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
478 status = MYRB_STATUS_RBLD_SUCCESS;
479 if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
480 unsigned int blocks_done =
481 rbld_buf.ldev_size - rbld_buf.blocks_left;
482 struct scsi_device *sdev;
484 sdev = scsi_device_lookup(cb->host,
485 myrb_logical_channel(cb->host),
486 rbld_buf.ldev_num, 0);
491 case MYRB_STATUS_SUCCESS:
492 sdev_printk(KERN_INFO, sdev,
493 "Rebuild in Progress, %d%% completed\n",
494 (100 * (blocks_done >> 7))
495 / (rbld_buf.ldev_size >> 7));
497 case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
498 sdev_printk(KERN_INFO, sdev,
499 "Rebuild Failed due to Logical Drive Failure\n");
501 case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
502 sdev_printk(KERN_INFO, sdev,
503 "Rebuild Failed due to Bad Blocks on Other Drives\n");
505 case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
506 sdev_printk(KERN_INFO, sdev,
507 "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
509 case MYRB_STATUS_RBLD_SUCCESS:
510 sdev_printk(KERN_INFO, sdev,
511 "Rebuild Completed Successfully\n");
513 case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
514 sdev_printk(KERN_INFO, sdev,
515 "Rebuild Successfully Terminated\n");
520 scsi_device_put(sdev);
522 cb->last_rbld_status = status;
526 * myrb_get_cc_progress - retrieve the rebuild status
528 * Execute a type 3 Command and fetch the rebuild / consistency check
531 static void myrb_get_cc_progress(struct myrb_hba *cb)
533 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
534 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
535 struct myrb_rbld_progress *rbld_buf;
536 dma_addr_t rbld_addr;
537 unsigned short status;
539 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
540 sizeof(struct myrb_rbld_progress),
541 &rbld_addr, GFP_KERNEL);
543 cb->need_cc_status = true;
546 myrb_reset_cmd(cmd_blk);
547 mbox->type3.id = MYRB_MCMD_TAG;
548 mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
549 mbox->type3.addr = rbld_addr;
550 status = myrb_exec_cmd(cb, cmd_blk);
551 if (status == MYRB_STATUS_SUCCESS) {
552 unsigned int ldev_num = rbld_buf->ldev_num;
553 unsigned int ldev_size = rbld_buf->ldev_size;
554 unsigned int blocks_done =
555 ldev_size - rbld_buf->blocks_left;
556 struct scsi_device *sdev;
558 sdev = scsi_device_lookup(cb->host,
559 myrb_logical_channel(cb->host),
562 sdev_printk(KERN_INFO, sdev,
563 "Consistency Check in Progress: %d%% completed\n",
564 (100 * (blocks_done >> 7))
566 scsi_device_put(sdev);
569 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
570 rbld_buf, rbld_addr);
574 * myrb_bgi_control - updates background initialisation status
576 * Executes a type 3B command and updates the background initialisation status
578 static void myrb_bgi_control(struct myrb_hba *cb)
580 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
581 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
582 struct myrb_bgi_status *bgi, *last_bgi;
584 struct scsi_device *sdev = NULL;
585 unsigned short status;
587 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
588 &bgi_addr, GFP_KERNEL);
590 shost_printk(KERN_ERR, cb->host,
591 "Failed to allocate bgi memory\n");
594 myrb_reset_cmd(cmd_blk);
595 mbox->type3B.id = MYRB_DCMD_TAG;
596 mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
597 mbox->type3B.optype = 0x20;
598 mbox->type3B.addr = bgi_addr;
599 status = myrb_exec_cmd(cb, cmd_blk);
600 last_bgi = &cb->bgi_status;
601 sdev = scsi_device_lookup(cb->host,
602 myrb_logical_channel(cb->host),
605 case MYRB_STATUS_SUCCESS:
606 switch (bgi->status) {
607 case MYRB_BGI_INVALID:
609 case MYRB_BGI_STARTED:
612 sdev_printk(KERN_INFO, sdev,
613 "Background Initialization Started\n");
615 case MYRB_BGI_INPROGRESS:
618 if (bgi->blocks_done == last_bgi->blocks_done &&
619 bgi->ldev_num == last_bgi->ldev_num)
621 sdev_printk(KERN_INFO, sdev,
622 "Background Initialization in Progress: %d%% completed\n",
623 (100 * (bgi->blocks_done >> 7))
624 / (bgi->ldev_size >> 7));
626 case MYRB_BGI_SUSPENDED:
629 sdev_printk(KERN_INFO, sdev,
630 "Background Initialization Suspended\n");
632 case MYRB_BGI_CANCELLED:
635 sdev_printk(KERN_INFO, sdev,
636 "Background Initialization Cancelled\n");
639 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
641 case MYRB_STATUS_BGI_SUCCESS:
642 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
643 sdev_printk(KERN_INFO, sdev,
644 "Background Initialization Completed Successfully\n");
645 cb->bgi_status.status = MYRB_BGI_INVALID;
647 case MYRB_STATUS_BGI_ABORTED:
648 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
649 sdev_printk(KERN_INFO, sdev,
650 "Background Initialization Aborted\n");
652 case MYRB_STATUS_NO_BGI_INPROGRESS:
653 cb->bgi_status.status = MYRB_BGI_INVALID;
657 scsi_device_put(sdev);
658 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
663 * myrb_hba_enquiry - updates the controller status
665 * Executes a DAC_V1_Enquiry command and updates the controller status.
667 * Return: command status
669 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
671 struct myrb_enquiry old, *new;
672 unsigned short status;
674 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
676 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
677 if (status != MYRB_STATUS_SUCCESS)
681 if (new->ldev_count > old.ldev_count) {
682 int ldev_num = old.ldev_count - 1;
684 while (++ldev_num < new->ldev_count)
685 shost_printk(KERN_CRIT, cb->host,
686 "Logical Drive %d Now Exists\n",
689 if (new->ldev_count < old.ldev_count) {
690 int ldev_num = new->ldev_count - 1;
692 while (++ldev_num < old.ldev_count)
693 shost_printk(KERN_CRIT, cb->host,
694 "Logical Drive %d No Longer Exists\n",
697 if (new->status.deferred != old.status.deferred)
698 shost_printk(KERN_CRIT, cb->host,
699 "Deferred Write Error Flag is now %s\n",
700 (new->status.deferred ? "TRUE" : "FALSE"));
701 if (new->ev_seq != old.ev_seq) {
702 cb->new_ev_seq = new->ev_seq;
703 cb->need_err_info = true;
704 shost_printk(KERN_INFO, cb->host,
705 "Event log %d/%d (%d/%d) available\n",
706 cb->old_ev_seq, cb->new_ev_seq,
707 old.ev_seq, new->ev_seq);
709 if ((new->ldev_critical > 0 &&
710 new->ldev_critical != old.ldev_critical) ||
711 (new->ldev_offline > 0 &&
712 new->ldev_offline != old.ldev_offline) ||
713 (new->ldev_count != old.ldev_count)) {
714 shost_printk(KERN_INFO, cb->host,
715 "Logical drive count changed (%d/%d/%d)\n",
719 cb->need_ldev_info = true;
721 if (new->pdev_dead > 0 ||
722 new->pdev_dead != old.pdev_dead ||
723 time_after_eq(jiffies, cb->secondary_monitor_time
724 + MYRB_SECONDARY_MONITOR_INTERVAL)) {
725 cb->need_bgi_status = cb->bgi_status_supported;
726 cb->secondary_monitor_time = jiffies;
728 if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
729 new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
730 old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
732 cb->need_rbld = true;
733 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
735 if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
737 case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
738 shost_printk(KERN_INFO, cb->host,
739 "Consistency Check Completed Successfully\n");
741 case MYRB_STDBY_RBLD_IN_PROGRESS:
742 case MYRB_BG_RBLD_IN_PROGRESS:
744 case MYRB_BG_CHECK_IN_PROGRESS:
745 cb->need_cc_status = true;
747 case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
748 shost_printk(KERN_INFO, cb->host,
749 "Consistency Check Completed with Error\n");
751 case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
752 shost_printk(KERN_INFO, cb->host,
753 "Consistency Check Failed - Physical Device Failed\n");
755 case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
756 shost_printk(KERN_INFO, cb->host,
757 "Consistency Check Failed - Logical Drive Failed\n");
759 case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
760 shost_printk(KERN_INFO, cb->host,
761 "Consistency Check Failed - Other Causes\n");
763 case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
764 shost_printk(KERN_INFO, cb->host,
765 "Consistency Check Successfully Terminated\n");
768 else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
769 cb->need_cc_status = true;
771 return MYRB_STATUS_SUCCESS;
775 * myrb_set_pdev_state - sets the device state for a physical device
777 * Return: command status
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
780 struct scsi_device *sdev, enum myrb_devstate state)
782 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
783 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
784 unsigned short status;
786 mutex_lock(&cb->dcmd_mutex);
787 mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
788 mbox->type3D.id = MYRB_DCMD_TAG;
789 mbox->type3D.channel = sdev->channel;
790 mbox->type3D.target = sdev->id;
791 mbox->type3D.state = state & 0x1F;
792 status = myrb_exec_cmd(cb, cmd_blk);
793 mutex_unlock(&cb->dcmd_mutex);
799 * myrb_enable_mmio - enables the Memory Mailbox Interface
801 * PD and P controller types have no memory mailbox, but still need the
802 * other dma mapped memory.
804 * Return: true on success, false otherwise.
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
808 void __iomem *base = cb->io_base;
809 struct pci_dev *pdev = cb->pdev;
810 size_t err_table_size;
811 size_t ldev_info_size;
812 union myrb_cmd_mbox *cmd_mbox_mem;
813 struct myrb_stat_mbox *stat_mbox_mem;
814 union myrb_cmd_mbox mbox;
815 unsigned short status;
817 memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
819 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
820 dev_err(&pdev->dev, "DMA mask out of range\n");
824 cb->enquiry = dma_alloc_coherent(&pdev->dev,
825 sizeof(struct myrb_enquiry),
826 &cb->enquiry_addr, GFP_KERNEL);
830 err_table_size = sizeof(struct myrb_error_entry) *
831 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
832 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833 &cb->err_table_addr, GFP_KERNEL);
837 ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
838 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839 &cb->ldev_info_addr, GFP_KERNEL);
840 if (!cb->ldev_info_buf)
844 * Skip mailbox initialisation for PD and P Controllers
849 /* These are the base addresses for the command memory mailbox array */
850 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
855 if (!cb->first_cmd_mbox)
858 cmd_mbox_mem = cb->first_cmd_mbox;
859 cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
860 cb->last_cmd_mbox = cmd_mbox_mem;
861 cb->next_cmd_mbox = cb->first_cmd_mbox;
862 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
865 /* These are the base addresses for the status memory mailbox array */
866 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
867 sizeof(struct myrb_stat_mbox);
868 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
872 if (!cb->first_stat_mbox)
875 stat_mbox_mem = cb->first_stat_mbox;
876 stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
877 cb->last_stat_mbox = stat_mbox_mem;
878 cb->next_stat_mbox = cb->first_stat_mbox;
880 /* Enable the Memory Mailbox Interface. */
881 cb->dual_mode_interface = true;
882 mbox.typeX.opcode = 0x2B;
884 mbox.typeX.opcode2 = 0x14;
885 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
888 status = mmio_init_fn(pdev, base, &mbox);
889 if (status != MYRB_STATUS_SUCCESS) {
890 cb->dual_mode_interface = false;
891 mbox.typeX.opcode2 = 0x10;
892 status = mmio_init_fn(pdev, base, &mbox);
893 if (status != MYRB_STATUS_SUCCESS) {
895 "Failed to enable mailbox, statux %02X\n",
904 * myrb_get_hba_config - reads the configuration information
906 * Reads the configuration information from the controller and
907 * initializes the controller structure.
909 * Return: 0 on success, errno otherwise
911 static int myrb_get_hba_config(struct myrb_hba *cb)
913 struct myrb_enquiry2 *enquiry2;
914 dma_addr_t enquiry2_addr;
915 struct myrb_config2 *config2;
916 dma_addr_t config2_addr;
917 struct Scsi_Host *shost = cb->host;
918 struct pci_dev *pdev = cb->pdev;
919 int pchan_max = 0, pchan_cur = 0;
920 unsigned short status;
921 int ret = -ENODEV, memsize = 0;
923 enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
924 &enquiry2_addr, GFP_KERNEL);
926 shost_printk(KERN_ERR, cb->host,
927 "Failed to allocate V1 enquiry2 memory\n");
930 config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
931 &config2_addr, GFP_KERNEL);
933 shost_printk(KERN_ERR, cb->host,
934 "Failed to allocate V1 config2 memory\n");
935 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
936 enquiry2, enquiry2_addr);
939 mutex_lock(&cb->dma_mutex);
940 status = myrb_hba_enquiry(cb);
941 mutex_unlock(&cb->dma_mutex);
942 if (status != MYRB_STATUS_SUCCESS) {
943 shost_printk(KERN_WARNING, cb->host,
944 "Failed it issue V1 Enquiry\n");
948 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
949 if (status != MYRB_STATUS_SUCCESS) {
950 shost_printk(KERN_WARNING, cb->host,
951 "Failed to issue V1 Enquiry2\n");
955 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
956 if (status != MYRB_STATUS_SUCCESS) {
957 shost_printk(KERN_WARNING, cb->host,
958 "Failed to issue ReadConfig2\n");
962 status = myrb_get_ldev_info(cb);
963 if (status != MYRB_STATUS_SUCCESS) {
964 shost_printk(KERN_WARNING, cb->host,
965 "Failed to get logical drive information\n");
970 * Initialize the Controller Model Name and Full Model Name fields.
972 switch (enquiry2->hw.sub_model) {
973 case DAC960_V1_P_PD_PU:
974 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
975 strcpy(cb->model_name, "DAC960PU");
977 strcpy(cb->model_name, "DAC960PD");
980 strcpy(cb->model_name, "DAC960PL");
983 strcpy(cb->model_name, "DAC960PG");
986 strcpy(cb->model_name, "DAC960PJ");
989 strcpy(cb->model_name, "DAC960PR");
992 strcpy(cb->model_name, "DAC960PT");
995 strcpy(cb->model_name, "DAC960PTL0");
998 strcpy(cb->model_name, "DAC960PRL");
1000 case DAC960_V1_PTL1:
1001 strcpy(cb->model_name, "DAC960PTL1");
1003 case DAC960_V1_1164P:
1004 strcpy(cb->model_name, "eXtremeRAID 1100");
1007 shost_printk(KERN_WARNING, cb->host,
1008 "Unknown Model %X\n",
1009 enquiry2->hw.sub_model);
1013 * Initialize the Controller Firmware Version field and verify that it
1014 * is a supported firmware version.
1015 * The supported firmware versions are:
1017 * DAC1164P 5.06 and above
1018 * DAC960PTL/PRL/PJ/PG 4.06 and above
1019 * DAC960PU/PD/PL 3.51 and above
1020 * DAC960PU/PD/PL/P 2.73 and above
1022 #if defined(CONFIG_ALPHA)
1024 * DEC Alpha machines were often equipped with DAC960 cards that were
1025 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026 * the last custom FW revision to be released by DEC for these older
1027 * controllers, appears to work quite well with this driver.
1029 * Cards tested successfully were several versions each of the PD and
1030 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032 * back of the board, of:
1034 * KZPSC: D040347 (1-channel) or D040348 (2-channel)
1035 * or D040349 (3-channel)
1036 * KZPAC: D040395 (1-channel) or D040396 (2-channel)
1037 * or D040397 (3-channel)
1039 # define FIRMWARE_27X "2.70"
1041 # define FIRMWARE_27X "2.73"
1044 if (enquiry2->fw.major_version == 0) {
1045 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047 enquiry2->fw.firmware_type = '0';
1048 enquiry2->fw.turn_id = 0;
1050 snprintf(cb->fw_version, sizeof(cb->fw_version),
1052 enquiry2->fw.major_version,
1053 enquiry2->fw.minor_version,
1054 enquiry2->fw.firmware_type,
1055 enquiry2->fw.turn_id);
1056 if (!((enquiry2->fw.major_version == 5 &&
1057 enquiry2->fw.minor_version >= 6) ||
1058 (enquiry2->fw.major_version == 4 &&
1059 enquiry2->fw.minor_version >= 6) ||
1060 (enquiry2->fw.major_version == 3 &&
1061 enquiry2->fw.minor_version >= 51) ||
1062 (enquiry2->fw.major_version == 2 &&
1063 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064 shost_printk(KERN_WARNING, cb->host,
1065 "Firmware Version '%s' unsupported\n",
1070 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071 * Enclosure Management Enabled fields.
1073 switch (enquiry2->hw.model) {
1074 case MYRB_5_CHANNEL_BOARD:
1077 case MYRB_3_CHANNEL_BOARD:
1078 case MYRB_3_CHANNEL_ASIC_DAC:
1081 case MYRB_2_CHANNEL_BOARD:
1085 pchan_max = enquiry2->cfg_chan;
1088 pchan_cur = enquiry2->cur_chan;
1089 if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1091 else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1095 cb->ldev_block_size = enquiry2->ldev_block_size;
1096 shost->max_channel = pchan_cur;
1097 shost->max_id = enquiry2->max_targets;
1098 memsize = enquiry2->mem_size >> 20;
1099 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1101 * Initialize the Controller Queue Depth, Driver Queue Depth,
1102 * Logical Drive Count, Maximum Blocks per Command, Controller
1103 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104 * The Driver Queue Depth must be at most one less than the
1105 * Controller Queue Depth to allow for an automatic drive
1106 * rebuild operation.
1108 shost->can_queue = cb->enquiry->max_tcq;
1109 if (shost->can_queue < 3)
1110 shost->can_queue = enquiry2->max_cmds;
1111 if (shost->can_queue < 3)
1112 /* Play safe and disable TCQ */
1113 shost->can_queue = 1;
1115 if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117 shost->max_sectors = enquiry2->max_sectors;
1118 shost->sg_tablesize = enquiry2->max_sge;
1119 if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1122 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1124 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125 >> (10 - MYRB_BLKSIZE_BITS);
1126 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127 >> (10 - MYRB_BLKSIZE_BITS);
1128 /* Assume 255/63 translation */
1129 cb->ldev_geom_heads = 255;
1130 cb->ldev_geom_sectors = 63;
1131 if (config2->drive_geometry) {
1132 cb->ldev_geom_heads = 128;
1133 cb->ldev_geom_sectors = 32;
1137 * Initialize the Background Initialization Status.
1139 if ((cb->fw_version[0] == '4' &&
1140 strcmp(cb->fw_version, "4.08") >= 0) ||
1141 (cb->fw_version[0] == '5' &&
1142 strcmp(cb->fw_version, "5.08") >= 0)) {
1143 cb->bgi_status_supported = true;
1144 myrb_bgi_control(cb);
1146 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1150 shost_printk(KERN_INFO, cb->host,
1151 "Configuring %s PCI RAID Controller\n", cb->model_name);
1152 shost_printk(KERN_INFO, cb->host,
1153 " Firmware Version: %s, Memory Size: %dMB\n",
1154 cb->fw_version, memsize);
1155 if (cb->io_addr == 0)
1156 shost_printk(KERN_INFO, cb->host,
1157 " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158 (unsigned long)cb->pci_addr, cb->irq);
1160 shost_printk(KERN_INFO, cb->host,
1161 " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1164 shost_printk(KERN_INFO, cb->host,
1165 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166 cb->host->can_queue, cb->host->max_sectors);
1167 shost_printk(KERN_INFO, cb->host,
1168 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169 cb->host->can_queue, cb->host->sg_tablesize,
1170 MYRB_SCATTER_GATHER_LIMIT);
1171 shost_printk(KERN_INFO, cb->host,
1172 " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173 cb->stripe_size, cb->segment_size,
1174 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1176 " SAF-TE Enclosure Management Enabled" : "");
1177 shost_printk(KERN_INFO, cb->host,
1178 " Physical: %d/%d channels %d/%d/%d devices\n",
1179 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182 shost_printk(KERN_INFO, cb->host,
1183 " Logical: 1/1 channels, %d/%d disks\n",
1184 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1187 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188 enquiry2, enquiry2_addr);
1189 dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190 config2, config2_addr);
1196 * myrb_unmap - unmaps controller structures
1198 static void myrb_unmap(struct myrb_hba *cb)
1200 if (cb->ldev_info_buf) {
1201 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1203 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204 cb->ldev_info_buf, cb->ldev_info_addr);
1205 cb->ldev_info_buf = NULL;
1207 if (cb->err_table) {
1208 size_t err_table_size = sizeof(struct myrb_error_entry) *
1209 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210 dma_free_coherent(&cb->pdev->dev, err_table_size,
1211 cb->err_table, cb->err_table_addr);
1212 cb->err_table = NULL;
1215 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216 cb->enquiry, cb->enquiry_addr);
1219 if (cb->first_stat_mbox) {
1220 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221 cb->first_stat_mbox, cb->stat_mbox_addr);
1222 cb->first_stat_mbox = NULL;
1224 if (cb->first_cmd_mbox) {
1225 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227 cb->first_cmd_mbox = NULL;
1232 * myrb_cleanup - cleanup controller structures
/* Full controller teardown: releases DMA structures (via myrb_unmap on an
 * elided line), disables interrupts, unmaps MMIO, frees the IRQ, releases
 * the I/O region and PCI device, and drops the Scsi_Host reference. */
1234 static void myrb_cleanup(struct myrb_hba *cb)
1236 struct pci_dev *pdev = cb->pdev;
1238 /* Free the memory mailbox, status, and related structures */
1241 if (cb->mmio_base) {
/* Quiesce controller interrupts before tearing down the mapping. */
1242 cb->disable_intr(cb->io_base);
1243 iounmap(cb->mmio_base);
1246 free_irq(cb->irq, cb);
/* 0x80 matches the I/O-port window size requested at probe time. */
1248 release_region(cb->io_addr, 0x80);
1249 pci_set_drvdata(pdev, NULL);
1250 pci_disable_device(pdev);
1251 scsi_host_put(cb->host);
/* SCSI error-handler host reset: triggers the controller-specific hard
 * reset via the cb->reset method hook. (Return statement is on an elided
 * line — presumably SUCCESS.) */
1254 static int myrb_host_reset(struct scsi_cmnd *scmd)
1256 struct Scsi_Host *shost = scmd->device->host;
1257 struct myrb_hba *cb = shost_priv(shost);
1259 cb->reset(cb->io_base);
/* Queue a pass-through (DCDB) command to a physical device. Builds a DCDB
 * from the SCSI command, maps the data buffer, and submits via cb->qcmd
 * under the queue lock. Returns SCSI_MLQUEUE_HOST_BUSY when the DCDB pool
 * is exhausted. */
1263 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1264 struct scsi_cmnd *scmd)
1266 struct request *rq = scsi_cmd_to_rq(scmd);
1267 struct myrb_hba *cb = shost_priv(shost);
1268 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1269 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1270 struct myrb_dcdb *dcdb;
1271 dma_addr_t dcdb_addr;
1272 struct scsi_device *sdev = scmd->device;
1273 struct scatterlist *sgl;
1274 unsigned long flags;
1277 myrb_reset_cmd(cmd_blk);
/* GFP_ATOMIC: queuecommand may run in atomic context. */
1278 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1280 return SCSI_MLQUEUE_HOST_BUSY;
1281 nsge = scsi_dma_map(scmd);
/* DMA mapping failed (or >1 segment — check elided guard): undo the
 * DCDB allocation and complete the command with DID_ERROR. */
1283 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1284 scmd->result = (DID_ERROR << 16);
1285 scmd->scsi_done(scmd);
1289 mbox->type3.opcode = MYRB_CMD_DCDB;
/* +3 keeps tags clear of the reserved monitor/dcmd command IDs. */
1290 mbox->type3.id = rq->tag + 3;
1291 mbox->type3.addr = dcdb_addr;
1292 dcdb->channel = sdev->channel;
1293 dcdb->target = sdev->id;
/* Translate the midlayer data direction into DCDB transfer encoding. */
1294 switch (scmd->sc_data_direction) {
1296 dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1299 dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1301 case DMA_FROM_DEVICE:
1302 dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1305 dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1308 dcdb->early_status = false;
/* Bucket the block-layer timeout (seconds) into the coarse DCDB codes. */
1309 if (rq->timeout <= 10)
1310 dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1311 else if (rq->timeout <= 60)
1312 dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1313 else if (rq->timeout <= 600)
1314 dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1316 dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1317 dcdb->no_autosense = false;
1318 dcdb->allow_disconnect = true;
1319 sgl = scsi_sglist(scmd);
1320 dcdb->dma_addr = sg_dma_address(sgl);
/* Transfer length is split into a 16-bit low part and a 4-bit high part. */
1321 if (sg_dma_len(sgl) > USHRT_MAX) {
1322 dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1323 dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1325 dcdb->xfer_len_lo = sg_dma_len(sgl);
1326 dcdb->xfer_len_hi4 = 0;
1328 dcdb->cdb_len = scmd->cmd_len;
1329 dcdb->sense_len = sizeof(dcdb->sense);
1330 memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
/* Submission must be serialized with the interrupt handler. */
1332 spin_lock_irqsave(&cb->queue_lock, flags);
1333 cb->qcmd(cb, cmd_blk);
1334 spin_unlock_irqrestore(&cb->queue_lock, flags);
/* Emulate a standard INQUIRY response for a logical drive: fixed header
 * plus "MYLEX" vendor bytes, patched with the controller model name and
 * firmware version before being copied into the command's SG list. */
1338 static void myrb_inquiry(struct myrb_hba *cb,
1339 struct scsi_cmnd *scmd)
1341 unsigned char inq[36] = {
1342 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1343 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1344 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1345 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 0x20, 0x20, 0x20, 0x20,
/* Advertise wide-bus support flags (the flag-setting statements are on
 * elided lines following each bus_width test). */
1349 if (cb->bus_width > 16)
1351 if (cb->bus_width > 8)
1353 memcpy(&inq[16], cb->model_name, 16);
/* Firmware revision bytes are picked character-by-character from the
 * "x.yy-z" style fw_version string. */
1354 memcpy(&inq[32], cb->fw_version, 1);
1355 memcpy(&inq[33], &cb->fw_version[2], 2);
1356 memcpy(&inq[35], &cb->fw_version[7], 1);
1358 scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
/* Emulate MODE SENSE for a logical drive. Builds a caching mode page
 * (plus an optional block descriptor when DBD is clear) reflecting the
 * drive size, block size, write-back setting and cache segment size. */
1362 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1363 struct myrb_ldev_info *ldev_info)
1365 unsigned char modes[32], *mode_pg;
/* DBD bit: when set, no block descriptor is returned. */
1369 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1372 mode_pg = &modes[4];
1375 mode_pg = &modes[12];
1377 memset(modes, 0, sizeof(modes));
1378 modes[0] = mode_len - 1;
1380 unsigned char *block_desc = &modes[4];
1383 put_unaligned_be32(ldev_info->size, &block_desc[0]);
/* Block length occupies the low 3 bytes of the descriptor; writing a
 * be32 at offset 5 overlaps the reserved byte by design here. */
1384 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1388 if (ldev_info->wb_enabled)
1390 if (cb->segment_size) {
1392 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1395 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
/* Emulate REQUEST SENSE: synthesize a NO SENSE payload and copy it into
 * the command's data buffer. */
1398 static void myrb_request_sense(struct myrb_hba *cb,
1399 struct scsi_cmnd *scmd)
1401 scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
1402 scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1403 SCSI_SENSE_BUFFERSIZE);
/* Emulate READ CAPACITY(10): return the last LBA (size - 1) and the
 * logical block size, both big-endian, in an 8-byte payload. */
1406 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1407 struct myrb_ldev_info *ldev_info)
1409 unsigned char data[8];
1411 dev_dbg(&scmd->device->sdev_gendev,
1412 "Capacity %u, blocksize %u\n",
1413 ldev_info->size, cb->ldev_block_size);
1414 put_unaligned_be32(ldev_info->size - 1, &data[0]);
1415 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1416 scsi_sg_copy_from_buffer(scmd, data, 8);
/* Queue a command to a logical drive. Non-I/O CDBs (INQUIRY, MODE SENSE,
 * READ CAPACITY, ...) are emulated in the driver; reads/writes are turned
 * into controller READ/WRITE (or SG variants) mailbox commands. */
1419 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1420 struct scsi_cmnd *scmd)
1422 struct myrb_hba *cb = shost_priv(shost);
1423 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1424 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1425 struct myrb_ldev_info *ldev_info;
1426 struct scsi_device *sdev = scmd->device;
1427 struct scatterlist *sgl;
1428 unsigned long flags;
1433 ldev_info = sdev->hostdata;
/* Reject commands unless the logical drive is usable (online, or
 * write-only during rebuild). */
1434 if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1435 ldev_info->state != MYRB_DEVICE_WO) {
1436 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1437 sdev->id, ldev_info ? ldev_info->state : 0xff)
1438 scmd->result = (DID_BAD_TARGET << 16);
1439 scmd->scsi_done(scmd);
1442 switch (scmd->cmnd[0]) {
1443 case TEST_UNIT_READY:
1444 scmd->result = (DID_OK << 16);
1445 scmd->scsi_done(scmd);
/* INQUIRY: EVPD requests are rejected as invalid-field-in-CDB. */
1448 if (scmd->cmnd[1] & 1) {
1449 /* Illegal request, invalid field in CDB */
1450 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1452 myrb_inquiry(cb, scmd);
1453 scmd->result = (DID_OK << 16);
1455 scmd->scsi_done(scmd);
1457 case SYNCHRONIZE_CACHE:
1458 scmd->result = (DID_OK << 16);
1459 scmd->scsi_done(scmd);
/* MODE SENSE: only the caching page (0x08) or all pages (0x3F). */
1462 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1463 (scmd->cmnd[2] & 0x3F) != 0x08) {
1464 /* Illegal request, invalid field in CDB */
1465 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1467 myrb_mode_sense(cb, scmd, ldev_info);
1468 scmd->result = (DID_OK << 16);
1470 scmd->scsi_done(scmd);
/* READ CAPACITY: reject RelAdr/PMI variants. */
1473 if ((scmd->cmnd[1] & 1) ||
1474 (scmd->cmnd[8] & 1)) {
1475 /* Illegal request, invalid field in CDB */
1476 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1477 scmd->scsi_done(scmd);
1480 lba = get_unaligned_be32(&scmd->cmnd[2]);
1482 /* Illegal request, invalid field in CDB */
1483 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1484 scmd->scsi_done(scmd);
1487 myrb_read_capacity(cb, scmd, ldev_info);
1488 scmd->scsi_done(scmd);
1491 myrb_request_sense(cb, scmd);
1492 scmd->result = (DID_OK << 16);
1494 case SEND_DIAGNOSTIC:
/* Only the self-test bit (0x04) is supported. */
1495 if (scmd->cmnd[1] != 0x04) {
1496 /* Illegal request, invalid field in CDB */
1497 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1499 /* Assume good status */
1500 scmd->result = (DID_OK << 16);
1502 scmd->scsi_done(scmd);
/* READ(6): a write-only (rebuilding) drive cannot be read. */
1505 if (ldev_info->state == MYRB_DEVICE_WO) {
1506 /* Data protect, attempt to read invalid data */
1507 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1508 scmd->scsi_done(scmd);
/* 6-byte CDB: 21-bit LBA, 8-bit transfer length. */
1513 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1514 (scmd->cmnd[2] << 8) |
1516 block_cnt = scmd->cmnd[4];
1519 if (ldev_info->state == MYRB_DEVICE_WO) {
1520 /* Data protect, attempt to read invalid data */
1521 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1522 scmd->scsi_done(scmd);
1527 case VERIFY: /* 0x2F */
1528 case WRITE_VERIFY: /* 0x2E */
/* 10-byte CDB: 32-bit LBA, 16-bit transfer length. */
1529 lba = get_unaligned_be32(&scmd->cmnd[2]);
1530 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1533 if (ldev_info->state == MYRB_DEVICE_WO) {
1534 /* Data protect, attempt to read invalid data */
1535 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1536 scmd->scsi_done(scmd);
1541 case VERIFY_12: /* 0xAF */
1542 case WRITE_VERIFY_12: /* 0xAE */
/* 12-byte CDB: 32-bit LBA, 32-bit transfer length. */
1543 lba = get_unaligned_be32(&scmd->cmnd[2]);
1544 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1547 /* Illegal request, invalid opcode */
1548 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
1549 scmd->scsi_done(scmd);
/* I/O path: build the mailbox command for the mapped transfer. */
1553 myrb_reset_cmd(cmd_blk);
1554 mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
1555 if (scmd->sc_data_direction == DMA_NONE)
1557 nsge = scsi_dma_map(scmd);
/* Single-segment transfer: plain READ/WRITE with a direct address. */
1559 sgl = scsi_sglist(scmd);
1560 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1561 mbox->type5.opcode = MYRB_CMD_READ;
1563 mbox->type5.opcode = MYRB_CMD_WRITE;
1565 mbox->type5.ld.xfer_len = block_cnt;
1566 mbox->type5.ld.ldev_num = sdev->id;
1567 mbox->type5.lba = lba;
1568 mbox->type5.addr = (u32)sg_dma_address(sgl);
/* Multi-segment transfer: allocate a hardware SG list and use the
 * scatter/gather opcodes. */
1570 struct myrb_sge *hw_sgl;
1571 dma_addr_t hw_sgl_addr;
1574 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1576 return SCSI_MLQUEUE_HOST_BUSY;
1578 cmd_blk->sgl = hw_sgl;
1579 cmd_blk->sgl_addr = hw_sgl_addr;
1581 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1582 mbox->type5.opcode = MYRB_CMD_READ_SG;
1584 mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1586 mbox->type5.ld.xfer_len = block_cnt;
1587 mbox->type5.ld.ldev_num = sdev->id;
1588 mbox->type5.lba = lba;
1589 mbox->type5.addr = hw_sgl_addr;
1590 mbox->type5.sg_count = nsge;
1592 scsi_for_each_sg(scmd, sgl, nsge, i) {
1593 hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1594 hw_sgl->sge_count = (u32)sg_dma_len(sgl);
/* Submit under the queue lock, serialized with the IRQ handler. */
1599 spin_lock_irqsave(&cb->queue_lock, flags);
1600 cb->qcmd(cb, cmd_blk);
1601 spin_unlock_irqrestore(&cb->queue_lock, flags);
/* Top-level queuecommand dispatcher: logical-drive channel goes to the
 * emulating ldev path, everything else to the DCDB pass-through path;
 * out-of-range channels are failed with DID_BAD_TARGET. */
1606 static int myrb_queuecommand(struct Scsi_Host *shost,
1607 struct scsi_cmnd *scmd)
1609 struct scsi_device *sdev = scmd->device;
1611 if (sdev->channel > myrb_logical_channel(shost)) {
1612 scmd->result = (DID_BAD_TARGET << 16);
1613 scmd->scsi_done(scmd);
1616 if (sdev->channel == myrb_logical_channel(shost))
1617 return myrb_ldev_queuecommand(shost, scmd);
1619 return myrb_pthru_queuecommand(shost, scmd);
/* slave_alloc for a logical drive: snapshot the drive's info from the
 * controller-wide buffer into per-sdev hostdata, and publish the RAID
 * level through the raid_class template. */
1622 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1624 struct myrb_hba *cb = shost_priv(sdev->host);
1625 struct myrb_ldev_info *ldev_info;
1626 unsigned short ldev_num = sdev->id;
1627 enum raid_level level;
1629 ldev_info = cb->ldev_info_buf + ldev_num;
1633 sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1634 if (!sdev->hostdata)
1636 dev_dbg(&sdev->sdev_gendev,
1637 "slave alloc ldev %d state %x\n",
1638 ldev_num, ldev_info->state);
/* Private copy so later controller updates don't race this sdev. */
1639 memcpy(sdev->hostdata, ldev_info,
1640 sizeof(*ldev_info));
/* Map firmware RAID level to the generic raid_class level. */
1641 switch (ldev_info->raid_level) {
1642 case MYRB_RAID_LEVEL0:
1643 level = RAID_LEVEL_LINEAR;
1645 case MYRB_RAID_LEVEL1:
1646 level = RAID_LEVEL_1;
1648 case MYRB_RAID_LEVEL3:
1649 level = RAID_LEVEL_3;
1651 case MYRB_RAID_LEVEL5:
1652 level = RAID_LEVEL_5;
1654 case MYRB_RAID_LEVEL6:
1655 level = RAID_LEVEL_6;
1657 case MYRB_RAID_JBOD:
1658 level = RAID_LEVEL_JBOD;
1661 level = RAID_LEVEL_UNKNOWN;
1664 raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
/* slave_alloc for a physical device: query the controller for its state;
 * absent or unreadable devices are rejected (via elided error paths), a
 * present device keeps the state buffer as hostdata. */
1668 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1670 struct myrb_hba *cb = shost_priv(sdev->host);
1671 struct myrb_pdev_state *pdev_info;
1672 unsigned short status;
1674 if (sdev->id > MYRB_MAX_TARGETS)
/* GFP_DMA: the controller DMAs the state directly into this buffer. */
1677 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1681 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1683 if (status != MYRB_STATUS_SUCCESS) {
1684 dev_dbg(&sdev->sdev_gendev,
1685 "Failed to get device state, status %x\n",
1690 if (!pdev_info->present) {
1691 dev_dbg(&sdev->sdev_gendev,
1692 "device not present, skip\n");
1696 dev_dbg(&sdev->sdev_gendev,
1697 "slave alloc pdev %d:%d state %x\n",
1698 sdev->channel, sdev->id, pdev_info->state);
1699 sdev->hostdata = pdev_info;
/* slave_alloc dispatcher: logical channel -> ldev variant, physical
 * channels -> pdev variant; channels beyond the logical one are invalid. */
1704 static int myrb_slave_alloc(struct scsi_device *sdev)
1706 if (sdev->channel > myrb_logical_channel(sdev->host))
1712 if (sdev->channel == myrb_logical_channel(sdev->host))
1713 return myrb_ldev_slave_alloc(sdev);
1715 return myrb_pdev_slave_alloc(sdev);
/* slave_configure: physical devices get no_uld_attach (no upper-layer
 * driver binds to pass-through devices); logical drives log a warning
 * when not fully online and enable tagged queueing. */
1718 static int myrb_slave_configure(struct scsi_device *sdev)
1720 struct myrb_ldev_info *ldev_info;
1722 if (sdev->channel > myrb_logical_channel(sdev->host))
1725 if (sdev->channel < myrb_logical_channel(sdev->host)) {
1726 sdev->no_uld_attach = 1;
1732 ldev_info = sdev->hostdata;
1735 if (ldev_info->state != MYRB_DEVICE_ONLINE)
1736 sdev_printk(KERN_INFO, sdev,
1737 "Logical drive is %s\n",
1738 myrb_devstate_name(ldev_info->state));
1740 sdev->tagged_supported = 1;
/* slave_destroy: release the per-sdev info allocated in slave_alloc
 * (kfree(NULL) is a no-op, so devices without hostdata are fine). */
1744 static void myrb_slave_destroy(struct scsi_device *sdev)
1746 kfree(sdev->hostdata);
/* bios_param: report the controller's configured geometry translation
 * (heads/sectors chosen at probe); cylinders derived from capacity. */
1749 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1750 sector_t capacity, int geom[])
1752 struct myrb_hba *cb = shost_priv(sdev->host);
1754 geom[0] = cb->ldev_geom_heads;
1755 geom[1] = cb->ldev_geom_sectors;
1756 geom[2] = sector_div(capacity, geom[0] * geom[1]);
/* sysfs raid_state (read): for a logical drive, report the cached state;
 * for a physical device, query the controller for its current state. */
1761 static ssize_t raid_state_show(struct device *dev,
1762 struct device_attribute *attr, char *buf)
1764 struct scsi_device *sdev = to_scsi_device(dev);
1765 struct myrb_hba *cb = shost_priv(sdev->host);
1768 if (!sdev->hostdata)
1769 return snprintf(buf, 16, "Unknown\n");
1771 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1772 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1775 name = myrb_devstate_name(ldev_info->state);
1777 ret = snprintf(buf, 32, "%s\n", name);
/* Unrecognized state value: print it raw. */
1779 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1782 struct myrb_pdev_state *pdev_info = sdev->hostdata;
1783 unsigned short status;
/* Refresh the physical device state from the controller on each read. */
1786 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1788 if (status != MYRB_STATUS_SUCCESS)
1789 sdev_printk(KERN_INFO, sdev,
1790 "Failed to get device state, status %x\n",
1793 if (!pdev_info->present)
1796 name = myrb_devstate_name(pdev_info->state);
1798 ret = snprintf(buf, 32, "%s\n", name);
1800 ret = snprintf(buf, 32, "Invalid (%02X)\n",
/* sysfs raid_state (write): accept "kill"/"offline", "online" or
 * "standby" and ask the controller to transition the physical device,
 * translating failure statuses into human-readable messages. */
1806 static ssize_t raid_state_store(struct device *dev,
1807 struct device_attribute *attr, const char *buf, size_t count)
1809 struct scsi_device *sdev = to_scsi_device(dev);
1810 struct myrb_hba *cb = shost_priv(sdev->host);
1811 struct myrb_pdev_state *pdev_info;
1812 enum myrb_devstate new_state;
1813 unsigned short status;
1815 if (!strncmp(buf, "kill", 4) ||
1816 !strncmp(buf, "offline", 7))
1817 new_state = MYRB_DEVICE_DEAD;
1818 else if (!strncmp(buf, "online", 6))
1819 new_state = MYRB_DEVICE_ONLINE;
1820 else if (!strncmp(buf, "standby", 7))
1821 new_state = MYRB_DEVICE_STANDBY;
1825 pdev_info = sdev->hostdata;
1827 sdev_printk(KERN_INFO, sdev,
1828 "Failed - no physical device information\n");
1831 if (!pdev_info->present) {
1832 sdev_printk(KERN_INFO, sdev,
1833 "Failed - device not present\n");
/* No-op if the device is already in the requested state. */
1837 if (pdev_info->state == new_state)
1840 status = myrb_set_pdev_state(cb, sdev, new_state);
1842 case MYRB_STATUS_SUCCESS:
1844 case MYRB_STATUS_START_DEVICE_FAILED:
1845 sdev_printk(KERN_INFO, sdev,
1846 "Failed - Unable to Start Device\n");
1849 case MYRB_STATUS_NO_DEVICE:
1850 sdev_printk(KERN_INFO, sdev,
1851 "Failed - No Device at Address\n");
1854 case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1855 sdev_printk(KERN_INFO, sdev,
1856 "Failed - Invalid Channel or Target or Modifier\n");
1859 case MYRB_STATUS_CHANNEL_BUSY:
1860 sdev_printk(KERN_INFO, sdev,
1861 "Failed - Channel Busy\n");
1865 sdev_printk(KERN_INFO, sdev,
1866 "Failed - Unexpected Status %04X\n", status);
1872 static DEVICE_ATTR_RW(raid_state);
/* sysfs raid_level (read-only): RAID level name for logical drives,
 * "Physical Drive" for everything else. */
1874 static ssize_t raid_level_show(struct device *dev,
1875 struct device_attribute *attr, char *buf)
1877 struct scsi_device *sdev = to_scsi_device(dev);
1879 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1880 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1886 name = myrb_raidlevel_name(ldev_info->raid_level);
1888 return snprintf(buf, 32, "Invalid (%02X)\n",
1890 return snprintf(buf, 32, "%s\n", name);
1892 return snprintf(buf, 32, "Physical Drive\n");
1894 static DEVICE_ATTR_RO(raid_level);
/* sysfs rebuild (read): report rebuild progress for this logical drive,
 * derived from the controller's rebuild-progress query. */
1896 static ssize_t rebuild_show(struct device *dev,
1897 struct device_attribute *attr, char *buf)
1899 struct scsi_device *sdev = to_scsi_device(dev);
1900 struct myrb_hba *cb = shost_priv(sdev->host);
1901 struct myrb_rbld_progress rbld_buf;
1902 unsigned char status;
1904 if (sdev->channel < myrb_logical_channel(sdev->host))
1905 return snprintf(buf, 32, "physical device - not rebuilding\n");
1907 status = myrb_get_rbld_progress(cb, &rbld_buf);
/* Only report when the in-progress rebuild is for *this* drive. */
1909 if (rbld_buf.ldev_num != sdev->id ||
1910 status != MYRB_STATUS_SUCCESS)
1911 return snprintf(buf, 32, "not rebuilding\n");
1913 return snprintf(buf, 32, "rebuilding block %u of %u\n",
1914 rbld_buf.ldev_size - rbld_buf.blocks_left,
1915 rbld_buf.ldev_size);
/* sysfs rebuild (write): nonzero starts an async rebuild of the device,
 * zero cancels a running rebuild via REBUILD_CONTROL with rate 0xFF.
 * All controller commands are serialized with dcmd_mutex. */
1918 static ssize_t rebuild_store(struct device *dev,
1919 struct device_attribute *attr, const char *buf, size_t count)
1921 struct scsi_device *sdev = to_scsi_device(dev);
1922 struct myrb_hba *cb = shost_priv(sdev->host);
1923 struct myrb_cmdblk *cmd_blk;
1924 union myrb_cmd_mbox *mbox;
1925 unsigned short status;
1929 rc = kstrtoint(buf, 0, &start);
1933 if (sdev->channel >= myrb_logical_channel(sdev->host))
1936 status = myrb_get_rbld_progress(cb, NULL);
/* The start path (elided guard): refuse if a rebuild is already running. */
1938 if (status == MYRB_STATUS_SUCCESS) {
1939 sdev_printk(KERN_INFO, sdev,
1940 "Rebuild Not Initiated; already in progress\n");
1943 mutex_lock(&cb->dcmd_mutex);
1944 cmd_blk = &cb->dcmd_blk;
1945 myrb_reset_cmd(cmd_blk);
1946 mbox = &cmd_blk->mbox;
1947 mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1948 mbox->type3D.id = MYRB_DCMD_TAG;
1949 mbox->type3D.channel = sdev->channel;
1950 mbox->type3D.target = sdev->id;
1951 status = myrb_exec_cmd(cb, cmd_blk);
1952 mutex_unlock(&cb->dcmd_mutex);
/* Cancel path: needs a DMA buffer for the (unused) rate byte. */
1954 struct pci_dev *pdev = cb->pdev;
1955 unsigned char *rate;
1956 dma_addr_t rate_addr;
1958 if (status != MYRB_STATUS_SUCCESS) {
1959 sdev_printk(KERN_INFO, sdev,
1960 "Rebuild Not Cancelled; not in progress\n");
1964 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1965 &rate_addr, GFP_KERNEL);
1967 sdev_printk(KERN_INFO, sdev,
1968 "Cancellation of Rebuild Failed - Out of Memory\n");
1971 mutex_lock(&cb->dcmd_mutex);
1972 cmd_blk = &cb->dcmd_blk;
1973 myrb_reset_cmd(cmd_blk);
1974 mbox = &cmd_blk->mbox;
1975 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
1976 mbox->type3R.id = MYRB_DCMD_TAG;
/* Rate 0xFF is the cancel request. */
1977 mbox->type3R.rbld_rate = 0xFF;
1978 mbox->type3R.addr = rate_addr;
1979 status = myrb_exec_cmd(cb, cmd_blk);
1980 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
1981 mutex_unlock(&cb->dcmd_mutex);
1983 if (status == MYRB_STATUS_SUCCESS) {
1984 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1985 start ? "Initiated" : "Cancelled");
1989 sdev_printk(KERN_INFO, sdev,
1990 "Rebuild Not Cancelled, status 0x%x\n",
/* Translate rebuild-start failure statuses into readable messages. */
1996 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
1997 msg = "Attempt to Rebuild Online or Unresponsive Drive";
1999 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2000 msg = "New Disk Failed During Rebuild";
2002 case MYRB_STATUS_INVALID_ADDRESS:
2003 msg = "Invalid Device Address";
2005 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2006 msg = "Already in Progress";
2013 sdev_printk(KERN_INFO, sdev,
2014 "Rebuild Failed - %s\n", msg);
2016 sdev_printk(KERN_INFO, sdev,
2017 "Rebuild Failed, status 0x%x\n", status);
2021 static DEVICE_ATTR_RW(rebuild);
/* sysfs consistency_check (write): nonzero starts an async consistency
 * check (with auto-restore), zero cancels it. Mirrors rebuild_store's
 * start/cancel structure, including dcmd_mutex serialization. */
2023 static ssize_t consistency_check_store(struct device *dev,
2024 struct device_attribute *attr, const char *buf, size_t count)
2026 struct scsi_device *sdev = to_scsi_device(dev);
2027 struct myrb_hba *cb = shost_priv(sdev->host);
2028 struct myrb_rbld_progress rbld_buf;
2029 struct myrb_cmdblk *cmd_blk;
2030 union myrb_cmd_mbox *mbox;
2031 unsigned short ldev_num = 0xFFFF;
2032 unsigned short status;
2036 rc = kstrtoint(buf, 0, &start);
2040 if (sdev->channel < myrb_logical_channel(sdev->host))
2043 status = myrb_get_rbld_progress(cb, &rbld_buf);
2045 if (status == MYRB_STATUS_SUCCESS) {
2046 sdev_printk(KERN_INFO, sdev,
2047 "Check Consistency Not Initiated; already in progress\n");
2050 mutex_lock(&cb->dcmd_mutex);
2051 cmd_blk = &cb->dcmd_blk;
2052 myrb_reset_cmd(cmd_blk);
2053 mbox = &cmd_blk->mbox;
2054 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2055 mbox->type3C.id = MYRB_DCMD_TAG;
2056 mbox->type3C.ldev_num = sdev->id;
/* auto_restore: repair inconsistencies rather than just reporting them. */
2057 mbox->type3C.auto_restore = true;
2059 status = myrb_exec_cmd(cb, cmd_blk);
2060 mutex_unlock(&cb->dcmd_mutex);
/* Cancel path. */
2062 struct pci_dev *pdev = cb->pdev;
2063 unsigned char *rate;
2064 dma_addr_t rate_addr;
2066 if (ldev_num != sdev->id) {
2067 sdev_printk(KERN_INFO, sdev,
2068 "Check Consistency Not Cancelled; not in progress\n");
2071 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2072 &rate_addr, GFP_KERNEL);
2074 sdev_printk(KERN_INFO, sdev,
2075 "Cancellation of Check Consistency Failed - Out of Memory\n");
2078 mutex_lock(&cb->dcmd_mutex);
2079 cmd_blk = &cb->dcmd_blk;
2080 myrb_reset_cmd(cmd_blk);
2081 mbox = &cmd_blk->mbox;
/* REBUILD_CONTROL with rate 0xFF also cancels a consistency check. */
2082 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2083 mbox->type3R.id = MYRB_DCMD_TAG;
2084 mbox->type3R.rbld_rate = 0xFF;
2085 mbox->type3R.addr = rate_addr;
2086 status = myrb_exec_cmd(cb, cmd_blk);
2087 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2088 mutex_unlock(&cb->dcmd_mutex);
2090 if (status == MYRB_STATUS_SUCCESS) {
2091 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2092 start ? "Initiated" : "Cancelled");
2096 sdev_printk(KERN_INFO, sdev,
2097 "Check Consistency Not Cancelled, status 0x%x\n",
/* Translate start-failure statuses into readable messages. */
2103 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2104 msg = "Dependent Physical Device is DEAD";
2106 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2107 msg = "New Disk Failed During Rebuild";
2109 case MYRB_STATUS_INVALID_ADDRESS:
2110 msg = "Invalid or Nonredundant Logical Drive";
2112 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2113 msg = "Already in Progress";
2120 sdev_printk(KERN_INFO, sdev,
2121 "Check Consistency Failed - %s\n", msg);
2123 sdev_printk(KERN_INFO, sdev,
2124 "Check Consistency Failed, status 0x%x\n", status);
/* sysfs consistency_check (read): same progress output as rebuild. */
2129 static ssize_t consistency_check_show(struct device *dev,
2130 struct device_attribute *attr, char *buf)
2132 return rebuild_show(dev, attr, buf);
2134 static DEVICE_ATTR_RW(consistency_check);
/* sysfs host attribute: driver-assigned controller number. */
2136 static ssize_t ctlr_num_show(struct device *dev,
2137 struct device_attribute *attr, char *buf)
2139 struct Scsi_Host *shost = class_to_shost(dev);
2140 struct myrb_hba *cb = shost_priv(shost);
2142 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2144 static DEVICE_ATTR_RO(ctlr_num);
/* sysfs host attribute: controller firmware version string. */
2146 static ssize_t firmware_show(struct device *dev,
2147 struct device_attribute *attr, char *buf)
2149 struct Scsi_Host *shost = class_to_shost(dev);
2150 struct myrb_hba *cb = shost_priv(shost);
2152 return snprintf(buf, 16, "%s\n", cb->fw_version);
2154 static DEVICE_ATTR_RO(firmware);
/* sysfs host attribute: controller model name. */
2156 static ssize_t model_show(struct device *dev,
2157 struct device_attribute *attr, char *buf)
2159 struct Scsi_Host *shost = class_to_shost(dev);
2160 struct myrb_hba *cb = shost_priv(shost);
2162 return snprintf(buf, 16, "%s\n", cb->model_name);
2164 static DEVICE_ATTR_RO(model);
/* sysfs host attribute (write-only): issue a FLUSH command to force the
 * controller's write cache out to the drives; logs the outcome. */
2166 static ssize_t flush_cache_store(struct device *dev,
2167 struct device_attribute *attr, const char *buf, size_t count)
2169 struct Scsi_Host *shost = class_to_shost(dev);
2170 struct myrb_hba *cb = shost_priv(shost);
2171 unsigned short status;
2173 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2174 if (status == MYRB_STATUS_SUCCESS) {
2175 shost_printk(KERN_INFO, shost,
2176 "Cache Flush Completed\n");
2179 shost_printk(KERN_INFO, shost,
2180 "Cache Flush Failed, status %x\n", status);
2183 static DEVICE_ATTR_WO(flush_cache);
/* Per-device and per-host sysfs attribute tables wired into the SCSI
 * host template below (NULL terminators are on elided lines). */
2185 static struct device_attribute *myrb_sdev_attrs[] = {
2187 &dev_attr_consistency_check,
2188 &dev_attr_raid_state,
2189 &dev_attr_raid_level,
2193 static struct device_attribute *myrb_shost_attrs[] = {
2197 &dev_attr_flush_cache,
/* SCSI host template: binds the queuecommand/error/slave callbacks and
 * sysfs attribute tables defined above to the SCSI midlayer. */
2201 static struct scsi_host_template myrb_template = {
2202 .module = THIS_MODULE,
2204 .proc_name = "myrb",
2205 .queuecommand = myrb_queuecommand,
2206 .eh_host_reset_handler = myrb_host_reset,
2207 .slave_alloc = myrb_slave_alloc,
2208 .slave_configure = myrb_slave_configure,
2209 .slave_destroy = myrb_slave_destroy,
2210 .bios_param = myrb_biosparam,
/* Per-command private data carries the driver's command block. */
2211 .cmd_size = sizeof(struct myrb_cmdblk),
2212 .shost_attrs = myrb_shost_attrs,
2213 .sdev_attrs = myrb_sdev_attrs,
2218 * myrb_is_raid - return boolean indicating device is raid volume
2219 * @dev: the device struct object
/* True only for devices on the logical-drive channel. */
2221 static int myrb_is_raid(struct device *dev)
2223 struct scsi_device *sdev = to_scsi_device(dev);
2225 return sdev->channel == myrb_logical_channel(sdev->host);
2229 * myrb_get_resync - get raid volume resync percent complete
2230 * @dev: the device struct object
/* Publishes rebuild progress (percent complete) for this logical drive
 * through the raid_class interface; 0 when no rebuild applies. */
2232 static void myrb_get_resync(struct device *dev)
2234 struct scsi_device *sdev = to_scsi_device(dev);
2235 struct myrb_hba *cb = shost_priv(sdev->host);
2236 struct myrb_rbld_progress rbld_buf;
2237 unsigned int percent_complete = 0;
2238 unsigned short status;
2239 unsigned int ldev_size = 0, remaining = 0;
2241 if (sdev->channel < myrb_logical_channel(sdev->host))
2243 status = myrb_get_rbld_progress(cb, &rbld_buf);
2244 if (status == MYRB_STATUS_SUCCESS) {
/* Only count progress if the rebuild belongs to this drive. */
2245 if (rbld_buf.ldev_num == sdev->id) {
2246 ldev_size = rbld_buf.ldev_size;
2247 remaining = rbld_buf.blocks_left;
2250 if (remaining && ldev_size)
2251 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2252 raid_set_resync(myrb_raid_template, dev, percent_complete);
2256 * myrb_get_state - get raid volume status
2257 * @dev: the device struct object
/* Maps the logical drive's firmware state (or an active rebuild) onto
 * the generic raid_class state and publishes it. */
2259 static void myrb_get_state(struct device *dev)
2261 struct scsi_device *sdev = to_scsi_device(dev);
2262 struct myrb_hba *cb = shost_priv(sdev->host);
2263 struct myrb_ldev_info *ldev_info = sdev->hostdata;
2264 enum raid_state state = RAID_STATE_UNKNOWN;
2265 unsigned short status;
2267 if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2268 state = RAID_STATE_UNKNOWN;
/* A successful rebuild-progress query means a resync is underway. */
2270 status = myrb_get_rbld_progress(cb, NULL);
2271 if (status == MYRB_STATUS_SUCCESS)
2272 state = RAID_STATE_RESYNCING;
2274 switch (ldev_info->state) {
2275 case MYRB_DEVICE_ONLINE:
2276 state = RAID_STATE_ACTIVE;
2278 case MYRB_DEVICE_WO:
2279 case MYRB_DEVICE_CRITICAL:
2280 state = RAID_STATE_DEGRADED;
2283 state = RAID_STATE_OFFLINE;
2287 raid_set_state(myrb_raid_template, dev, state);
/* raid_class callback table; cookie ties it to this driver's template. */
2290 static struct raid_function_template myrb_raid_functions = {
2291 .cookie = &myrb_template,
2292 .is_raid = myrb_is_raid,
2293 .get_resync = myrb_get_resync,
2294 .get_state = myrb_get_state,
/* Completion handler for SCSI commands: releases per-command DMA
 * resources (DCDB, hardware SG list), translates the controller status
 * into a SCSI result/sense, and completes the command. */
2297 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2298 struct scsi_cmnd *scmd)
2300 unsigned short status;
2305 scsi_dma_unmap(scmd);
2307 if (cmd_blk->dcdb) {
/* Pass-through path: copy back sense data before freeing the DCDB. */
2308 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2309 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2310 cmd_blk->dcdb_addr);
2311 cmd_blk->dcdb = NULL;
2314 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2315 cmd_blk->sgl = NULL;
2316 cmd_blk->sgl_addr = 0;
2318 status = cmd_blk->status;
2320 case MYRB_STATUS_SUCCESS:
2321 case MYRB_STATUS_DEVICE_BUSY:
/* Pass the raw controller status through in the low result byte. */
2322 scmd->result = (DID_OK << 16) | status;
2324 case MYRB_STATUS_BAD_DATA:
2325 dev_dbg(&scmd->device->sdev_gendev,
2326 "Bad Data Encountered\n");
2327 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2328 /* Unrecovered read error */
2329 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
2332 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
2334 case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2335 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2336 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2337 /* Unrecovered read error, auto-reallocation failed */
2338 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
2340 /* Write error, auto-reallocation failed */
2341 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
2343 case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2344 dev_dbg(&scmd->device->sdev_gendev,
2345 "Logical Drive Nonexistent or Offline");
2346 scmd->result = (DID_BAD_TARGET << 16);
2348 case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2349 dev_dbg(&scmd->device->sdev_gendev,
2350 "Attempt to Access Beyond End of Logical Drive");
2351 /* Logical block address out of range */
2352 scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
2354 case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2355 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2356 scmd->result = (DID_BAD_TARGET << 16);
2359 scmd_printk(KERN_ERR, scmd,
2360 "Unexpected Error Status %04X", status);
2361 scmd->result = (DID_ERROR << 16);
2364 scmd->scsi_done(scmd);
/* Completion handler for internal (non-SCSI) driver commands: wake the
 * waiter that submitted the command block synchronously. */
2367 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2372 if (cmd_blk->completion) {
2373 complete(cmd_blk->completion);
2374 cmd_blk->completion = NULL;
/* Periodic monitor work: services at most ONE pending background task
 * per tick (event log, error table, rebuild progress, ldev info,
 * consistency check, BGI status) or, if none pending, re-runs the HBA
 * enquiry; then reschedules itself. The elided lines after each branch
 * presumably shorten the next interval — confirm against full source. */
2378 static void myrb_monitor(struct work_struct *work)
2380 struct myrb_hba *cb = container_of(work,
2381 struct myrb_hba, monitor_work.work);
2382 struct Scsi_Host *shost = cb->host;
2383 unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2385 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2387 if (cb->new_ev_seq > cb->old_ev_seq) {
/* Fetch the oldest unseen controller event and advance the cursor. */
2388 int event = cb->old_ev_seq;
2390 dev_dbg(&shost->shost_gendev,
2391 "get event log no %d/%d\n",
2392 cb->new_ev_seq, event);
2393 myrb_get_event(cb, event);
2394 cb->old_ev_seq = event + 1;
2396 } else if (cb->need_err_info) {
2397 cb->need_err_info = false;
2398 dev_dbg(&shost->shost_gendev, "get error table\n");
2399 myrb_get_errtable(cb);
2401 } else if (cb->need_rbld && cb->rbld_first) {
2402 cb->need_rbld = false;
2403 dev_dbg(&shost->shost_gendev,
2404 "get rebuild progress\n");
2405 myrb_update_rbld_progress(cb);
2407 } else if (cb->need_ldev_info) {
2408 cb->need_ldev_info = false;
2409 dev_dbg(&shost->shost_gendev,
2410 "get logical drive info\n");
2411 myrb_get_ldev_info(cb);
2413 } else if (cb->need_rbld) {
2414 cb->need_rbld = false;
2415 dev_dbg(&shost->shost_gendev,
2416 "get rebuild progress\n");
2417 myrb_update_rbld_progress(cb);
2419 } else if (cb->need_cc_status) {
2420 cb->need_cc_status = false;
2421 dev_dbg(&shost->shost_gendev,
2422 "get consistency check progress\n");
2423 myrb_get_cc_progress(cb);
2425 } else if (cb->need_bgi_status) {
2426 cb->need_bgi_status = false;
2427 dev_dbg(&shost->shost_gendev, "get background init status\n");
2428 myrb_bgi_control(cb);
/* Idle tick: refresh the enquiry, serialized with other DMA users. */
2431 dev_dbg(&shost->shost_gendev, "new enquiry\n");
2432 mutex_lock(&cb->dma_mutex);
2433 myrb_hba_enquiry(cb);
2434 mutex_unlock(&cb->dma_mutex);
/* If the enquiry surfaced new work, run the monitor again soon. */
2435 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2436 cb->need_err_info || cb->need_rbld ||
2437 cb->need_ldev_info || cb->need_cc_status ||
2438 cb->need_bgi_status) {
2439 dev_dbg(&shost->shost_gendev,
2440 "reschedule monitor\n");
2445 cb->primary_monitor_time = jiffies;
2446 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
/**
 * myrb_err_status - reports controller BIOS messages
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	/* NOTE(review): the case values below were elided from this excerpt
	 * and restored from the classic DAC960 error-status codes — confirm
	 * against the mainline driver. */
	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		/* Unrecoverable hardware fault */
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}
2505 * Hardware-specific functions
2509 * DAC960 LA Series Controllers
/*
 * DAC960 LA series: inbound doorbell (IDB), outbound doorbell (ODB) and
 * IRQ-mask register accessors.  Each is a single MMIO byte access on the
 * controller register window at @base.
 */
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	/* Ring the inbound doorbell: new command in the hardware mailbox */
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	/* Tell the controller its hardware mailbox status was consumed */
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	/* Signal a new command in the memory (dual-mode) mailbox */
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	/* LA exposes an "empty" flag; invert it to report "full" */
	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	/* Initialisation is in progress until the INIT_DONE bit is set */
	return !(idb & DAC960_LA_IDB_INIT_DONE);
}

static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	/* Acknowledge both hardware- and memory-mailbox interrupts */
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	/* Clear only the "disable interrupts" bit; leave the rest set */
	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
/*
 * DAC960_LA_write_cmd_mbox - post a command into a memory mailbox slot
 *
 * Words 1..3 are written first; word 0 (which contains the opcode and
 * therefore marks the slot valid to the controller) is written last,
 * separated by barriers so the controller never sees a half-written
 * command.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

/* Copy a command directly into the controller's hardware mailbox registers */
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

/* Read the 16-bit command completion status */
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
/*
 * DAC960_LA_read_error_status - poll the BIOS handshake error register
 *
 * Returns false when no error is pending.  Otherwise reports the error
 * code and its two parameters (sampled from the command registers),
 * clears the pending condition and returns true.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	/* Writing all-ones clears the error latch on LA controllers */
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
/*
 * DAC960_LA_mbox_init - execute one command via the hardware mailbox
 *
 * Used only while setting up the memory mailbox interface.  Busy-waits
 * (bounded by MYRB_MAILBOX_TIMEOUT) for the mailbox to drain, posts the
 * command, then busy-waits for the status to appear.
 *
 * Return: the controller status word, or MYRB_STATUS_SUBSYS_TIMEOUT.
 */
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	/* Ack interrupt first, then release the mailbox for the next cmd */
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
/*
 * DAC960_LA_hw_init - bring an LA-series controller online
 *
 * Waits for firmware initialisation to finish (aborting on a fatal BIOS
 * handshake error), enables the memory mailbox interface, then wires up
 * the hba method pointers.  Returns 0 on success or a negative errno.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	/* Hook up the generic queueing path with LA-specific primitives */
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
/*
 * DAC960_LA_intr_handler - LA-series interrupt handler
 *
 * Drains the ring of status mailboxes under the queue lock.  Tags 1 and
 * 2 (MYRB_DCMD_TAG/MYRB_MCMD_TAG) identify the internal direct/monitor
 * command blocks; any other id maps to a SCSI command via the host tag
 * map (offset by 3).  Each consumed slot is zeroed so ->valid reads
 * false the next time around.
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			/* SCSI tags start at 3 in the status mailbox id */
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Release the slot and advance, wrapping at the ring end */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
/* Per-family hooks consumed by myrb_detect() via pci_device_id data */
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init =	DAC960_LA_hw_init,
	.irq_handler =	DAC960_LA_intr_handler,
	.mmio_size =	DAC960_LA_mmio_size,
};
2757 * DAC960 PG Series Controllers
/*
 * DAC960 PG series register accessors.  Same roles as the LA helpers
 * above, but PG doorbell/mask registers are 32-bit and PG reports a
 * "full" flag directly (no inversion needed).
 */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	/* Acknowledge both hardware- and memory-mailbox interrupts */
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	/* Clear only the "disable interrupts" bit in the 32-bit mask */
	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	/* All-ones mask (disable bit included) blocks every interrupt */
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
/*
 * DAC960_PG_write_cmd_mbox - post a command into a memory mailbox slot
 *
 * Word 0 carries the opcode and marks the slot valid, so it is written
 * last with barriers on either side (same protocol as the LA variant).
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}

/* Copy a command directly into the hardware mailbox registers */
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

/* Read the 16-bit command completion status */
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
/*
 * DAC960_PG_read_error_status - poll the BIOS handshake error register
 *
 * As the LA variant, but PG clears the error latch by writing zero.
 * Returns true (with *error/*param0/*param1 filled in) when an error
 * was pending.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
/*
 * DAC960_PG_mbox_init - execute one command via the hardware mailbox
 *
 * Setup-time only; bounded busy-wait for mailbox drain, post the
 * command, bounded busy-wait for status.  Mirrors DAC960_LA_mbox_init.
 *
 * Return: the controller status word, or MYRB_STATUS_SUBSYS_TIMEOUT.
 */
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
/*
 * DAC960_PG_hw_init - bring a PG-series controller online
 *
 * Same sequence as DAC960_LA_hw_init with PG register primitives:
 * wait out firmware init, enable the memory mailbox, wire up hooks.
 */
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
/*
 * DAC960_PG_intr_handler - PG-series interrupt handler
 *
 * Identical structure to the LA handler: drain the status mailbox ring
 * under the queue lock, route each completion to the internal command
 * blocks (tags 1/2) or to a SCSI command (tag - 3), zero and advance
 * the ring slot.
 */
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
/* Per-family hooks for PG-series controllers */
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};
3004 * DAC960 PD Series Controllers
/*
 * DAC960 PD series register accessors.  PD has no memory mailbox mode,
 * a single acknowledgeable interrupt source, and a dedicated interrupt
 * enable register instead of a mask.
 */
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}

static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	/* Zero in the enable register turns interrupts off entirely */
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
/* Copy a command into the PD hardware mailbox registers */
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}

/* Read the command id of the completion currently reported */
static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}

/* Read the 16-bit completion status for that command */
static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
/*
 * DAC960_PD_read_error_status - poll the BIOS handshake error register
 *
 * Returns true (with *error/*param0/*param1 filled in) when an error
 * was pending; the latch is cleared by writing zero.
 */
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
/*
 * DAC960_PD_qcmd - submit a command on a PD-series controller
 *
 * PD has no memory mailbox: spin until the hardware mailbox drains,
 * then write the command and ring the doorbell.  Called with the queue
 * lock held by the generic submission path.
 */
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
/*
 * DAC960_PD_hw_init - bring a PD-series controller online
 *
 * PD additionally claims an I/O port region.  No memory mailbox is set
 * up (myrb_enable_mmio() is called with a NULL init hook), and commands
 * are queued directly via DAC960_PD_qcmd.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
/*
 * DAC960_PD_intr_handler - PD-series interrupt handler
 *
 * PD reports one completion at a time through the status-id/status
 * registers rather than a mailbox ring.  Each completion is routed to
 * the internal command blocks (tags 1/2) or a SCSI command (tag - 3),
 * then acknowledged so the next one can be presented.
 */
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Ack the interrupt and release the status registers */
		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
/* Per-family hooks for PD-series controllers */
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3195 * DAC960 P Series Controllers
3197 * Similar to the DAC960 PD Series Controllers, but some commands have
/*
 * myrb_translate_enquiry - convert an old-style (P series) ENQUIRY
 * reply into the current layout.
 *
 * The 64-byte field group that modern firmware places at offset 132 is
 * returned at offset 36 by the old command; relocate it and zero the
 * 96 bytes it vacated.  The regions do not overlap (36 + 64 <= 132).
 */
static inline void myrb_translate_enquiry(void *enq)
{
	unsigned char *buf = enq;

	memcpy(buf + 132, buf + 36, 64);
	memset(buf + 36, 0, 96);
}
/*
 * myrb_translate_devstate - convert an old-style (P series) GET DEVICE
 * STATE reply into the current layout.
 *
 * Individual fields sit one or two bytes later in the old format;
 * shuffle them down in place.  The two multi-byte moves copy from
 * higher to lower addresses, which memmove handles safely.
 */
static inline void myrb_translate_devstate(void *state)
{
	unsigned char *buf = state;

	buf[2] = buf[3];
	memmove(buf + 4, buf + 5, 2);
	memmove(buf + 6, buf + 8, 4);
}
3214 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3216 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3217 int ldev_num = mbox->type5.ld.ldev_num;
3219 mbox->bytes[3] &= 0x7;
3220 mbox->bytes[3] |= mbox->bytes[7] << 6;
3221 mbox->bytes[7] = ldev_num;
3224 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3226 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3227 int ldev_num = mbox->bytes[7];
3229 mbox->bytes[7] = mbox->bytes[3] >> 6;
3230 mbox->bytes[3] &= 0x7;
3231 mbox->bytes[3] |= ldev_num << 3;
/*
 * DAC960_P_qcmd - submit a command on a P-series controller
 *
 * P-series firmware only understands the old command opcodes, so the
 * mailbox is rewritten to the *_OLD form (and read/write commands are
 * repacked) before being posted through the PD-style hardware mailbox.
 * The translation is reversed on completion in DAC960_P_intr_handler.
 *
 * NOTE(review): the case labels/breaks below were elided from this
 * excerpt and restored to match the visible opcode assignments —
 * confirm against the mainline driver.
 */
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
/*
 * DAC960_P_hw_init - bring a P-series controller online
 *
 * Identical to the PD init (shared register layout) except that the
 * queueing hook is the opcode-translating DAC960_P_qcmd.
 */
static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EAGAIN;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to allocate DMA mapped memory\n");
		DAC960_PD_reset_ctrl(base);
		return -ETIMEDOUT;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
/*
 * DAC960_P_intr_handler - P-series interrupt handler
 *
 * Same completion flow as the PD handler, plus reversal of the opcode
 * translation performed by DAC960_P_qcmd: *_OLD opcodes are restored to
 * their modern values and old-format payloads (ENQUIRY data, read/write
 * mailboxes) are translated back before the completion is handled.
 *
 * NOTE(review): the switch labels/breaks were elided from this excerpt
 * and restored to match the visible opcode assignments — confirm
 * against the mainline driver.
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
/* Per-family hooks for P-series controllers (PD register layout) */
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
/*
 * myrb_detect - allocate and initialise a controller instance
 *
 * Allocates the Scsi_Host with the hba embedded as host private data,
 * enables the PCI device, maps the register window (page-aligned),
 * runs the family-specific hw_init hook and installs the interrupt
 * handler.  Returns the hba, or NULL on any failure.
 *
 * NOTE(review): the error-path statements were elided from this
 * excerpt; the cleanup below is reconstructed — confirm against the
 * mainline driver.
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	/* PD/P expose I/O ports in BAR0 and registers in BAR1 */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
/*
 * myrb_probe - PCI probe entry point
 *
 * Detects the controller, reads its configuration, allocates the DMA
 * mempools and registers the SCSI host.
 *
 * NOTE(review): the failure-path statements were elided from this
 * excerpt; the cleanup below is reconstructed — confirm against the
 * mainline driver.
 */
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;

failed:
	myrb_cleanup(cb);
	return ret;
}
/*
 * myrb_remove - PCI remove entry point
 *
 * Flushes the controller cache before tearing down the host, the
 * register mappings and the DMA mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
/*
 * Supported controllers.  The LA variant appears behind a DEC 21285
 * bridge, so it is matched on the bridge ID plus the Mylex subsystem
 * ID; the rest match their native Mylex device IDs.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
/*
 * myrb_init_module - register the RAID class template, then the PCI
 * driver; unwind the template registration if the driver fails.
 */
static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

/* Reverse of myrb_init_module */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
3548 module_init(myrb_init_module);
3549 module_exit(myrb_cleanup_module);
3551 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3552 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3553 MODULE_LICENSE("GPL");