1 // SPDX-License-Identifier: GPL-2.0
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
7 * Based on the original DAC960 driver,
8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
27 static struct raid_template *myrb_raid_template;
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
/*
 * myrb_logical_channel - channel number used to expose logical drives
 *
 * Logical drives are presented on the host's last channel (one past the
 * physical channels).
 */
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
34 return shost->max_channel - 1;
/* Lookup table mapping device state codes to printable names. */
37 static struct myrb_devstate_name_entry {
38 enum myrb_devstate state;
40 } myrb_devstate_name_list[] = {
41 { MYRB_DEVICE_DEAD, "Dead" },
42 { MYRB_DEVICE_WO, "WriteOnly" },
43 { MYRB_DEVICE_ONLINE, "Online" },
44 { MYRB_DEVICE_CRITICAL, "Critical" },
45 { MYRB_DEVICE_STANDBY, "Standby" },
46 { MYRB_DEVICE_OFFLINE, "Offline" },
/*
 * myrb_devstate_name - translate a device state code to its name
 *
 * Linear scan of myrb_devstate_name_list for a matching state entry.
 */
49 static const char *myrb_devstate_name(enum myrb_devstate state)
51 struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
54 for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 if (entry[i].state == state)
/* Lookup table mapping RAID level codes to printable names. */
61 static struct myrb_raidlevel_name_entry {
62 enum myrb_raidlevel level;
64 } myrb_raidlevel_name_list[] = {
65 { MYRB_RAID_LEVEL0, "RAID0" },
66 { MYRB_RAID_LEVEL1, "RAID1" },
67 { MYRB_RAID_LEVEL3, "RAID3" },
68 { MYRB_RAID_LEVEL5, "RAID5" },
69 { MYRB_RAID_LEVEL6, "RAID6" },
70 { MYRB_RAID_JBOD, "JBOD" },
/*
 * myrb_raidlevel_name - translate a RAID level code to its name
 *
 * Linear scan of myrb_raidlevel_name_list for a matching level entry.
 */
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
75 struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
78 for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 if (entry[i].level == level)
86 * myrb_create_mempools - allocates auxiliary data structures
88 * Return: true on success, false otherwise.
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
92 size_t elem_size, elem_align;
/*
 * Per-command scatter/gather lists come from a DMA pool sized for the
 * host's sg_tablesize, aligned to one SGE element.
 */
94 elem_align = sizeof(struct myrb_sge);
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 elem_size, elem_align, 0);
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
100 "Failed to allocate SG pool\n");
/* DMA pool for direct CDB (pass-through) blocks. */
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 sizeof(struct myrb_dcdb),
106 sizeof(unsigned int), 0);
107 if (!cb->dcdb_pool) {
/* Unwind the SG pool allocated above. */
108 dma_pool_destroy(cb->sg_pool);
110 shost_printk(KERN_ERR, cb->host,
111 "Failed to allocate DCDB pool\n");
/* Dedicated single-threaded workqueue for the periodic monitor. */
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
/* On workqueue creation failure, release both pools. */
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
123 shost_printk(KERN_ERR, cb->host,
124 "Failed to create workqueue\n");
129 * Initialize the Monitoring Timer.
/* Kick off the monitor work almost immediately (1 jiffy delay). */
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
138 * myrb_destroy_mempools - tears down the memory pools for the controller
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
/* Stop the monitor before its workqueue and pools go away. */
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
/* dma_pool_destroy() tolerates NULL, so no guards are needed. */
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
150 * myrb_reset_cmd - reset command block
/* Zeroes the command mailbox so a stale opcode cannot be re-issued. */
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
154 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
156 memset(mbox, 0, sizeof(union myrb_cmd_mbox));
161 * myrb_qcmd - queues command block for execution
/*
 * Writes the command into the next slot of the memory-mailbox ring.
 * Caller is expected to hold the queue lock (see myrb_exec_cmd()).
 */
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
165 void __iomem *base = cb->io_base;
166 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
169 cb->write_cmd_mbox(next_mbox, mbox);
/*
 * NOTE(review): presumably this signals the controller only when one of
 * the two previously submitted mailboxes has been consumed (words[0]
 * cleared) — confirm against the hardware-specific get_cmd_mbox hook.
 */
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
/* Remember the last two slots used, then advance the ring pointer. */
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
181 * myrb_exec_cmd - executes command block and waits for completion.
183 * Return: command status
/*
 * Synchronous submission: queues the block under the queue lock, then
 * sleeps until the interrupt handler completes it. Must only be called
 * from process context since wait_for_completion() may sleep.
 */
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 struct myrb_cmdblk *cmd_blk)
188 DECLARE_COMPLETION_ONSTACK(cmpl);
191 cmd_blk->completion = &cmpl;
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
/*
 * NOTE(review): in_interrupt() context checks like this were removed
 * from mainline drivers; the sleeping wait below already enforces
 * process context — consider dropping the WARN_ON.
 */
197 WARN_ON(in_interrupt());
198 wait_for_completion(&cmpl);
199 return cmd_blk->status;
203 * myrb_exec_type3 - executes a type 3 command and waits for completion.
205 * Return: command status
207 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
208 enum myrb_cmd_opcode op, dma_addr_t addr)
210 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
211 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
212 unsigned short status;
/* dcmd_mutex serializes all users of the shared dcmd_blk. */
214 mutex_lock(&cb->dcmd_mutex);
215 myrb_reset_cmd(cmd_blk);
216 mbox->type3.id = MYRB_DCMD_TAG;
217 mbox->type3.opcode = op;
218 mbox->type3.addr = addr;
219 status = myrb_exec_cmd(cb, cmd_blk);
220 mutex_unlock(&cb->dcmd_mutex);
225 * myrb_exec_type3D - executes a type 3D command and waits for completion.
227 * Return: command status
/*
 * Issues a per-device (channel/target addressed) command; @pdev_info is
 * streaming-mapped for the controller to fill in the device state.
 */
229 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
230 enum myrb_cmd_opcode op, struct scsi_device *sdev,
231 struct myrb_pdev_state *pdev_info)
233 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
234 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
235 unsigned short status;
236 dma_addr_t pdev_info_addr;
238 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
239 sizeof(struct myrb_pdev_state),
/* Bail out early if the buffer could not be mapped for DMA. */
241 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
242 return MYRB_STATUS_SUBSYS_FAILED;
/* dcmd_mutex serializes all users of the shared dcmd_blk. */
244 mutex_lock(&cb->dcmd_mutex);
245 myrb_reset_cmd(cmd_blk);
246 mbox->type3D.id = MYRB_DCMD_TAG;
247 mbox->type3D.opcode = op;
248 mbox->type3D.channel = sdev->channel;
249 mbox->type3D.target = sdev->id;
250 mbox->type3D.addr = pdev_info_addr;
251 status = myrb_exec_cmd(cb, cmd_blk);
252 mutex_unlock(&cb->dcmd_mutex);
253 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
254 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
/* The old GET_DEVICE_STATE layout needs translation to the new one. */
255 if (status == MYRB_STATUS_SUCCESS &&
256 mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
257 myrb_translate_devstate(pdev_info);
/*
 * Event log messages, indexed by the vendor-specific ASCQ value reported
 * in a log entry's sense data (see myrb_get_event()).
 */
262 static char *myrb_event_msg[] = {
263 "killed because write recovery failed",
264 "killed because of SCSI bus reset failure",
265 "killed because of double check condition",
266 "killed because it was removed",
267 "killed because of gross error on SCSI chip",
268 "killed because of bad tag returned from drive",
269 "killed because of timeout on SCSI command",
270 "killed because of reset SCSI command issued from system",
271 "killed because busy or parity error count exceeded limit",
272 "killed because of 'kill drive' command from system",
273 "killed because of selection timeout",
274 "killed due to SCSI phase sequence error",
275 "killed due to unknown status",
279 * myrb_get_event - get event log from HBA
280 * @cb: pointer to the hba structure
281 * @event: number of the event
283 * Execute a type 3E command and logs the event message
285 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
287 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
288 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
289 struct myrb_log_entry *ev_buf;
291 unsigned short status;
/* Coherent buffer the controller fills with the log entry. */
293 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
294 sizeof(struct myrb_log_entry),
295 &ev_addr, GFP_KERNEL);
299 myrb_reset_cmd(cmd_blk);
300 mbox->type3E.id = MYRB_MCMD_TAG;
301 mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
302 mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
303 mbox->type3E.opqual = 1;
304 mbox->type3E.ev_seq = event;
305 mbox->type3E.addr = ev_addr;
306 status = myrb_exec_cmd(cb, cmd_blk);
307 if (status != MYRB_STATUS_SUCCESS)
308 shost_printk(KERN_INFO, cb->host,
309 "Failed to get event log %d, status %04x\n",
/* Only log when the returned entry matches the requested sequence. */
312 else if (ev_buf->seq_num == event) {
313 struct scsi_sense_hdr sshdr;
315 memset(&sshdr, 0, sizeof(sshdr));
316 scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
/* Vendor-specific sense encodes a drive-kill reason in the ASCQ. */
318 if (sshdr.sense_key == VENDOR_SPECIFIC &&
320 sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
321 shost_printk(KERN_CRIT, cb->host,
322 "Physical drive %d:%d: %s\n",
323 ev_buf->channel, ev_buf->target,
324 myrb_event_msg[sshdr.ascq]);
/* Otherwise dump the raw key/ASC/ASCQ triple. */
326 shost_printk(KERN_CRIT, cb->host,
327 "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
328 ev_buf->channel, ev_buf->target,
329 sshdr.sense_key, sshdr.asc, sshdr.ascq);
332 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
337 * myrb_get_errtable - retrieves the error table from the controller
339 * Executes a type 3 command and logs the error table from the controller.
341 static void myrb_get_errtable(struct myrb_hba *cb)
343 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
344 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
345 unsigned short status;
/* Snapshot the previous table so only changed counters are reported. */
346 struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
348 memcpy(&old_table, cb->err_table, sizeof(old_table));
350 myrb_reset_cmd(cmd_blk);
351 mbox->type3.id = MYRB_MCMD_TAG;
352 mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
353 mbox->type3.addr = cb->err_table_addr;
354 status = myrb_exec_cmd(cb, cmd_blk);
355 if (status == MYRB_STATUS_SUCCESS) {
356 struct myrb_error_entry *table = cb->err_table;
357 struct myrb_error_entry *new, *old;
358 size_t err_table_offset;
359 struct scsi_device *sdev;
361 shost_for_each_device(sdev, cb->host) {
/* Skip devices on the logical-drive channel; only physical devices
 * have error-table entries. */
362 if (sdev->channel >= myrb_logical_channel(cb->host))
/* Entries are laid out channel-major, MYRB_MAX_TARGETS per channel. */
364 err_table_offset = sdev->channel * MYRB_MAX_TARGETS
366 new = table + err_table_offset;
367 old = &old_table[err_table_offset];
/* Stay quiet when no counter moved since the last poll. */
368 if (new->parity_err == old->parity_err &&
369 new->soft_err == old->soft_err &&
370 new->hard_err == old->hard_err &&
371 new->misc_err == old->misc_err)
373 sdev_printk(KERN_CRIT, sdev,
374 "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
375 new->parity_err, new->soft_err,
376 new->hard_err, new->misc_err);
382 * myrb_get_ldev_info - retrieves the logical device table from the controller
384 * Executes a type 3 command and updates the logical device table.
386 * Return: command status
388 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
390 unsigned short status;
391 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
392 struct Scsi_Host *shost = cb->host;
394 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
396 if (status != MYRB_STATUS_SUCCESS)
/* Walk the refreshed table and reconcile with known scsi devices. */
399 for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
400 struct myrb_ldev_info *old = NULL;
401 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
402 struct scsi_device *sdev;
/* Logical drives live on the dedicated logical channel. */
404 sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
/* No scsi_device yet: announce and hot-add unless it is offline. */
407 if (new->state == MYRB_DEVICE_OFFLINE)
409 shost_printk(KERN_INFO, shost,
410 "Adding Logical Drive %d in state %s\n",
411 ldev_num, myrb_devstate_name(new->state));
412 scsi_add_device(shost, myrb_logical_channel(shost),
/* Existing device: report state / write-cache transitions. */
416 old = sdev->hostdata;
417 if (new->state != old->state)
418 shost_printk(KERN_INFO, shost,
419 "Logical Drive %d is now %s\n",
420 ldev_num, myrb_devstate_name(new->state));
421 if (new->wb_enabled != old->wb_enabled)
422 sdev_printk(KERN_INFO, sdev,
423 "Logical Drive is now WRITE %s\n",
424 (new->wb_enabled ? "BACK" : "THRU"));
/* Cache the new info and drop the lookup reference. */
425 memcpy(old, new, sizeof(*new));
426 scsi_device_put(sdev);
432 * myrb_get_rbld_progress - get rebuild progress information
434 * Executes a type 3 command and returns the rebuild progress
437 * Return: command status
439 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
440 struct myrb_rbld_progress *rbld)
442 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
443 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
444 struct myrb_rbld_progress *rbld_buf;
445 dma_addr_t rbld_addr;
446 unsigned short status;
/* DMA-coherent bounce buffer for the controller's progress record. */
448 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
449 sizeof(struct myrb_rbld_progress),
450 &rbld_addr, GFP_KERNEL);
/* Allocation failure is reported as "rebuild status unknown". */
452 return MYRB_STATUS_RBLD_NOT_CHECKED;
454 myrb_reset_cmd(cmd_blk);
455 mbox->type3.id = MYRB_MCMD_TAG;
456 mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
457 mbox->type3.addr = rbld_addr;
458 status = myrb_exec_cmd(cb, cmd_blk);
/* Copy out to the caller's buffer before releasing the DMA memory. */
460 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
461 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
462 rbld_buf, rbld_addr);
467 * myrb_update_rbld_progress - updates the rebuild status
469 * Updates the rebuild status for the attached logical devices.
472 static void myrb_update_rbld_progress(struct myrb_hba *cb)
474 struct myrb_rbld_progress rbld_buf;
475 unsigned short status;
477 status = myrb_get_rbld_progress(cb, &rbld_buf);
/*
 * A "no rebuild in progress" answer right after a successful poll
 * means the rebuild finished between polls.
 */
478 if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
479 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
480 status = MYRB_STATUS_RBLD_SUCCESS;
481 if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
482 unsigned int blocks_done =
483 rbld_buf.ldev_size - rbld_buf.blocks_left;
484 struct scsi_device *sdev;
486 sdev = scsi_device_lookup(cb->host,
487 myrb_logical_channel(cb->host),
488 rbld_buf.ldev_num, 0);
/* Report per-status outcome against the affected logical drive. */
493 case MYRB_STATUS_SUCCESS:
494 sdev_printk(KERN_INFO, sdev,
495 "Rebuild in Progress, %d%% completed\n",
/* >> 7 on both terms keeps the 100 * blocks product from overflowing. */
496 (100 * (blocks_done >> 7))
497 / (rbld_buf.ldev_size >> 7));
499 case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
500 sdev_printk(KERN_INFO, sdev,
501 "Rebuild Failed due to Logical Drive Failure\n");
503 case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
504 sdev_printk(KERN_INFO, sdev,
505 "Rebuild Failed due to Bad Blocks on Other Drives\n");
507 case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
508 sdev_printk(KERN_INFO, sdev,
509 "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
511 case MYRB_STATUS_RBLD_SUCCESS:
512 sdev_printk(KERN_INFO, sdev,
513 "Rebuild Completed Successfully\n");
515 case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
516 sdev_printk(KERN_INFO, sdev,
517 "Rebuild Successfully Terminated\n");
522 scsi_device_put(sdev);
/* Remember the status so the next poll can detect completion. */
524 cb->last_rbld_status = status;
528 * myrb_get_cc_progress - retrieve the rebuild status
530 * Execute a type 3 Command and fetch the rebuild / consistency check
533 static void myrb_get_cc_progress(struct myrb_hba *cb)
535 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
536 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
537 struct myrb_rbld_progress *rbld_buf;
538 dma_addr_t rbld_addr;
539 unsigned short status;
541 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
542 sizeof(struct myrb_rbld_progress),
543 &rbld_addr, GFP_KERNEL);
/* Could not allocate: retry the consistency-check poll next cycle. */
545 cb->need_cc_status = true;
548 myrb_reset_cmd(cmd_blk);
549 mbox->type3.id = MYRB_MCMD_TAG;
550 mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
551 mbox->type3.addr = rbld_addr;
552 status = myrb_exec_cmd(cb, cmd_blk);
553 if (status == MYRB_STATUS_SUCCESS) {
554 unsigned int ldev_num = rbld_buf->ldev_num;
555 unsigned int ldev_size = rbld_buf->ldev_size;
556 unsigned int blocks_done =
557 ldev_size - rbld_buf->blocks_left;
558 struct scsi_device *sdev;
560 sdev = scsi_device_lookup(cb->host,
561 myrb_logical_channel(cb->host),
564 sdev_printk(KERN_INFO, sdev,
565 "Consistency Check in Progress: %d%% completed\n",
/* >> 7 on both terms avoids overflow in the percentage computation. */
566 (100 * (blocks_done >> 7))
568 scsi_device_put(sdev);
571 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
572 rbld_buf, rbld_addr);
576 * myrb_bgi_control - updates background initialisation status
578 * Executes a type 3B command and updates the background initialisation status
580 static void myrb_bgi_control(struct myrb_hba *cb)
582 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
583 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
584 struct myrb_bgi_status *bgi, *last_bgi;
586 struct scsi_device *sdev = NULL;
587 unsigned short status;
589 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
590 &bgi_addr, GFP_KERNEL);
592 shost_printk(KERN_ERR, cb->host,
593 "Failed to allocate bgi memory\n");
596 myrb_reset_cmd(cmd_blk);
597 mbox->type3B.id = MYRB_DCMD_TAG;
598 mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
/* NOTE(review): 0x20 is presumably the "get status" optype — confirm
 * against the DAC960 V1 firmware interface definition. */
599 mbox->type3B.optype = 0x20;
600 mbox->type3B.addr = bgi_addr;
601 status = myrb_exec_cmd(cb, cmd_blk);
/* Compare against the cached status to avoid repeating messages. */
602 last_bgi = &cb->bgi_status;
603 sdev = scsi_device_lookup(cb->host,
604 myrb_logical_channel(cb->host),
607 case MYRB_STATUS_SUCCESS:
608 switch (bgi->status) {
609 case MYRB_BGI_INVALID:
611 case MYRB_BGI_STARTED:
614 sdev_printk(KERN_INFO, sdev,
615 "Background Initialization Started\n");
617 case MYRB_BGI_INPROGRESS:
/* Only log when the reported progress actually advanced. */
620 if (bgi->blocks_done == last_bgi->blocks_done &&
621 bgi->ldev_num == last_bgi->ldev_num)
623 sdev_printk(KERN_INFO, sdev,
624 "Background Initialization in Progress: %d%% completed\n",
625 (100 * (bgi->blocks_done >> 7))
626 / (bgi->ldev_size >> 7));
628 case MYRB_BGI_SUSPENDED:
631 sdev_printk(KERN_INFO, sdev,
632 "Background Initialization Suspended\n");
634 case MYRB_BGI_CANCELLED:
637 sdev_printk(KERN_INFO, sdev,
638 "Background Initialization Cancelled\n");
/* Cache the freshly read status for the next poll. */
641 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
643 case MYRB_STATUS_BGI_SUCCESS:
644 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
645 sdev_printk(KERN_INFO, sdev,
646 "Background Initialization Completed Successfully\n");
647 cb->bgi_status.status = MYRB_BGI_INVALID;
649 case MYRB_STATUS_BGI_ABORTED:
650 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
651 sdev_printk(KERN_INFO, sdev,
652 "Background Initialization Aborted\n");
654 case MYRB_STATUS_NO_BGI_INPROGRESS:
655 cb->bgi_status.status = MYRB_BGI_INVALID;
659 scsi_device_put(sdev);
660 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
665 * myrb_hba_enquiry - updates the controller status
667 * Executes a DAC_V1_Enquiry command and updates the controller status.
669 * Return: command status
/*
 * Snapshots the previous enquiry data, refreshes it from the controller,
 * and sets the need_* flags that drive the monitor's follow-up work
 * (error table, logical drive info, BGI status, rebuild/check progress).
 */
671 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
673 struct myrb_enquiry old, *new;
674 unsigned short status;
676 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
678 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
679 if (status != MYRB_STATUS_SUCCESS)
/* Announce logical drives that appeared since the last enquiry. */
683 if (new->ldev_count > old.ldev_count) {
684 int ldev_num = old.ldev_count - 1;
686 while (++ldev_num < new->ldev_count)
687 shost_printk(KERN_CRIT, cb->host,
688 "Logical Drive %d Now Exists\n",
/* ...and those that disappeared. */
691 if (new->ldev_count < old.ldev_count) {
692 int ldev_num = new->ldev_count - 1;
694 while (++ldev_num < old.ldev_count)
695 shost_printk(KERN_CRIT, cb->host,
696 "Logical Drive %d No Longer Exists\n",
699 if (new->status.deferred != old.status.deferred)
700 shost_printk(KERN_CRIT, cb->host,
701 "Deferred Write Error Flag is now %s\n",
702 (new->status.deferred ? "TRUE" : "FALSE"));
/* A new event sequence number means log entries are waiting. */
703 if (new->ev_seq != old.ev_seq) {
704 cb->new_ev_seq = new->ev_seq;
705 cb->need_err_info = true;
706 shost_printk(KERN_INFO, cb->host,
707 "Event log %d/%d (%d/%d) available\n",
708 cb->old_ev_seq, cb->new_ev_seq,
709 old.ev_seq, new->ev_seq);
/* Critical/offline/total counts changed: rescan logical drives. */
711 if ((new->ldev_critical > 0 &&
712 new->ldev_critical != old.ldev_critical) ||
713 (new->ldev_offline > 0 &&
714 new->ldev_offline != old.ldev_offline) ||
715 (new->ldev_count != old.ldev_count)) {
716 shost_printk(KERN_INFO, cb->host,
717 "Logical drive count changed (%d/%d/%d)\n",
721 cb->need_ldev_info = true;
/* Dead physical drives or the secondary interval expiring triggers a
 * BGI status refresh (when the firmware supports it). */
723 if (new->pdev_dead > 0 ||
724 new->pdev_dead != old.pdev_dead ||
725 time_after_eq(jiffies, cb->secondary_monitor_time
726 + MYRB_SECONDARY_MONITOR_INTERVAL)) {
727 cb->need_bgi_status = cb->bgi_status_supported;
728 cb->secondary_monitor_time = jiffies;
/* A rebuild running now or on the previous poll needs progress polls. */
730 if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
732 old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
733 old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
734 cb->need_rbld = true;
735 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
/* Report the outcome of a consistency check that was in progress. */
737 if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
739 case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
740 shost_printk(KERN_INFO, cb->host,
741 "Consistency Check Completed Successfully\n");
743 case MYRB_STDBY_RBLD_IN_PROGRESS:
744 case MYRB_BG_RBLD_IN_PROGRESS:
746 case MYRB_BG_CHECK_IN_PROGRESS:
747 cb->need_cc_status = true;
749 case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
750 shost_printk(KERN_INFO, cb->host,
751 "Consistency Check Completed with Error\n");
753 case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
754 shost_printk(KERN_INFO, cb->host,
755 "Consistency Check Failed - Physical Device Failed\n");
757 case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
758 shost_printk(KERN_INFO, cb->host,
759 "Consistency Check Failed - Logical Drive Failed\n");
761 case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
762 shost_printk(KERN_INFO, cb->host,
763 "Consistency Check Failed - Other Causes\n");
765 case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
766 shost_printk(KERN_INFO, cb->host,
767 "Consistency Check Successfully Terminated\n");
770 else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
771 cb->need_cc_status = true;
773 return MYRB_STATUS_SUCCESS;
777 * myrb_set_pdev_state - sets the device state for a physical device
779 * Return: command status
781 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
782 struct scsi_device *sdev, enum myrb_devstate state)
784 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
785 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
786 unsigned short status;
/* dcmd_mutex serializes all users of the shared dcmd_blk. */
788 mutex_lock(&cb->dcmd_mutex);
/* NOTE(review): opcode is MYRB_CMD_START_DEVICE regardless of the
 * requested state — confirm this single opcode carries the new state
 * in the 5-bit field below, per the V1 firmware interface. */
789 mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
790 mbox->type3D.id = MYRB_DCMD_TAG;
791 mbox->type3D.channel = sdev->channel;
792 mbox->type3D.target = sdev->id;
/* Only the low 5 bits encode the device state. */
793 mbox->type3D.state = state & 0x1F;
794 status = myrb_exec_cmd(cb, cmd_blk);
795 mutex_unlock(&cb->dcmd_mutex);
801 * myrb_enable_mmio - enables the Memory Mailbox Interface
803 * PD and P controller types have no memory mailbox, but still need the
804 * other dma mapped memory.
806 * Return: true on success, false otherwise.
808 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
810 void __iomem *base = cb->io_base;
811 struct pci_dev *pdev = cb->pdev;
812 size_t err_table_size;
813 size_t ldev_info_size;
814 union myrb_cmd_mbox *cmd_mbox_mem;
815 struct myrb_stat_mbox *stat_mbox_mem;
816 union myrb_cmd_mbox mbox;
817 unsigned short status;
819 memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
/* This hardware only does 32-bit DMA. */
821 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
822 dev_err(&pdev->dev, "DMA mask out of range\n");
/* Coherent buffers the monitor refreshes on every poll. */
826 cb->enquiry = dma_alloc_coherent(&pdev->dev,
827 sizeof(struct myrb_enquiry),
828 &cb->enquiry_addr, GFP_KERNEL);
832 err_table_size = sizeof(struct myrb_error_entry) *
833 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
834 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
835 &cb->err_table_addr, GFP_KERNEL);
839 ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
840 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
841 &cb->ldev_info_addr, GFP_KERNEL);
842 if (!cb->ldev_info_buf)
846 * Skip mailbox initialisation for PD and P Controllers
851 /* These are the base addresses for the command memory mailbox array */
852 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
853 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
857 if (!cb->first_cmd_mbox)
/* Set up the command mailbox ring pointers (see myrb_qcmd()). */
860 cmd_mbox_mem = cb->first_cmd_mbox;
861 cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
862 cb->last_cmd_mbox = cmd_mbox_mem;
863 cb->next_cmd_mbox = cb->first_cmd_mbox;
864 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
865 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
867 /* These are the base addresses for the status memory mailbox array */
868 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
869 sizeof(struct myrb_stat_mbox);
870 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
874 if (!cb->first_stat_mbox)
877 stat_mbox_mem = cb->first_stat_mbox;
878 stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
879 cb->last_stat_mbox = stat_mbox_mem;
880 cb->next_stat_mbox = cb->first_stat_mbox;
882 /* Enable the Memory Mailbox Interface. */
/* Try dual-mode first (opcode2 0x14); fall back to single mode. */
883 cb->dual_mode_interface = true;
884 mbox.typeX.opcode = 0x2B;
886 mbox.typeX.opcode2 = 0x14;
887 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
888 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
890 status = mmio_init_fn(pdev, base, &mbox);
891 if (status != MYRB_STATUS_SUCCESS) {
892 cb->dual_mode_interface = false;
893 mbox.typeX.opcode2 = 0x10;
894 status = mmio_init_fn(pdev, base, &mbox);
895 if (status != MYRB_STATUS_SUCCESS) {
/* NOTE(review): message typo "statux" should be "status" — runtime
 * string left untouched here. */
897 "Failed to enable mailbox, statux %02X\n",
906 * myrb_get_hba_config - reads the configuration information
908 * Reads the configuration information from the controller and
909 * initializes the controller structure.
911 * Return: 0 on success, errno otherwise
913 static int myrb_get_hba_config(struct myrb_hba *cb)
915 struct myrb_enquiry2 *enquiry2;
916 dma_addr_t enquiry2_addr;
917 struct myrb_config2 *config2;
918 dma_addr_t config2_addr;
919 struct Scsi_Host *shost = cb->host;
920 struct pci_dev *pdev = cb->pdev;
921 int pchan_max = 0, pchan_cur = 0;
922 unsigned short status;
923 int ret = -ENODEV, memsize = 0;
/* Temporary DMA buffers for the ENQUIRY2 and CONFIG2 firmware replies. */
925 enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
926 &enquiry2_addr, GFP_KERNEL);
928 shost_printk(KERN_ERR, cb->host,
929 "Failed to allocate V1 enquiry2 memory\n");
932 config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
933 &config2_addr, GFP_KERNEL);
935 shost_printk(KERN_ERR, cb->host,
936 "Failed to allocate V1 config2 memory\n");
937 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
938 enquiry2, enquiry2_addr);
941 mutex_lock(&cb->dma_mutex);
942 status = myrb_hba_enquiry(cb);
943 mutex_unlock(&cb->dma_mutex);
944 if (status != MYRB_STATUS_SUCCESS) {
945 shost_printk(KERN_WARNING, cb->host,
/* NOTE(review): message typo — "Failed it issue" should read
 * "Failed to issue"; runtime string left untouched here. */
946 "Failed it issue V1 Enquiry\n");
950 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
951 if (status != MYRB_STATUS_SUCCESS) {
952 shost_printk(KERN_WARNING, cb->host,
953 "Failed to issue V1 Enquiry2\n");
957 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
958 if (status != MYRB_STATUS_SUCCESS) {
959 shost_printk(KERN_WARNING, cb->host,
960 "Failed to issue ReadConfig2\n");
964 status = myrb_get_ldev_info(cb);
965 if (status != MYRB_STATUS_SUCCESS) {
966 shost_printk(KERN_WARNING, cb->host,
967 "Failed to get logical drive information\n");
972 * Initialize the Controller Model Name and Full Model Name fields.
974 switch (enquiry2->hw.sub_model) {
975 case DAC960_V1_P_PD_PU:
976 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
977 strcpy(cb->model_name, "DAC960PU");
979 strcpy(cb->model_name, "DAC960PD");
982 strcpy(cb->model_name, "DAC960PL");
985 strcpy(cb->model_name, "DAC960PG");
988 strcpy(cb->model_name, "DAC960PJ");
991 strcpy(cb->model_name, "DAC960PR");
994 strcpy(cb->model_name, "DAC960PT");
997 strcpy(cb->model_name, "DAC960PTL0");
1000 strcpy(cb->model_name, "DAC960PRL");
1002 case DAC960_V1_PTL1:
1003 strcpy(cb->model_name, "DAC960PTL1");
1005 case DAC960_V1_1164P:
1006 strcpy(cb->model_name, "eXtremeRAID 1100");
1009 shost_printk(KERN_WARNING, cb->host,
1010 "Unknown Model %X\n",
1011 enquiry2->hw.sub_model);
1015 * Initialize the Controller Firmware Version field and verify that it
1016 * is a supported firmware version.
1017 * The supported firmware versions are:
1019 * DAC1164P 5.06 and above
1020 * DAC960PTL/PRL/PJ/PG 4.06 and above
1021 * DAC960PU/PD/PL 3.51 and above
1022 * DAC960PU/PD/PL/P 2.73 and above
1024 #if defined(CONFIG_ALPHA)
1026 * DEC Alpha machines were often equipped with DAC960 cards that were
1027 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1028 * the last custom FW revision to be released by DEC for these older
1029 * controllers, appears to work quite well with this driver.
1031 * Cards tested successfully were several versions each of the PD and
1032 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1033 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1034 * back of the board, of:
1036 * KZPSC: D040347 (1-channel) or D040348 (2-channel)
1037 * or D040349 (3-channel)
1038 * KZPAC: D040395 (1-channel) or D040396 (2-channel)
1039 * or D040397 (3-channel)
1041 # define FIRMWARE_27X "2.70"
1043 # define FIRMWARE_27X "2.73"
/* Older firmware leaves ENQUIRY2's version zeroed; synthesize it from
 * the base enquiry data. */
1046 if (enquiry2->fw.major_version == 0) {
1047 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1049 enquiry2->fw.firmware_type = '0';
1050 enquiry2->fw.turn_id = 0;
1052 snprintf(cb->fw_version, sizeof(cb->fw_version),
1054 enquiry2->fw.major_version,
1055 enquiry2->fw.minor_version,
1056 enquiry2->fw.firmware_type,
1057 enquiry2->fw.turn_id);
/* Reject firmware older than the supported baselines listed above. */
1058 if (!((enquiry2->fw.major_version == 5 &&
1059 enquiry2->fw.minor_version >= 6) ||
1060 (enquiry2->fw.major_version == 4 &&
1061 enquiry2->fw.minor_version >= 6) ||
1062 (enquiry2->fw.major_version == 3 &&
1063 enquiry2->fw.minor_version >= 51) ||
1064 (enquiry2->fw.major_version == 2 &&
1065 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1066 shost_printk(KERN_WARNING, cb->host,
1067 "Firmware Version '%s' unsupported\n",
1072 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1073 * Enclosure Management Enabled fields.
1075 switch (enquiry2->hw.model) {
1076 case MYRB_5_CHANNEL_BOARD:
1079 case MYRB_3_CHANNEL_BOARD:
1080 case MYRB_3_CHANNEL_ASIC_DAC:
1083 case MYRB_2_CHANNEL_BOARD:
/* Unknown board: fall back to the configured channel count. */
1087 pchan_max = enquiry2->cfg_chan;
1090 pchan_cur = enquiry2->cur_chan;
1091 if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1093 else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1097 cb->ldev_block_size = enquiry2->ldev_block_size;
/* The logical-drive channel sits one past the physical channels. */
1098 shost->max_channel = pchan_cur;
1099 shost->max_id = enquiry2->max_targets;
1100 memsize = enquiry2->mem_size >> 20;
1101 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1103 * Initialize the Controller Queue Depth, Driver Queue Depth,
1104 * Logical Drive Count, Maximum Blocks per Command, Controller
1105 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1106 * The Driver Queue Depth must be at most one less than the
1107 * Controller Queue Depth to allow for an automatic drive
1108 * rebuild operation.
1110 shost->can_queue = cb->enquiry->max_tcq;
1111 if (shost->can_queue < 3)
1112 shost->can_queue = enquiry2->max_cmds;
1113 if (shost->can_queue < 3)
1114 /* Play safe and disable TCQ */
1115 shost->can_queue = 1;
/* Leave two command mailbox slots free for internal commands. */
1117 if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1118 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1119 shost->max_sectors = enquiry2->max_sectors;
1120 shost->sg_tablesize = enquiry2->max_sge;
1121 if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1122 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1124 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1126 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1127 >> (10 - MYRB_BLKSIZE_BITS);
1128 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1129 >> (10 - MYRB_BLKSIZE_BITS);
1130 /* Assume 255/63 translation */
1131 cb->ldev_geom_heads = 255;
1132 cb->ldev_geom_sectors = 63;
1133 if (config2->drive_geometry) {
1134 cb->ldev_geom_heads = 128;
1135 cb->ldev_geom_sectors = 32;
1139 * Initialize the Background Initialization Status.
/* BGI status reporting needs firmware 4.08+/5.08+. */
1141 if ((cb->fw_version[0] == '4' &&
1142 strcmp(cb->fw_version, "4.08") >= 0) ||
1143 (cb->fw_version[0] == '5' &&
1144 strcmp(cb->fw_version, "5.08") >= 0)) {
1145 cb->bgi_status_supported = true;
1146 myrb_bgi_control(cb);
1148 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
/* Dump the discovered configuration to the kernel log. */
1152 shost_printk(KERN_INFO, cb->host,
1153 "Configuring %s PCI RAID Controller\n", cb->model_name);
1154 shost_printk(KERN_INFO, cb->host,
1155 " Firmware Version: %s, Memory Size: %dMB\n",
1156 cb->fw_version, memsize);
1157 if (cb->io_addr == 0)
1158 shost_printk(KERN_INFO, cb->host,
1159 " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1160 (unsigned long)cb->pci_addr, cb->irq);
1162 shost_printk(KERN_INFO, cb->host,
1163 " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1164 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1166 shost_printk(KERN_INFO, cb->host,
1167 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1168 cb->host->can_queue, cb->host->max_sectors);
1169 shost_printk(KERN_INFO, cb->host,
1170 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1171 cb->host->can_queue, cb->host->sg_tablesize,
1172 MYRB_SCATTER_GATHER_LIMIT);
1173 shost_printk(KERN_INFO, cb->host,
1174 " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1175 cb->stripe_size, cb->segment_size,
1176 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1178 " SAF-TE Enclosure Management Enabled" : "");
1179 shost_printk(KERN_INFO, cb->host,
1180 " Physical: %d/%d channels %d/%d/%d devices\n",
1181 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1184 shost_printk(KERN_INFO, cb->host,
1185 " Logical: 1/1 channels, %d/%d disks\n",
1186 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
/* Common exit: release the temporary DMA buffers. */
1189 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1190 enquiry2, enquiry2_addr);
1191 dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1192 config2, config2_addr);
1198 * myrb_unmap - unmaps controller structures
/*
 * Releases every DMA-coherent buffer owned by the controller (logical-device
 * info, error table, enquiry, status mailbox ring, command mailbox ring).
 * Each pointer is set to NULL after freeing so a repeat call is harmless.
 */
1200 static void myrb_unmap(struct myrb_hba *cb)
1202 if (cb->ldev_info_buf) {
1203 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1205 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1206 cb->ldev_info_buf, cb->ldev_info_addr);
1207 cb->ldev_info_buf = NULL;
1209 if (cb->err_table) {
/* error table has one entry per (channel, target) pair */
1210 size_t err_table_size = sizeof(struct myrb_error_entry) *
1211 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1212 dma_free_coherent(&cb->pdev->dev, err_table_size,
1213 cb->err_table, cb->err_table_addr);
1214 cb->err_table = NULL;
1217 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1218 cb->enquiry, cb->enquiry_addr);
1221 if (cb->first_stat_mbox) {
1222 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1223 cb->first_stat_mbox, cb->stat_mbox_addr);
1224 cb->first_stat_mbox = NULL;
1226 if (cb->first_cmd_mbox) {
1227 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1228 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1229 cb->first_cmd_mbox = NULL;
1234 * myrb_cleanup - cleanup controller structures
/*
 * Full controller teardown: frees the memory mailbox/status structures,
 * disables interrupts and unmaps MMIO, releases the IRQ and I/O region,
 * then disables the PCI device and drops the Scsi_Host reference.
 */
1236 static void myrb_cleanup(struct myrb_hba *cb)
1238 struct pci_dev *pdev = cb->pdev;
1240 /* Free the memory mailbox, status, and related structures */
1243 if (cb->mmio_base) {
/* quiesce the hardware before tearing down the mapping */
1244 cb->disable_intr(cb->io_base);
1245 iounmap(cb->mmio_base);
1248 free_irq(cb->irq, cb);
1250 release_region(cb->io_addr, 0x80);
1251 pci_set_drvdata(pdev, NULL);
1252 pci_disable_device(pdev);
1253 scsi_host_put(cb->host);
/* SCSI EH host-reset handler: performs a hardware reset via the
 * board-specific reset hook. */
1256 static int myrb_host_reset(struct scsi_cmnd *scmd)
1258 struct Scsi_Host *shost = scmd->device->host;
1259 struct myrb_hba *cb = shost_priv(shost);
1261 cb->reset(cb->io_base);
/*
 * Queue a SCSI command to a physical device via a Direct CDB (DCDB)
 * pass-through mailbox command.  The DCDB is allocated from a DMA pool,
 * filled from the scsi_cmnd, and handed to the board-specific qcmd hook
 * under the queue lock.
 */
1265 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1266 struct scsi_cmnd *scmd)
1268 struct myrb_hba *cb = shost_priv(shost);
1269 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1270 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1271 struct myrb_dcdb *dcdb;
1272 dma_addr_t dcdb_addr;
1273 struct scsi_device *sdev = scmd->device;
1274 struct scatterlist *sgl;
1275 unsigned long flags;
1278 myrb_reset_cmd(cmd_blk);
/* GFP_ATOMIC: queuecommand may run in atomic context */
1279 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1281 return SCSI_MLQUEUE_HOST_BUSY;
1282 nsge = scsi_dma_map(scmd);
/* DMA mapping failed: release the DCDB and fail the command */
1284 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1285 scmd->result = (DID_ERROR << 16);
1286 scmd->scsi_done(scmd);
1290 mbox->type3.opcode = MYRB_CMD_DCDB;
/* NOTE(review): '+ 3' presumably skips tag values reserved for internal
 * (monitor/direct) commands — confirm against the tag assignments. */
1291 mbox->type3.id = scmd->request->tag + 3;
1292 mbox->type3.addr = dcdb_addr;
1293 dcdb->channel = sdev->channel;
1294 dcdb->target = sdev->id;
/* translate the DMA direction into the DCDB transfer-type field */
1295 switch (scmd->sc_data_direction) {
1297 dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1300 dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1302 case DMA_FROM_DEVICE:
1303 dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1306 dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1309 dcdb->early_status = false;
/* map the request timeout (seconds) onto the coarse firmware buckets */
1310 if (scmd->request->timeout <= 10)
1311 dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1312 else if (scmd->request->timeout <= 60)
1313 dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1314 else if (scmd->request->timeout <= 600)
1315 dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1317 dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1318 dcdb->no_autosense = false;
1319 dcdb->allow_disconnect = true;
1320 sgl = scsi_sglist(scmd);
1321 dcdb->dma_addr = sg_dma_address(sgl);
/* transfer length is split into a 16-bit low part and a 4-bit high part */
1322 if (sg_dma_len(sgl) > USHRT_MAX) {
1323 dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1324 dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1326 dcdb->xfer_len_lo = sg_dma_len(sgl);
1327 dcdb->xfer_len_hi4 = 0;
1329 dcdb->cdb_len = scmd->cmd_len;
1330 dcdb->sense_len = sizeof(dcdb->sense);
1331 memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1333 spin_lock_irqsave(&cb->queue_lock, flags);
1334 cb->qcmd(cb, cmd_blk);
1335 spin_unlock_irqrestore(&cb->queue_lock, flags);
/*
 * Emulate a SCSI INQUIRY response for a logical drive.  The template below
 * carries vendor "MYLEX   " (bytes 8-15); the model name and firmware
 * version are patched in before copying to the command's data buffer.
 */
1339 static void myrb_inquiry(struct myrb_hba *cb,
1340 struct scsi_cmnd *scmd)
1342 unsigned char inq[36] = {
1343 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347 0x20, 0x20, 0x20, 0x20,
/* advertise wide-bus capability bits according to the controller width */
1350 if (cb->bus_width > 16)
1352 if (cb->bus_width > 8)
1354 memcpy(&inq[16], cb->model_name, 16);
/* build the 4-char product revision from pieces of the fw version string */
1355 memcpy(&inq[32], cb->fw_version, 1);
1356 memcpy(&inq[33], &cb->fw_version[2], 2);
1357 memcpy(&inq[35], &cb->fw_version[7], 1);
1359 scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
/*
 * Emulate MODE SENSE for a logical drive: builds a mode parameter header,
 * an optional block descriptor (suppressed when the DBD bit is set), and a
 * caching mode page reflecting write-back state and segment size.
 */
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364 struct myrb_ldev_info *ldev_info)
1366 unsigned char modes[32], *mode_pg;
/* DBD (disable block descriptors) bit of the CDB */
1370 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1373 mode_pg = &modes[4];
1376 mode_pg = &modes[12];
1378 memset(modes, 0, sizeof(modes));
1379 modes[0] = mode_len - 1;
1381 unsigned char *block_desc = &modes[4];
1384 put_unaligned_be32(ldev_info->size, &block_desc[0]);
1385 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1389 if (ldev_info->wb_enabled)
1391 if (cb->segment_size) {
1393 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1396 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
/* Emulate REQUEST SENSE: build a "no sense" buffer and return it as data. */
1399 static void myrb_request_sense(struct myrb_hba *cb,
1400 struct scsi_cmnd *scmd)
1402 scsi_build_sense_buffer(0, scmd->sense_buffer,
1404 scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1405 SCSI_SENSE_BUFFERSIZE)
/*
 * Emulate READ CAPACITY(10) for a logical drive: returns the last LBA
 * (size - 1) and the block size, both big-endian, in an 8-byte payload.
 */
1408 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1409 struct myrb_ldev_info *ldev_info)
1411 unsigned char data[8];
1413 dev_dbg(&scmd->device->sdev_gendev,
1414 "Capacity %u, blocksize %u\n",
1415 ldev_info->size, cb->ldev_block_size);
1416 put_unaligned_be32(ldev_info->size - 1, &data[0]);
1417 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1418 scsi_sg_copy_from_buffer(scmd, data, 8);
/*
 * Queue a SCSI command addressed to a logical drive.  Non-I/O commands
 * (INQUIRY, MODE SENSE, READ CAPACITY, ...) are emulated in the driver;
 * READ/WRITE commands are translated into controller READ/WRITE (or
 * scatter/gather READ_SG/WRITE_SG) mailbox commands.
 */
1421 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1422 struct scsi_cmnd *scmd)
1424 struct myrb_hba *cb = shost_priv(shost);
1425 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1426 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1427 struct myrb_ldev_info *ldev_info;
1428 struct scsi_device *sdev = scmd->device;
1429 struct scatterlist *sgl;
1430 unsigned long flags;
1435 ldev_info = sdev->hostdata;
/* only online and write-only drives accept commands */
1436 if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1437 ldev_info->state != MYRB_DEVICE_WO) {
1438 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1439 sdev->id, ldev_info ? ldev_info->state : 0xff);
1440 scmd->result = (DID_BAD_TARGET << 16);
1441 scmd->scsi_done(scmd);
1444 switch (scmd->cmnd[0]) {
1445 case TEST_UNIT_READY:
1446 scmd->result = (DID_OK << 16);
1447 scmd->scsi_done(scmd);
/* EVPD bit set is not supported -> ILLEGAL REQUEST, invalid field */
1450 if (scmd->cmnd[1] & 1) {
1451 /* Illegal request, invalid field in CDB */
1452 scsi_build_sense_buffer(0, scmd->sense_buffer,
1453 ILLEGAL_REQUEST, 0x24, 0);
1454 scmd->result = (DRIVER_SENSE << 24) |
1455 SAM_STAT_CHECK_CONDITION;
1457 myrb_inquiry(cb, scmd);
1458 scmd->result = (DID_OK << 16);
1460 scmd->scsi_done(scmd);
1462 case SYNCHRONIZE_CACHE:
1463 scmd->result = (DID_OK << 16);
1464 scmd->scsi_done(scmd);
/* only "return all pages" (0x3F) and the caching page (0x08) */
1467 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1468 (scmd->cmnd[2] & 0x3F) != 0x08) {
1469 /* Illegal request, invalid field in CDB */
1470 scsi_build_sense_buffer(0, scmd->sense_buffer,
1471 ILLEGAL_REQUEST, 0x24, 0);
1472 scmd->result = (DRIVER_SENSE << 24) |
1473 SAM_STAT_CHECK_CONDITION;
1475 myrb_mode_sense(cb, scmd, ldev_info);
1476 scmd->result = (DID_OK << 16);
1478 scmd->scsi_done(scmd);
/* READ CAPACITY: reject RelAdr/PMI variants */
1481 if ((scmd->cmnd[1] & 1) ||
1482 (scmd->cmnd[8] & 1)) {
1483 /* Illegal request, invalid field in CDB */
1484 scsi_build_sense_buffer(0, scmd->sense_buffer,
1485 ILLEGAL_REQUEST, 0x24, 0);
1486 scmd->result = (DRIVER_SENSE << 24) |
1487 SAM_STAT_CHECK_CONDITION;
1488 scmd->scsi_done(scmd);
1491 lba = get_unaligned_be32(&scmd->cmnd[2]);
1493 /* Illegal request, invalid field in CDB */
1494 scsi_build_sense_buffer(0, scmd->sense_buffer,
1495 ILLEGAL_REQUEST, 0x24, 0);
1496 scmd->result = (DRIVER_SENSE << 24) |
1497 SAM_STAT_CHECK_CONDITION;
1498 scmd->scsi_done(scmd);
1501 myrb_read_capacity(cb, scmd, ldev_info);
1502 scmd->scsi_done(scmd);
1505 myrb_request_sense(cb, scmd);
1506 scmd->result = (DID_OK << 16);
1508 case SEND_DIAGNOSTIC:
/* only the default self-test (0x04) is accepted */
1509 if (scmd->cmnd[1] != 0x04) {
1510 /* Illegal request, invalid field in CDB */
1511 scsi_build_sense_buffer(0, scmd->sense_buffer,
1512 ILLEGAL_REQUEST, 0x24, 0);
1513 scmd->result = (DRIVER_SENSE << 24) |
1514 SAM_STAT_CHECK_CONDITION;
1516 /* Assume good status */
1517 scmd->result = (DID_OK << 16);
1519 scmd->scsi_done(scmd);
/* 6-byte READ: reading a write-only drive is data-protected */
1522 if (ldev_info->state == MYRB_DEVICE_WO) {
1523 /* Data protect, attempt to read invalid data */
1524 scsi_build_sense_buffer(0, scmd->sense_buffer,
1525 DATA_PROTECT, 0x21, 0x06);
1526 scmd->result = (DRIVER_SENSE << 24) |
1527 SAM_STAT_CHECK_CONDITION;
1528 scmd->scsi_done(scmd);
/* 6-byte CDB: 21-bit LBA, 8-bit transfer length */
1533 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1534 (scmd->cmnd[2] << 8) |
1536 block_cnt = scmd->cmnd[4];
1539 if (ldev_info->state == MYRB_DEVICE_WO) {
1540 /* Data protect, attempt to read invalid data */
1541 scsi_build_sense_buffer(0, scmd->sense_buffer,
1542 DATA_PROTECT, 0x21, 0x06);
1543 scmd->result = (DRIVER_SENSE << 24) |
1544 SAM_STAT_CHECK_CONDITION;
1545 scmd->scsi_done(scmd);
/* 10-byte CDB: 32-bit LBA, 16-bit transfer length */
1550 case VERIFY: /* 0x2F */
1551 case WRITE_VERIFY: /* 0x2E */
1552 lba = get_unaligned_be32(&scmd->cmnd[2]);
1553 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1556 if (ldev_info->state == MYRB_DEVICE_WO) {
1557 /* Data protect, attempt to read invalid data */
1558 scsi_build_sense_buffer(0, scmd->sense_buffer,
1559 DATA_PROTECT, 0x21, 0x06);
1560 scmd->result = (DRIVER_SENSE << 24) |
1561 SAM_STAT_CHECK_CONDITION;
1562 scmd->scsi_done(scmd);
/* 12-byte CDB: 32-bit LBA, 32-bit transfer length */
1567 case VERIFY_12: /* 0xAF */
1568 case WRITE_VERIFY_12: /* 0xAE */
1569 lba = get_unaligned_be32(&scmd->cmnd[2]);
1570 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1573 /* Illegal request, invalid opcode */
1574 scsi_build_sense_buffer(0, scmd->sense_buffer,
1575 ILLEGAL_REQUEST, 0x20, 0);
1576 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1577 scmd->scsi_done(scmd);
/* build the actual READ/WRITE mailbox command */
1581 myrb_reset_cmd(cmd_blk);
/* NOTE(review): '+ 3' presumably skips reserved internal command IDs —
 * confirm against the driver's tag assignments. */
1582 mbox->type5.id = scmd->request->tag + 3;
1583 if (scmd->sc_data_direction == DMA_NONE)
1585 nsge = scsi_dma_map(scmd);
/* single-segment transfer: plain READ/WRITE with a direct address */
1587 sgl = scsi_sglist(scmd);
1588 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1589 mbox->type5.opcode = MYRB_CMD_READ;
1591 mbox->type5.opcode = MYRB_CMD_WRITE;
1593 mbox->type5.ld.xfer_len = block_cnt;
1594 mbox->type5.ld.ldev_num = sdev->id;
1595 mbox->type5.lba = lba;
1596 mbox->type5.addr = (u32)sg_dma_address(sgl);
/* multi-segment transfer: build a hardware S/G list from a DMA pool */
1598 struct myrb_sge *hw_sgl;
1599 dma_addr_t hw_sgl_addr;
1602 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1604 return SCSI_MLQUEUE_HOST_BUSY;
1606 cmd_blk->sgl = hw_sgl;
1607 cmd_blk->sgl_addr = hw_sgl_addr;
1609 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1610 mbox->type5.opcode = MYRB_CMD_READ_SG;
1612 mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1614 mbox->type5.ld.xfer_len = block_cnt;
1615 mbox->type5.ld.ldev_num = sdev->id;
1616 mbox->type5.lba = lba;
1617 mbox->type5.addr = hw_sgl_addr;
1618 mbox->type5.sg_count = nsge;
1620 scsi_for_each_sg(scmd, sgl, nsge, i) {
1621 hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1622 hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1627 spin_lock_irqsave(&cb->queue_lock, flags);
1628 cb->qcmd(cb, cmd_blk);
1629 spin_unlock_irqrestore(&cb->queue_lock, flags);
/*
 * Top-level queuecommand: dispatch to the logical-drive path for the
 * logical channel, the pass-through path for physical channels, and
 * reject anything beyond the logical channel.
 */
1634 static int myrb_queuecommand(struct Scsi_Host *shost,
1635 struct scsi_cmnd *scmd)
1637 struct scsi_device *sdev = scmd->device;
1639 if (sdev->channel > myrb_logical_channel(shost)) {
1640 scmd->result = (DID_BAD_TARGET << 16);
1641 scmd->scsi_done(scmd);
1644 if (sdev->channel == myrb_logical_channel(shost))
1645 return myrb_ldev_queuecommand(shost, scmd);
1647 return myrb_pthru_queuecommand(shost, scmd);
/*
 * slave_alloc for a logical drive: copy the cached ldev info into a
 * private per-sdev allocation and register the drive's RAID level with
 * the raid transport class.
 */
1650 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1652 struct myrb_hba *cb = shost_priv(sdev->host);
1653 struct myrb_ldev_info *ldev_info;
1654 unsigned short ldev_num = sdev->id;
1655 enum raid_level level;
/* the cached info buffer is indexed by logical device number */
1657 ldev_info = cb->ldev_info_buf + ldev_num;
1661 sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1662 if (!sdev->hostdata)
1664 dev_dbg(&sdev->sdev_gendev,
1665 "slave alloc ldev %d state %x\n",
1666 ldev_num, ldev_info->state);
1667 memcpy(sdev->hostdata, ldev_info,
1668 sizeof(*ldev_info));
/* map the controller's RAID level onto the transport-class enum;
 * note LEVEL0 deliberately maps to LINEAR here */
1669 switch (ldev_info->raid_level) {
1670 case MYRB_RAID_LEVEL0:
1671 level = RAID_LEVEL_LINEAR;
1673 case MYRB_RAID_LEVEL1:
1674 level = RAID_LEVEL_1;
1676 case MYRB_RAID_LEVEL3:
1677 level = RAID_LEVEL_3;
1679 case MYRB_RAID_LEVEL5:
1680 level = RAID_LEVEL_5;
1682 case MYRB_RAID_LEVEL6:
1683 level = RAID_LEVEL_6;
1685 case MYRB_RAID_JBOD:
1686 level = RAID_LEVEL_JBOD;
1689 level = RAID_LEVEL_UNKNOWN;
1692 raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
/*
 * slave_alloc for a physical device: query the controller for the device
 * state and attach it as sdev->hostdata; devices that are absent are
 * rejected so they are not exposed.
 */
1696 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1698 struct myrb_hba *cb = shost_priv(sdev->host);
1699 struct myrb_pdev_state *pdev_info;
1700 unsigned short status;
/* NOTE(review): '>' lets sdev->id == MYRB_MAX_TARGETS through — verify
 * whether this bound should be '>='. */
1702 if (sdev->id > MYRB_MAX_TARGETS)
/* GFP_DMA: buffer is handed to the controller by bus address */
1705 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1709 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1711 if (status != MYRB_STATUS_SUCCESS) {
1712 dev_dbg(&sdev->sdev_gendev,
1713 "Failed to get device state, status %x\n",
1718 if (!pdev_info->present) {
1719 dev_dbg(&sdev->sdev_gendev,
1720 "device not present, skip\n");
1724 dev_dbg(&sdev->sdev_gendev,
1725 "slave alloc pdev %d:%d state %x\n",
1726 sdev->channel, sdev->id, pdev_info->state);
1727 sdev->hostdata = pdev_info;
/* Dispatch slave_alloc to the logical- or physical-device variant by
 * channel; channels past the logical channel are rejected. */
1732 static int myrb_slave_alloc(struct scsi_device *sdev)
1734 if (sdev->channel > myrb_logical_channel(sdev->host))
1740 if (sdev->channel == myrb_logical_channel(sdev->host))
1741 return myrb_ldev_slave_alloc(sdev);
1743 return myrb_pdev_slave_alloc(sdev);
/*
 * slave_configure: physical devices are hidden from upper-layer drivers
 * (no_uld_attach); logical drives get a state notice when not online and
 * have tagged queueing enabled.
 */
1746 static int myrb_slave_configure(struct scsi_device *sdev)
1748 struct myrb_ldev_info *ldev_info;
1750 if (sdev->channel > myrb_logical_channel(sdev->host))
1753 if (sdev->channel < myrb_logical_channel(sdev->host)) {
/* physical devices are managed by the RAID firmware, not sd */
1754 sdev->no_uld_attach = 1;
1760 ldev_info = sdev->hostdata;
1763 if (ldev_info->state != MYRB_DEVICE_ONLINE)
1764 sdev_printk(KERN_INFO, sdev,
1765 "Logical drive is %s\n",
1766 myrb_devstate_name(ldev_info->state));
1768 sdev->tagged_supported = 1;
/* Free the per-device info allocated in slave_alloc (kfree(NULL) is ok). */
1772 static void myrb_slave_destroy(struct scsi_device *sdev)
1774 kfree(sdev->hostdata);
/* Report the BIOS disk geometry (heads/sectors chosen at config time;
 * cylinders derived from capacity). */
1777 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1778 sector_t capacity, int geom[])
1780 struct myrb_hba *cb = shost_priv(sdev->host);
1782 geom[0] = cb->ldev_geom_heads;
1783 geom[1] = cb->ldev_geom_sectors;
1784 geom[2] = sector_div(capacity, geom[0] * geom[1]);
/*
 * sysfs 'raid_state' (read): for logical drives, report the cached device
 * state; for physical devices, refresh the state from the controller first.
 */
1789 static ssize_t raid_state_show(struct device *dev,
1790 struct device_attribute *attr, char *buf)
1792 struct scsi_device *sdev = to_scsi_device(dev);
1793 struct myrb_hba *cb = shost_priv(sdev->host);
1796 if (!sdev->hostdata)
1797 return snprintf(buf, 16, "Unknown\n");
1799 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1800 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1803 name = myrb_devstate_name(ldev_info->state);
1805 ret = snprintf(buf, 32, "%s\n", name);
/* state value not in the name table */
1807 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1810 struct myrb_pdev_state *pdev_info = sdev->hostdata;
1811 unsigned short status;
1814 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1816 if (status != MYRB_STATUS_SUCCESS)
1817 sdev_printk(KERN_INFO, sdev,
1818 "Failed to get device state, status %x\n",
1821 if (!pdev_info->present)
1824 name = myrb_devstate_name(pdev_info->state);
1826 ret = snprintf(buf, 32, "%s\n", name);
1828 ret = snprintf(buf, 32, "Invalid (%02X)\n",
/*
 * sysfs 'raid_state' (write): accepts "kill"/"offline", "online" or
 * "standby" and asks the controller to move the physical device into the
 * requested state, reporting firmware-specific failures.
 */
1834 static ssize_t raid_state_store(struct device *dev,
1835 struct device_attribute *attr, const char *buf, size_t count)
1837 struct scsi_device *sdev = to_scsi_device(dev);
1838 struct myrb_hba *cb = shost_priv(sdev->host);
1839 struct myrb_pdev_state *pdev_info;
1840 enum myrb_devstate new_state;
1841 unsigned short status;
1843 if (!strncmp(buf, "kill", 4) ||
1844 !strncmp(buf, "offline", 7))
1845 new_state = MYRB_DEVICE_DEAD;
1846 else if (!strncmp(buf, "online", 6))
1847 new_state = MYRB_DEVICE_ONLINE;
1848 else if (!strncmp(buf, "standby", 7))
1849 new_state = MYRB_DEVICE_STANDBY;
1853 pdev_info = sdev->hostdata;
1855 sdev_printk(KERN_INFO, sdev,
1856 "Failed - no physical device information\n");
1859 if (!pdev_info->present) {
1860 sdev_printk(KERN_INFO, sdev,
1861 "Failed - device not present\n");
/* already in the requested state: nothing to do */
1865 if (pdev_info->state == new_state)
1868 status = myrb_set_pdev_state(cb, sdev, new_state);
1870 case MYRB_STATUS_SUCCESS:
1872 case MYRB_STATUS_START_DEVICE_FAILED:
1873 sdev_printk(KERN_INFO, sdev,
1874 "Failed - Unable to Start Device\n");
1877 case MYRB_STATUS_NO_DEVICE:
1878 sdev_printk(KERN_INFO, sdev,
1879 "Failed - No Device at Address\n");
1882 case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1883 sdev_printk(KERN_INFO, sdev,
1884 "Failed - Invalid Channel or Target or Modifier\n");
1887 case MYRB_STATUS_CHANNEL_BUSY:
1888 sdev_printk(KERN_INFO, sdev,
1889 "Failed - Channel Busy\n");
1893 sdev_printk(KERN_INFO, sdev,
1894 "Failed - Unexpected Status %04X\n", status);
1900 static DEVICE_ATTR_RW(raid_state);
/* sysfs 'raid_level' (read-only): RAID level name for logical drives,
 * "Physical Drive" otherwise. */
1902 static ssize_t raid_level_show(struct device *dev,
1903 struct device_attribute *attr, char *buf)
1905 struct scsi_device *sdev = to_scsi_device(dev);
1907 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1908 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1914 name = myrb_raidlevel_name(ldev_info->raid_level);
1916 return snprintf(buf, 32, "Invalid (%02X)\n",
1918 return snprintf(buf, 32, "%s\n", name);
1920 return snprintf(buf, 32, "Physical Drive\n");
1922 static DEVICE_ATTR_RO(raid_level);
/*
 * sysfs 'rebuild' (read): report rebuild progress for this logical drive,
 * as "block X of Y", or a not-rebuilding message.
 */
1924 static ssize_t rebuild_show(struct device *dev,
1925 struct device_attribute *attr, char *buf)
1927 struct scsi_device *sdev = to_scsi_device(dev);
1928 struct myrb_hba *cb = shost_priv(sdev->host);
1929 struct myrb_rbld_progress rbld_buf;
1930 unsigned char status;
1932 if (sdev->channel < myrb_logical_channel(sdev->host))
1933 return snprintf(buf, 32, "physical device - not rebuilding\n");
1935 status = myrb_get_rbld_progress(cb, &rbld_buf);
/* progress only applies if the rebuild targets this logical drive */
1937 if (rbld_buf.ldev_num != sdev->id ||
1938 status != MYRB_STATUS_SUCCESS)
1939 return snprintf(buf, 32, "not rebuilding\n");
1941 return snprintf(buf, 32, "rebuilding block %u of %u\n",
1942 rbld_buf.ldev_size - rbld_buf.blocks_left,
1943 rbld_buf.ldev_size);
/*
 * sysfs 'rebuild' (write): non-zero starts an asynchronous rebuild of the
 * addressed physical device; zero cancels an in-progress rebuild via a
 * REBUILD_CONTROL command with rate 0xFF.  Serialized on dcmd_mutex since
 * the shared direct-command block is used.
 */
1946 static ssize_t rebuild_store(struct device *dev,
1947 struct device_attribute *attr, const char *buf, size_t count)
1949 struct scsi_device *sdev = to_scsi_device(dev);
1950 struct myrb_hba *cb = shost_priv(sdev->host);
1951 struct myrb_cmdblk *cmd_blk;
1952 union myrb_cmd_mbox *mbox;
1953 unsigned short status;
1957 rc = kstrtoint(buf, 0, &start);
1961 if (sdev->channel >= myrb_logical_channel(sdev->host))
1964 status = myrb_get_rbld_progress(cb, NULL);
/* starting: refuse if a rebuild is already running */
1966 if (status == MYRB_STATUS_SUCCESS) {
1967 sdev_printk(KERN_INFO, sdev,
1968 "Rebuild Not Initiated; already in progress\n");
1971 mutex_lock(&cb->dcmd_mutex);
1972 cmd_blk = &cb->dcmd_blk;
1973 myrb_reset_cmd(cmd_blk);
1974 mbox = &cmd_blk->mbox;
1975 mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1976 mbox->type3D.id = MYRB_DCMD_TAG;
1977 mbox->type3D.channel = sdev->channel;
1978 mbox->type3D.target = sdev->id;
1979 status = myrb_exec_cmd(cb, cmd_blk);
1980 mutex_unlock(&cb->dcmd_mutex);
/* cancel path */
1982 struct pci_dev *pdev = cb->pdev;
1983 unsigned char *rate;
1984 dma_addr_t rate_addr;
1986 if (status != MYRB_STATUS_SUCCESS) {
1987 sdev_printk(KERN_INFO, sdev,
1988 "Rebuild Not Cancelled; not in progress\n");
/* REBUILD_CONTROL needs a DMA-visible rate byte */
1992 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1993 &rate_addr, GFP_KERNEL);
1995 sdev_printk(KERN_INFO, sdev,
1996 "Cancellation of Rebuild Failed - Out of Memory\n");
1999 mutex_lock(&cb->dcmd_mutex);
2000 cmd_blk = &cb->dcmd_blk;
2001 myrb_reset_cmd(cmd_blk);
2002 mbox = &cmd_blk->mbox;
2003 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2004 mbox->type3R.id = MYRB_DCMD_TAG;
/* rate 0xFF requests cancellation */
2005 mbox->type3R.rbld_rate = 0xFF;
2006 mbox->type3R.addr = rate_addr;
2007 status = myrb_exec_cmd(cb, cmd_blk);
2008 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2009 mutex_unlock(&cb->dcmd_mutex);
2011 if (status == MYRB_STATUS_SUCCESS) {
2012 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2013 start ? "Initiated" : "Cancelled");
2017 sdev_printk(KERN_INFO, sdev,
2018 "Rebuild Not Cancelled, status 0x%x\n",
/* translate the firmware start-failure codes into messages */
2024 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2025 msg = "Attempt to Rebuild Online or Unresponsive Drive";
2027 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2028 msg = "New Disk Failed During Rebuild";
2030 case MYRB_STATUS_INVALID_ADDRESS:
2031 msg = "Invalid Device Address";
2033 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2034 msg = "Already in Progress";
2041 sdev_printk(KERN_INFO, sdev,
2042 "Rebuild Failed - %s\n", msg);
2044 sdev_printk(KERN_INFO, sdev,
2045 "Rebuild Failed, status 0x%x\n", status);
2049 static DEVICE_ATTR_RW(rebuild);
/*
 * sysfs 'consistency_check' (write): non-zero starts an asynchronous
 * consistency check with auto-restore on the addressed logical drive;
 * zero cancels it via REBUILD_CONTROL with rate 0xFF.  Uses the shared
 * direct-command block under dcmd_mutex, like rebuild_store.
 */
2051 static ssize_t consistency_check_store(struct device *dev,
2052 struct device_attribute *attr, const char *buf, size_t count)
2054 struct scsi_device *sdev = to_scsi_device(dev);
2055 struct myrb_hba *cb = shost_priv(sdev->host);
2056 struct myrb_rbld_progress rbld_buf;
2057 struct myrb_cmdblk *cmd_blk;
2058 union myrb_cmd_mbox *mbox;
2059 unsigned short ldev_num = 0xFFFF;
2060 unsigned short status;
2064 rc = kstrtoint(buf, 0, &start);
2068 if (sdev->channel < myrb_logical_channel(sdev->host))
2071 status = myrb_get_rbld_progress(cb, &rbld_buf);
/* starting: refuse if a check/rebuild is already running */
2073 if (status == MYRB_STATUS_SUCCESS) {
2074 sdev_printk(KERN_INFO, sdev,
2075 "Check Consistency Not Initiated; already in progress\n");
2078 mutex_lock(&cb->dcmd_mutex);
2079 cmd_blk = &cb->dcmd_blk;
2080 myrb_reset_cmd(cmd_blk);
2081 mbox = &cmd_blk->mbox;
2082 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2083 mbox->type3C.id = MYRB_DCMD_TAG;
2084 mbox->type3C.ldev_num = sdev->id;
2085 mbox->type3C.auto_restore = true;
2087 status = myrb_exec_cmd(cb, cmd_blk);
2088 mutex_unlock(&cb->dcmd_mutex);
/* cancel path */
2090 struct pci_dev *pdev = cb->pdev;
2091 unsigned char *rate;
2092 dma_addr_t rate_addr;
2094 if (ldev_num != sdev->id) {
2095 sdev_printk(KERN_INFO, sdev,
2096 "Check Consistency Not Cancelled; not in progress\n");
2099 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2100 &rate_addr, GFP_KERNEL);
2102 sdev_printk(KERN_INFO, sdev,
2103 "Cancellation of Check Consistency Failed - Out of Memory\n");
2106 mutex_lock(&cb->dcmd_mutex);
2107 cmd_blk = &cb->dcmd_blk;
2108 myrb_reset_cmd(cmd_blk);
2109 mbox = &cmd_blk->mbox;
2110 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2111 mbox->type3R.id = MYRB_DCMD_TAG;
/* rate 0xFF requests cancellation */
2112 mbox->type3R.rbld_rate = 0xFF;
2113 mbox->type3R.addr = rate_addr;
2114 status = myrb_exec_cmd(cb, cmd_blk);
2115 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2116 mutex_unlock(&cb->dcmd_mutex);
2118 if (status == MYRB_STATUS_SUCCESS) {
2119 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2120 start ? "Initiated" : "Cancelled");
2124 sdev_printk(KERN_INFO, sdev,
2125 "Check Consistency Not Cancelled, status 0x%x\n",
/* translate the firmware start-failure codes into messages */
2131 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2132 msg = "Dependent Physical Device is DEAD";
2134 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2135 msg = "New Disk Failed During Rebuild";
2137 case MYRB_STATUS_INVALID_ADDRESS:
2138 msg = "Invalid or Nonredundant Logical Drive";
2140 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2141 msg = "Already in Progress";
2148 sdev_printk(KERN_INFO, sdev,
2149 "Check Consistency Failed - %s\n", msg);
2151 sdev_printk(KERN_INFO, sdev,
2152 "Check Consistency Failed, status 0x%x\n", status);
/* sysfs 'consistency_check' (read): same progress report as 'rebuild'. */
2157 static ssize_t consistency_check_show(struct device *dev,
2158 struct device_attribute *attr, char *buf)
2160 return rebuild_show(dev, attr, buf);
2162 static DEVICE_ATTR_RW(consistency_check);
/* sysfs host attribute: controller number. */
2164 static ssize_t ctlr_num_show(struct device *dev,
2165 struct device_attribute *attr, char *buf)
2167 struct Scsi_Host *shost = class_to_shost(dev);
2168 struct myrb_hba *cb = shost_priv(shost);
2170 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2172 static DEVICE_ATTR_RO(ctlr_num);
/* sysfs host attribute: firmware version string. */
2174 static ssize_t firmware_show(struct device *dev,
2175 struct device_attribute *attr, char *buf)
2177 struct Scsi_Host *shost = class_to_shost(dev);
2178 struct myrb_hba *cb = shost_priv(shost);
2180 return snprintf(buf, 16, "%s\n", cb->fw_version);
2182 static DEVICE_ATTR_RO(firmware);
/* sysfs host attribute: controller model name. */
2184 static ssize_t model_show(struct device *dev,
2185 struct device_attribute *attr, char *buf)
2187 struct Scsi_Host *shost = class_to_shost(dev);
2188 struct myrb_hba *cb = shost_priv(shost);
2190 return snprintf(buf, 16, "%s\n", cb->model_name);
2192 static DEVICE_ATTR_RO(model);
/* sysfs host attribute (write-only): any write issues a controller
 * cache-flush command and logs the outcome. */
2194 static ssize_t flush_cache_store(struct device *dev,
2195 struct device_attribute *attr, const char *buf, size_t count)
2197 struct Scsi_Host *shost = class_to_shost(dev);
2198 struct myrb_hba *cb = shost_priv(shost);
2199 unsigned short status;
2201 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2202 if (status == MYRB_STATUS_SUCCESS) {
2203 shost_printk(KERN_INFO, shost,
2204 "Cache Flush Completed\n");
2207 shost_printk(KERN_INFO, shost,
2208 "Cache Flush Failed, status %x\n", status);
2211 static DEVICE_ATTR_WO(flush_cache);
/* per-device and per-host sysfs attribute tables wired into the template */
2213 static struct device_attribute *myrb_sdev_attrs[] = {
2215 &dev_attr_consistency_check,
2216 &dev_attr_raid_state,
2217 &dev_attr_raid_level,
2221 static struct device_attribute *myrb_shost_attrs[] = {
2225 &dev_attr_flush_cache,
/* SCSI host template: entry points and limits for the myrb driver. */
2229 static struct scsi_host_template myrb_template = {
2230 .module = THIS_MODULE,
2232 .proc_name = "myrb",
2233 .queuecommand = myrb_queuecommand,
2234 .eh_host_reset_handler = myrb_host_reset,
2235 .slave_alloc = myrb_slave_alloc,
2236 .slave_configure = myrb_slave_configure,
2237 .slave_destroy = myrb_slave_destroy,
2238 .bios_param = myrb_biosparam,
/* per-command private data: one myrb_cmdblk, reached via scsi_cmd_priv() */
2239 .cmd_size = sizeof(struct myrb_cmdblk),
2240 .shost_attrs = myrb_shost_attrs,
2241 .sdev_attrs = myrb_sdev_attrs,
2246 * myrb_is_raid - return boolean indicating device is raid volume
2247 * @dev the device struct object
/* raid-class hook: only devices on the logical channel are RAID volumes */
2249 static int myrb_is_raid(struct device *dev)
2251 struct scsi_device *sdev = to_scsi_device(dev);
2253 return sdev->channel == myrb_logical_channel(sdev->host);
2257 * myrb_get_resync - get raid volume resync percent complete
2258 * @dev the device struct object
/*
 * raid-class hook: derive the resync percentage from the controller's
 * rebuild-progress report for this logical drive (0 if none in progress).
 */
2260 static void myrb_get_resync(struct device *dev)
2262 struct scsi_device *sdev = to_scsi_device(dev);
2263 struct myrb_hba *cb = shost_priv(sdev->host);
2264 struct myrb_rbld_progress rbld_buf;
2265 unsigned int percent_complete = 0;
2266 unsigned short status;
2267 unsigned int ldev_size = 0, remaining = 0;
2269 if (sdev->channel < myrb_logical_channel(sdev->host))
2271 status = myrb_get_rbld_progress(cb, &rbld_buf);
2272 if (status == MYRB_STATUS_SUCCESS) {
2273 if (rbld_buf.ldev_num == sdev->id) {
2274 ldev_size = rbld_buf.ldev_size;
2275 remaining = rbld_buf.blocks_left;
2278 if (remaining && ldev_size)
2279 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2280 raid_set_resync(myrb_raid_template, dev, percent_complete);
2284 * myrb_get_state - get raid volume status
2285 * @dev the device struct object
/*
 * raid-class hook: map the logical drive's state (plus any in-progress
 * rebuild) onto the transport-class raid_state enum.
 */
2287 static void myrb_get_state(struct device *dev)
2289 struct scsi_device *sdev = to_scsi_device(dev);
2290 struct myrb_hba *cb = shost_priv(sdev->host);
2291 struct myrb_ldev_info *ldev_info = sdev->hostdata;
2292 enum raid_state state = RAID_STATE_UNKNOWN;
2293 unsigned short status;
2295 if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2296 state = RAID_STATE_UNKNOWN;
/* an active rebuild/check overrides the static state */
2298 status = myrb_get_rbld_progress(cb, NULL);
2299 if (status == MYRB_STATUS_SUCCESS)
2300 state = RAID_STATE_RESYNCING;
2302 switch (ldev_info->state) {
2303 case MYRB_DEVICE_ONLINE:
2304 state = RAID_STATE_ACTIVE;
2306 case MYRB_DEVICE_WO:
2307 case MYRB_DEVICE_CRITICAL:
2308 state = RAID_STATE_DEGRADED;
2311 state = RAID_STATE_OFFLINE;
2315 raid_set_state(myrb_raid_template, dev, state);
/* raid transport-class function table for the myrb host template */
2318 static struct raid_function_template myrb_raid_functions = {
2319 .cookie = &myrb_template,
2320 .is_raid = myrb_is_raid,
2321 .get_resync = myrb_get_resync,
2322 .get_state = myrb_get_state,
/*
 * Completion handler for a SCSI command: unmap DMA, free the DCDB and
 * hardware S/G list if used, translate the controller status into a SCSI
 * result (building sense data where appropriate), then complete the command.
 */
2325 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2326 struct scsi_cmnd *scmd)
2328 unsigned short status;
2333 scsi_dma_unmap(scmd);
2335 if (cmd_blk->dcdb) {
/* NOTE(review): copies 64 bytes of DCDB sense into scmd->sense_buffer —
 * confirm this matches both sizeof(dcdb->sense) and the midlayer's
 * sense buffer size. */
2336 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2337 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2338 cmd_blk->dcdb_addr);
2339 cmd_blk->dcdb = NULL;
2342 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2343 cmd_blk->sgl = NULL;
2344 cmd_blk->sgl_addr = 0;
2346 status = cmd_blk->status;
2348 case MYRB_STATUS_SUCCESS:
2349 case MYRB_STATUS_DEVICE_BUSY:
/* pass the firmware status through as the SCSI status byte */
2350 scmd->result = (DID_OK << 16) | status;
2352 case MYRB_STATUS_BAD_DATA:
2353 dev_dbg(&scmd->device->sdev_gendev,
2354 "Bad Data Encountered\n");
2355 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2356 /* Unrecovered read error */
2357 scsi_build_sense_buffer(0, scmd->sense_buffer,
2358 MEDIUM_ERROR, 0x11, 0);
2361 scsi_build_sense_buffer(0, scmd->sense_buffer,
2362 MEDIUM_ERROR, 0x0C, 0);
2363 scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2365 case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2366 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2367 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2368 /* Unrecovered read error, auto-reallocation failed */
2369 scsi_build_sense_buffer(0, scmd->sense_buffer,
2370 MEDIUM_ERROR, 0x11, 0x04);
2372 /* Write error, auto-reallocation failed */
2373 scsi_build_sense_buffer(0, scmd->sense_buffer,
2374 MEDIUM_ERROR, 0x0C, 0x02);
2375 scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2377 case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2378 dev_dbg(&scmd->device->sdev_gendev,
2379 "Logical Drive Nonexistent or Offline");
2380 scmd->result = (DID_BAD_TARGET << 16);
2382 case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2383 dev_dbg(&scmd->device->sdev_gendev,
2384 "Attempt to Access Beyond End of Logical Drive");
2385 /* Logical block address out of range */
2386 scsi_build_sense_buffer(0, scmd->sense_buffer,
2387 NOT_READY, 0x21, 0);
2389 case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2390 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2391 scmd->result = (DID_BAD_TARGET << 16);
2394 scmd_printk(KERN_ERR, scmd,
2395 "Unexpected Error Status %04X", status);
2396 scmd->result = (DID_ERROR << 16);
2399 scmd->scsi_done(scmd);
/* Completion handler for internal (non-SCSI) commands: wake the waiter,
 * clearing the completion pointer to single-shot it. */
2402 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2407 if (cmd_blk->completion) {
2408 complete(cmd_blk->completion);
2409 cmd_blk->completion = NULL;
/*
 * Periodic monitoring work: on each tick, service at most one pending
 * update class (event log, error table, rebuild progress, logical-drive
 * info, consistency check, background init) in priority order, otherwise
 * re-issue the controller enquiry; then reschedules itself.
 */
2413 static void myrb_monitor(struct work_struct *work)
2415 struct myrb_hba *cb = container_of(work,
2416 struct myrb_hba, monitor_work.work);
2417 struct Scsi_Host *shost = cb->host;
2418 unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2420 dev_dbg(&shost->shost_gendev, "monitor tick\n");
/* fetch one pending event log entry per tick */
2422 if (cb->new_ev_seq > cb->old_ev_seq) {
2423 int event = cb->old_ev_seq;
2425 dev_dbg(&shost->shost_gendev,
2426 "get event log no %d/%d\n",
2427 cb->new_ev_seq, event);
2428 myrb_get_event(cb, event);
2429 cb->old_ev_seq = event + 1;
2431 } else if (cb->need_err_info) {
2432 cb->need_err_info = false;
2433 dev_dbg(&shost->shost_gendev, "get error table\n");
2434 myrb_get_errtable(cb);
2436 } else if (cb->need_rbld && cb->rbld_first) {
2437 cb->need_rbld = false;
2438 dev_dbg(&shost->shost_gendev,
2439 "get rebuild progress\n");
2440 myrb_update_rbld_progress(cb);
2442 } else if (cb->need_ldev_info) {
2443 cb->need_ldev_info = false;
2444 dev_dbg(&shost->shost_gendev,
2445 "get logical drive info\n");
2446 myrb_get_ldev_info(cb);
2448 } else if (cb->need_rbld) {
2449 cb->need_rbld = false;
2450 dev_dbg(&shost->shost_gendev,
2451 "get rebuild progress\n");
2452 myrb_update_rbld_progress(cb);
2454 } else if (cb->need_cc_status) {
2455 cb->need_cc_status = false;
2456 dev_dbg(&shost->shost_gendev,
2457 "get consistency check progress\n");
2458 myrb_get_cc_progress(cb);
2460 } else if (cb->need_bgi_status) {
2461 cb->need_bgi_status = false;
2462 dev_dbg(&shost->shost_gendev, "get background init status\n");
2463 myrb_bgi_control(cb);
/* nothing pending: refresh the enquiry under the DMA mutex */
2466 dev_dbg(&shost->shost_gendev, "new enquiry\n");
2467 mutex_lock(&cb->dma_mutex);
2468 myrb_hba_enquiry(cb);
2469 mutex_unlock(&cb->dma_mutex);
/* if the enquiry raised new work, run again sooner */
2470 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2471 cb->need_err_info || cb->need_rbld ||
2472 cb->need_ldev_info || cb->need_cc_status ||
2473 cb->need_bgi_status) {
2474 dev_dbg(&shost->shost_gendev,
2475 "reschedule monitor\n");
2480 cb->primary_monitor_time = jiffies;
2481 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2485 * myrb_err_status - reports controller BIOS messages
2487 * Controller BIOS messages are passed through the Error Status Register
2488 * when the driver performs the BIOS handshaking.
2490 * Return: true for fatal errors and false otherwise.
2492 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2493 unsigned char parm0, unsigned char parm1)
/* Decode a BIOS handshake error code into a log message; parm0/parm1
 * carry per-message detail (e.g. channel:target for device errors). */
2495 struct pci_dev *pdev = cb->pdev;
2499 dev_info(&pdev->dev,
2500 "Physical Device %d:%d Not Responding\n",
2504 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2507 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2510 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2513 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2516 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2520 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2523 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2526 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
/* dev_err level marks the codes the caller treats as fatal. */
2529 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2532 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2540 * Hardware-specific functions
2544 * DAC960 LA Series Controllers
/*
 * DAC960 LA series register access helpers.
 *
 * Thin wrappers around single MMIO accesses to the LA inbound doorbell
 * (IDB), outbound doorbell (ODB), interrupt-mask and mailbox registers.
 */
2547 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
/* Ring the doorbell: a new command is in the hardware mailbox. */
2549 writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2552 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
/* Tell the controller the hardware mailbox status has been consumed. */
2554 writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2557 static inline void DAC960_LA_gen_intr(void __iomem *base)
/* Request the controller to generate an interrupt. */
2559 writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2562 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
/* Request a controller reset. */
2564 writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2567 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
/* Ring the doorbell for a command posted via the memory mailbox. */
2569 writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2572 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2574 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
/* LA exposes an "empty" flag, hence the inversion. */
2576 return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2579 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2581 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
/* True until the firmware sets its INIT_DONE bit. */
2583 return !(idb & DAC960_LA_IDB_INIT_DONE);
2586 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2588 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2591 static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2593 writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2596 static inline void DAC960_LA_ack_intr(void __iomem *base)
/* Acknowledge hardware- and memory-mailbox interrupts in one write. */
2598 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2599 base + DAC960_LA_ODB_OFFSET);
2602 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2604 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2606 return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2609 static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2611 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2613 return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2616 static inline void DAC960_LA_enable_intr(void __iomem *base)
2618 unsigned char odb = 0xFF;
/* Clear only the DISABLE bit; all other mask bits remain set. */
2620 odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2621 writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2624 static inline void DAC960_LA_disable_intr(void __iomem *base)
2626 unsigned char odb = 0xFF;
2628 odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2629 writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2632 static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2634 unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2636 return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2639 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2640 union myrb_cmd_mbox *mbox)
/* Words 1-3 are copied first; word 0 (the opcode) is stored last so
 * the controller never observes a partially-written mailbox. */
2642 mem_mbox->words[1] = mbox->words[1];
2643 mem_mbox->words[2] = mbox->words[2];
2644 mem_mbox->words[3] = mbox->words[3];
2645 /* Memory barrier to prevent reordering */
2647 mem_mbox->words[0] = mbox->words[0];
2648 /* Memory barrier to force PCI access */
2652 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2653 union myrb_cmd_mbox *mbox)
/* Write a 13-byte command image directly into the mailbox registers. */
2655 writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2656 writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2657 writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2658 writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2661 static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2663 return readb(base + DAC960_LA_STSID_OFFSET);
2666 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2668 return readw(base + DAC960_LA_STS_OFFSET);
2672 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2673 unsigned char *param0, unsigned char *param1)
/* Fetch a pending BIOS error status plus its two parameter bytes and
 * clear the pending condition; bails out early when nothing is pending. */
2675 unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2677 if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2679 errsts &= ~DAC960_LA_ERRSTS_PENDING;
2682 *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2683 *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2684 writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2688 static inline unsigned short
2689 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2690 union myrb_cmd_mbox *mbox)
/* Synchronous single-command mailbox exchange, used only while the
 * memory mailbox interface is being brought up: wait for an empty
 * hardware mailbox, post the command, wait for and collect status. */
2692 unsigned short status;
2695 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2696 if (!DAC960_LA_hw_mbox_is_full(base))
2701 if (DAC960_LA_hw_mbox_is_full(base)) {
2703 "Timeout waiting for empty mailbox\n");
2704 return MYRB_STATUS_SUBSYS_TIMEOUT;
2706 DAC960_LA_write_hw_mbox(base, mbox);
2707 DAC960_LA_hw_mbox_new_cmd(base);
2709 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2710 if (DAC960_LA_hw_mbox_status_available(base))
2715 if (!DAC960_LA_hw_mbox_status_available(base)) {
2716 dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2717 return MYRB_STATUS_SUBSYS_TIMEOUT;
/* Latch the status before acknowledging, or it may be overwritten. */
2719 status = DAC960_LA_read_status(base);
2720 DAC960_LA_ack_hw_mbox_intr(base);
2721 DAC960_LA_ack_hw_mbox_status(base);
2726 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2727 struct myrb_hba *cb, void __iomem *base)
/* One-time LA controller bring-up: quiesce interrupts, wait for the
 * firmware to finish initialising (logging any BIOS error messages),
 * enable the memory mailbox interface and install the LA method
 * pointers on the HBA. */
2730 unsigned char error, parm0, parm1;
2732 DAC960_LA_disable_intr(base);
2733 DAC960_LA_ack_hw_mbox_status(base);
2735 while (DAC960_LA_init_in_progress(base) &&
2736 timeout < MYRB_MAILBOX_TIMEOUT) {
2737 if (DAC960_LA_read_error_status(base, &error,
2739 myrb_err_status(cb, error, parm0, parm1))
2744 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2746 "Timeout waiting for Controller Initialisation\n");
2749 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2751 "Unable to Enable Memory Mailbox Interface\n");
/* Leave the controller reset on failure so it is quiescent. */
2752 DAC960_LA_reset_ctrl(base);
2755 DAC960_LA_enable_intr(base);
2756 cb->qcmd = myrb_qcmd;
2757 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
/* Dual-mode controllers post commands via the memory mailbox doorbell. */
2758 if (cb->dual_mode_interface)
2759 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2761 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2762 cb->disable_intr = DAC960_LA_disable_intr;
2763 cb->reset = DAC960_LA_reset_ctrl;
2768 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
/* LA interrupt handler: drain the status mailbox ring under queue_lock.
 * Tags MYRB_DCMD_TAG/MYRB_MCMD_TAG identify the driver-internal command
 * blocks; any other tag maps to a SCSI command (tag is id - 3). */
2770 struct myrb_hba *cb = arg;
2771 void __iomem *base = cb->io_base;
2772 struct myrb_stat_mbox *next_stat_mbox;
2773 unsigned long flags;
2775 spin_lock_irqsave(&cb->queue_lock, flags);
2776 DAC960_LA_ack_intr(base);
2777 next_stat_mbox = cb->next_stat_mbox;
2778 while (next_stat_mbox->valid) {
2779 unsigned char id = next_stat_mbox->id;
2780 struct scsi_cmnd *scmd = NULL;
2781 struct myrb_cmdblk *cmd_blk = NULL;
2783 if (id == MYRB_DCMD_TAG)
2784 cmd_blk = &cb->dcmd_blk;
2785 else if (id == MYRB_MCMD_TAG)
2786 cmd_blk = &cb->mcmd_blk;
2788 scmd = scsi_host_find_tag(cb->host, id - 3);
2790 cmd_blk = scsi_cmd_priv(scmd);
2793 cmd_blk->status = next_stat_mbox->status;
2795 dev_err(&cb->pdev->dev,
2796 "Unhandled command completion %d\n", id);
/* Consume the slot and advance, wrapping at the end of the ring. */
2798 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2799 if (++next_stat_mbox > cb->last_stat_mbox)
2800 next_stat_mbox = cb->first_stat_mbox;
2804 myrb_handle_cmdblk(cb, cmd_blk);
2806 myrb_handle_scsi(cb, cmd_blk, scmd);
2809 cb->next_stat_mbox = next_stat_mbox;
2810 spin_unlock_irqrestore(&cb->queue_lock, flags);
/* Per-board-family method table for LA controllers; consumed by
 * myrb_detect() via the PCI id table's driver_data. */
2814 struct myrb_privdata DAC960_LA_privdata = {
2815 .hw_init = DAC960_LA_hw_init,
2816 .irq_handler = DAC960_LA_intr_handler,
2817 .mmio_size = DAC960_LA_mmio_size,
2821 * DAC960 PG Series Controllers
/*
 * DAC960 PG series register access helpers.
 *
 * Same structure as the LA helpers; PG doorbell registers are 32-bit
 * (writel/readl) and the full/in-progress bits are asserted directly.
 */
2823 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2825 writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2828 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2830 writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2833 static inline void DAC960_PG_gen_intr(void __iomem *base)
2835 writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2838 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2840 writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2843 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2845 writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2848 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2850 unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2852 return idb & DAC960_PG_IDB_HWMBOX_FULL;
2855 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2857 unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2859 return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2862 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2864 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2867 static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2869 writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2872 static inline void DAC960_PG_ack_intr(void __iomem *base)
/* Acknowledge hardware- and memory-mailbox interrupts in one write. */
2874 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2875 base + DAC960_PG_ODB_OFFSET);
2878 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2880 unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2882 return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2885 static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2887 unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2889 return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2892 static inline void DAC960_PG_enable_intr(void __iomem *base)
2894 unsigned int imask = (unsigned int)-1;
/* All mask bits set except DISABLE, i.e. interrupts on. */
2896 imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2897 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2900 static inline void DAC960_PG_disable_intr(void __iomem *base)
/* Writing all-ones (DISABLE bit included) masks interrupts. */
2902 unsigned int imask = (unsigned int)-1;
2904 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2907 static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2909 unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2911 return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2914 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2915 union myrb_cmd_mbox *mbox)
/* Words 1-3 first; word 0 (the opcode) last so the controller never
 * observes a partially-written mailbox. */
2917 mem_mbox->words[1] = mbox->words[1];
2918 mem_mbox->words[2] = mbox->words[2];
2919 mem_mbox->words[3] = mbox->words[3];
2920 /* Memory barrier to prevent reordering */
2922 mem_mbox->words[0] = mbox->words[0];
2923 /* Memory barrier to force PCI access */
2927 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2928 union myrb_cmd_mbox *mbox)
2930 writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2931 writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2932 writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2933 writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2936 static inline unsigned char
2937 DAC960_PG_read_status_cmd_ident(void __iomem *base)
2939 return readb(base + DAC960_PG_STSID_OFFSET);
2942 static inline unsigned short
2943 DAC960_PG_read_status(void __iomem *base)
2945 return readw(base + DAC960_PG_STS_OFFSET);
2949 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2950 unsigned char *param0, unsigned char *param1)
/* Fetch a pending BIOS error status plus its two parameter bytes and
 * clear the pending condition; bails out early when nothing is pending. */
2952 unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2954 if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2956 errsts &= ~DAC960_PG_ERRSTS_PENDING;
2958 *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2959 *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2960 writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2964 static inline unsigned short
2965 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2966 union myrb_cmd_mbox *mbox)
/* Synchronous single-command mailbox exchange used during memory
 * mailbox bring-up; mirrors DAC960_LA_mbox_init for PG registers. */
2968 unsigned short status;
2971 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2972 if (!DAC960_PG_hw_mbox_is_full(base))
2977 if (DAC960_PG_hw_mbox_is_full(base)) {
2979 "Timeout waiting for empty mailbox\n");
2980 return MYRB_STATUS_SUBSYS_TIMEOUT;
2982 DAC960_PG_write_hw_mbox(base, mbox);
2983 DAC960_PG_hw_mbox_new_cmd(base);
2986 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2987 if (DAC960_PG_hw_mbox_status_available(base))
2992 if (!DAC960_PG_hw_mbox_status_available(base)) {
2994 "Timeout waiting for mailbox status\n");
2995 return MYRB_STATUS_SUBSYS_TIMEOUT;
/* Latch the status before acknowledging, or it may be overwritten. */
2997 status = DAC960_PG_read_status(base);
2998 DAC960_PG_ack_hw_mbox_intr(base);
2999 DAC960_PG_ack_hw_mbox_status(base);
3004 static int DAC960_PG_hw_init(struct pci_dev *pdev,
3005 struct myrb_hba *cb, void __iomem *base)
/* One-time PG controller bring-up; mirrors DAC960_LA_hw_init: wait for
 * firmware init, enable the memory mailbox, install PG method pointers. */
3008 unsigned char error, parm0, parm1;
3010 DAC960_PG_disable_intr(base);
3011 DAC960_PG_ack_hw_mbox_status(base);
3013 while (DAC960_PG_init_in_progress(base) &&
3014 timeout < MYRB_MAILBOX_TIMEOUT) {
3015 if (DAC960_PG_read_error_status(base, &error,
3017 myrb_err_status(cb, error, parm0, parm1))
3022 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3024 "Timeout waiting for Controller Initialisation\n");
3027 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3029 "Unable to Enable Memory Mailbox Interface\n");
/* Leave the controller reset on failure so it is quiescent. */
3030 DAC960_PG_reset_ctrl(base);
3033 DAC960_PG_enable_intr(base);
3034 cb->qcmd = myrb_qcmd;
3035 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3036 if (cb->dual_mode_interface)
3037 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3039 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3040 cb->disable_intr = DAC960_PG_disable_intr;
3041 cb->reset = DAC960_PG_reset_ctrl;
3046 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
/* PG interrupt handler: identical structure to the LA handler — drain
 * the status mailbox ring under queue_lock, matching completions to
 * internal command blocks or SCSI commands by tag (id - 3). */
3048 struct myrb_hba *cb = arg;
3049 void __iomem *base = cb->io_base;
3050 struct myrb_stat_mbox *next_stat_mbox;
3051 unsigned long flags;
3053 spin_lock_irqsave(&cb->queue_lock, flags);
3054 DAC960_PG_ack_intr(base);
3055 next_stat_mbox = cb->next_stat_mbox;
3056 while (next_stat_mbox->valid) {
3057 unsigned char id = next_stat_mbox->id;
3058 struct scsi_cmnd *scmd = NULL;
3059 struct myrb_cmdblk *cmd_blk = NULL;
3061 if (id == MYRB_DCMD_TAG)
3062 cmd_blk = &cb->dcmd_blk;
3063 else if (id == MYRB_MCMD_TAG)
3064 cmd_blk = &cb->mcmd_blk;
3066 scmd = scsi_host_find_tag(cb->host, id - 3);
3068 cmd_blk = scsi_cmd_priv(scmd);
3071 cmd_blk->status = next_stat_mbox->status;
3073 dev_err(&cb->pdev->dev,
3074 "Unhandled command completion %d\n", id);
/* Consume the slot and advance, wrapping at the end of the ring. */
3076 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3077 if (++next_stat_mbox > cb->last_stat_mbox)
3078 next_stat_mbox = cb->first_stat_mbox;
3081 myrb_handle_cmdblk(cb, cmd_blk);
3083 myrb_handle_scsi(cb, cmd_blk, scmd);
3085 cb->next_stat_mbox = next_stat_mbox;
3086 spin_unlock_irqrestore(&cb->queue_lock, flags);
/* Per-board-family method table for PG controllers. */
3090 struct myrb_privdata DAC960_PG_privdata = {
3091 .hw_init = DAC960_PG_hw_init,
3092 .irq_handler = DAC960_PG_intr_handler,
3093 .mmio_size = DAC960_PG_mmio_size,
3098 * DAC960 PD Series Controllers
/*
 * DAC960 PD series register access helpers.
 *
 * PD controllers have no memory mailbox; all command posting goes
 * through the hardware mailbox registers (byte-wide doorbells).
 */
3101 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3103 writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3106 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3108 writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3111 static inline void DAC960_PD_gen_intr(void __iomem *base)
3113 writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3116 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3118 writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3121 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3123 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3125 return idb & DAC960_PD_IDB_HWMBOX_FULL;
3128 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3130 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3132 return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3135 static inline void DAC960_PD_ack_intr(void __iomem *base)
3137 writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3140 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3142 unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3144 return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3147 static inline void DAC960_PD_enable_intr(void __iomem *base)
/* PD uses a positive enable bit rather than a disable mask. */
3149 writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3152 static inline void DAC960_PD_disable_intr(void __iomem *base)
3154 writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3157 static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3159 unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3161 return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3164 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3165 union myrb_cmd_mbox *mbox)
/* Write the 13-byte command image into the mailbox registers. */
3167 writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3168 writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3169 writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3170 writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3173 static inline unsigned char
3174 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3176 return readb(base + DAC960_PD_STSID_OFFSET);
3179 static inline unsigned short
3180 DAC960_PD_read_status(void __iomem *base)
3182 return readw(base + DAC960_PD_STS_OFFSET);
3186 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3187 unsigned char *param0, unsigned char *param1)
/* Fetch a pending BIOS error status plus its two parameter bytes and
 * clear the pending condition; bails out early when nothing is pending. */
3189 unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3191 if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3193 errsts &= ~DAC960_PD_ERRSTS_PENDING;
3195 *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3196 *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3197 writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3201 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
/* Queue a command on a PD controller: busy-wait until the hardware
 * mailbox is free, then write the command and ring the doorbell. */
3203 void __iomem *base = cb->io_base;
3204 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3206 while (DAC960_PD_hw_mbox_is_full(base))
3208 DAC960_PD_write_cmd_mbox(base, mbox);
3209 DAC960_PD_hw_mbox_new_cmd(base);
3212 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3213 struct myrb_hba *cb, void __iomem *base)
/* PD controller bring-up. Unlike LA/PG, PD also claims an I/O port
 * region and has no memory mailbox (myrb_enable_mmio is passed NULL),
 * so commands are queued via DAC960_PD_qcmd. */
3216 unsigned char error, parm0, parm1;
3218 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3219 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3220 (unsigned long)cb->io_addr);
3223 DAC960_PD_disable_intr(base);
3224 DAC960_PD_ack_hw_mbox_status(base);
3226 while (DAC960_PD_init_in_progress(base) &&
3227 timeout < MYRB_MAILBOX_TIMEOUT) {
3228 if (DAC960_PD_read_error_status(base, &error,
3230 myrb_err_status(cb, error, parm0, parm1))
3235 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3237 "Timeout waiting for Controller Initialisation\n");
3240 if (!myrb_enable_mmio(cb, NULL)) {
3242 "Unable to Enable Memory Mailbox Interface\n");
3243 DAC960_PD_reset_ctrl(base);
3246 DAC960_PD_enable_intr(base);
3247 cb->qcmd = DAC960_PD_qcmd;
3248 cb->disable_intr = DAC960_PD_disable_intr;
3249 cb->reset = DAC960_PD_reset_ctrl;
3254 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
/* PD interrupt handler: no status mailbox ring — completion id and
 * status are read directly from the controller registers, one command
 * per loop iteration, each acknowledged individually. */
3256 struct myrb_hba *cb = arg;
3257 void __iomem *base = cb->io_base;
3258 unsigned long flags;
3260 spin_lock_irqsave(&cb->queue_lock, flags);
3261 while (DAC960_PD_hw_mbox_status_available(base)) {
3262 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3263 struct scsi_cmnd *scmd = NULL;
3264 struct myrb_cmdblk *cmd_blk = NULL;
3266 if (id == MYRB_DCMD_TAG)
3267 cmd_blk = &cb->dcmd_blk;
3268 else if (id == MYRB_MCMD_TAG)
3269 cmd_blk = &cb->mcmd_blk;
3271 scmd = scsi_host_find_tag(cb->host, id - 3);
3273 cmd_blk = scsi_cmd_priv(scmd);
/* Status must be read before the mailbox is acknowledged. */
3276 cmd_blk->status = DAC960_PD_read_status(base);
3278 dev_err(&cb->pdev->dev,
3279 "Unhandled command completion %d\n", id);
3281 DAC960_PD_ack_intr(base);
3282 DAC960_PD_ack_hw_mbox_status(base);
3285 myrb_handle_cmdblk(cb, cmd_blk);
3287 myrb_handle_scsi(cb, cmd_blk, scmd);
3289 spin_unlock_irqrestore(&cb->queue_lock, flags);
/* Per-board-family method table for PD controllers. */
3293 struct myrb_privdata DAC960_PD_privdata = {
3294 .hw_init = DAC960_PD_hw_init,
3295 .irq_handler = DAC960_PD_intr_handler,
3296 .mmio_size = DAC960_PD_mmio_size,
3301 * DAC960 P Series Controllers
3303 * Similar to the DAC960 PD Series Controllers, but some commands have
/*
 * myrb_translate_enquiry - convert an old-style (P series) ENQUIRY reply
 * into the current layout.
 *
 * The 64-byte block that old firmware returns at offset 36 is moved to
 * offset 132, and the vacated 96-byte region is zeroed.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	unsigned char *buf = enq;

	memcpy(buf + 132, buf + 36, 64);
	memset(buf + 36, 0, 96);
}
/*
 * myrb_translate_devstate - convert an old-style (P series) GET DEVICE
 * STATE reply into the current layout by compacting three fields
 * downwards within the buffer.
 */
static inline void myrb_translate_devstate(void *state)
{
	unsigned char *buf = state;

	buf[2] = buf[3];
	memmove(buf + 4, buf + 5, 2);
	memmove(buf + 6, buf + 8, 4);
}
3320 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3322 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3323 int ldev_num = mbox->type5.ld.ldev_num;
3325 mbox->bytes[3] &= 0x7;
3326 mbox->bytes[3] |= mbox->bytes[7] << 6;
3327 mbox->bytes[7] = ldev_num;
3330 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3332 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3333 int ldev_num = mbox->bytes[7];
3335 mbox->bytes[7] = mbox->bytes[3] >> 6;
3336 mbox->bytes[3] &= 0x7;
3337 mbox->bytes[3] |= ldev_num << 3;
3340 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
/* Queue a command on a P-series controller: rewrite new-style opcodes
 * (and read/write mailbox layouts) into the old P-series encoding
 * before posting via the PD-style hardware mailbox. */
3342 void __iomem *base = cb->io_base;
3343 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3345 switch (mbox->common.opcode) {
3346 case MYRB_CMD_ENQUIRY:
3347 mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3349 case MYRB_CMD_GET_DEVICE_STATE:
3350 mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3353 mbox->common.opcode = MYRB_CMD_READ_OLD;
3354 myrb_translate_to_rw_command(cmd_blk);
3356 case MYRB_CMD_WRITE:
3357 mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3358 myrb_translate_to_rw_command(cmd_blk);
3360 case MYRB_CMD_READ_SG:
3361 mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3362 myrb_translate_to_rw_command(cmd_blk);
3364 case MYRB_CMD_WRITE_SG:
3365 mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3366 myrb_translate_to_rw_command(cmd_blk);
/* Busy-wait for a free mailbox, then post and ring the doorbell. */
3371 while (DAC960_PD_hw_mbox_is_full(base))
3373 DAC960_PD_write_cmd_mbox(base, mbox);
3374 DAC960_PD_hw_mbox_new_cmd(base);
3378 static int DAC960_P_hw_init(struct pci_dev *pdev,
3379 struct myrb_hba *cb, void __iomem *base)
/* P-series controller bring-up: identical to PD except that commands
 * are queued through DAC960_P_qcmd, which performs old-opcode
 * translation. */
3382 unsigned char error, parm0, parm1;
3384 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3385 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3386 (unsigned long)cb->io_addr);
3389 DAC960_PD_disable_intr(base);
3390 DAC960_PD_ack_hw_mbox_status(base);
3392 while (DAC960_PD_init_in_progress(base) &&
3393 timeout < MYRB_MAILBOX_TIMEOUT) {
3394 if (DAC960_PD_read_error_status(base, &error,
3396 myrb_err_status(cb, error, parm0, parm1))
3401 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3403 "Timeout waiting for Controller Initialisation\n");
3406 if (!myrb_enable_mmio(cb, NULL)) {
3408 "Unable to allocate DMA mapped memory\n");
3409 DAC960_PD_reset_ctrl(base);
3412 DAC960_PD_enable_intr(base);
3413 cb->qcmd = DAC960_P_qcmd;
3414 cb->disable_intr = DAC960_PD_disable_intr;
3415 cb->reset = DAC960_PD_reset_ctrl;
3420 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
/* P-series interrupt handler: like the PD handler, but after fetching
 * status it translates old-style opcodes (and reply/mailbox layouts)
 * back into the new encoding before completing the command. */
3422 struct myrb_hba *cb = arg;
3423 void __iomem *base = cb->io_base;
3424 unsigned long flags;
3426 spin_lock_irqsave(&cb->queue_lock, flags);
3427 while (DAC960_PD_hw_mbox_status_available(base)) {
3428 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3429 struct scsi_cmnd *scmd = NULL;
3430 struct myrb_cmdblk *cmd_blk = NULL;
3431 union myrb_cmd_mbox *mbox;
3432 enum myrb_cmd_opcode op;
3435 if (id == MYRB_DCMD_TAG)
3436 cmd_blk = &cb->dcmd_blk;
3437 else if (id == MYRB_MCMD_TAG)
3438 cmd_blk = &cb->mcmd_blk;
3440 scmd = scsi_host_find_tag(cb->host, id - 3);
3442 cmd_blk = scsi_cmd_priv(scmd);
/* Status must be read before the mailbox is acknowledged. */
3445 cmd_blk->status = DAC960_PD_read_status(base);
3447 dev_err(&cb->pdev->dev,
3448 "Unhandled command completion %d\n", id);
3450 DAC960_PD_ack_intr(base);
3451 DAC960_PD_ack_hw_mbox_status(base);
/* Undo the old-opcode translation done by DAC960_P_qcmd. */
3456 mbox = &cmd_blk->mbox;
3457 op = mbox->common.opcode;
3459 case MYRB_CMD_ENQUIRY_OLD:
3460 mbox->common.opcode = MYRB_CMD_ENQUIRY;
3461 myrb_translate_enquiry(cb->enquiry);
3463 case MYRB_CMD_READ_OLD:
3464 mbox->common.opcode = MYRB_CMD_READ;
3465 myrb_translate_from_rw_command(cmd_blk);
3467 case MYRB_CMD_WRITE_OLD:
3468 mbox->common.opcode = MYRB_CMD_WRITE;
3469 myrb_translate_from_rw_command(cmd_blk);
3471 case MYRB_CMD_READ_SG_OLD:
3472 mbox->common.opcode = MYRB_CMD_READ_SG;
3473 myrb_translate_from_rw_command(cmd_blk);
3475 case MYRB_CMD_WRITE_SG_OLD:
3476 mbox->common.opcode = MYRB_CMD_WRITE_SG;
3477 myrb_translate_from_rw_command(cmd_blk);
3483 myrb_handle_cmdblk(cb, cmd_blk);
3485 myrb_handle_scsi(cb, cmd_blk, scmd);
3487 spin_unlock_irqrestore(&cb->queue_lock, flags);
/* Per-board-family method table for P controllers (PD-sized MMIO). */
3491 struct myrb_privdata DAC960_P_privdata = {
3492 .hw_init = DAC960_P_hw_init,
3493 .irq_handler = DAC960_P_intr_handler,
3494 .mmio_size = DAC960_PD_mmio_size,
3497 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3498 const struct pci_device_id *entry)
/* Allocate and initialise an HBA for a probed PCI device: allocate the
 * Scsi_Host, enable the device, map registers, run the family-specific
 * hw_init from driver_data and hook up the interrupt. */
3500 struct myrb_privdata *privdata =
3501 (struct myrb_privdata *)entry->driver_data;
3502 irq_handler_t irq_handler = privdata->irq_handler;
3503 unsigned int mmio_size = privdata->mmio_size;
3504 struct Scsi_Host *shost;
3505 struct myrb_hba *cb = NULL;
3507 shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3509 dev_err(&pdev->dev, "Unable to allocate Controller\n");
3512 shost->max_cmd_len = 12;
3513 shost->max_lun = 256;
3514 cb = shost_priv(shost);
3515 mutex_init(&cb->dcmd_mutex);
3516 mutex_init(&cb->dma_mutex);
3519 if (pci_enable_device(pdev))
/* PD and P controllers use BAR0 for I/O ports and BAR1 for MMIO;
 * the other families put their registers in BAR0. */
3522 if (privdata->hw_init == DAC960_PD_hw_init ||
3523 privdata->hw_init == DAC960_P_hw_init) {
3524 cb->io_addr = pci_resource_start(pdev, 0);
3525 cb->pci_addr = pci_resource_start(pdev, 1);
3527 cb->pci_addr = pci_resource_start(pdev, 0);
3529 pci_set_drvdata(pdev, cb);
3530 spin_lock_init(&cb->queue_lock);
/* Map at least one page, page-aligned; io_base re-adds the offset. */
3531 if (mmio_size < PAGE_SIZE)
3532 mmio_size = PAGE_SIZE;
3533 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3534 if (cb->mmio_base == NULL) {
3536 "Unable to map Controller Register Window\n");
3540 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3541 if (privdata->hw_init(pdev, cb, cb->io_base))
3544 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3546 "Unable to acquire IRQ Channel %d\n", pdev->irq);
3549 cb->irq = pdev->irq;
3554 "Failed to initialize Controller\n");
3559 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
/* PCI probe: detect and initialise the HBA, read its configuration,
 * set up mempools, then register and scan the SCSI host. */
3561 struct myrb_hba *cb;
3564 cb = myrb_detect(dev, entry);
3568 ret = myrb_get_hba_config(cb);
3574 if (!myrb_create_mempools(dev, cb)) {
3579 ret = scsi_add_host(cb->host, &dev->dev);
3581 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3582 myrb_destroy_mempools(cb);
3585 scsi_scan_host(cb->host);
3593 static void myrb_remove(struct pci_dev *pdev)
/* PCI remove: flush the controller cache before tearing down the
 * driver's mempools. */
3595 struct myrb_hba *cb = pci_get_drvdata(pdev);
3597 shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
3598 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3600 myrb_destroy_mempools(cb);
/* Supported PCI devices; driver_data selects the per-family method
 * table (myrb_privdata) used by myrb_detect(). The LA board sits
 * behind a DEC 21285 bridge, hence the subsystem-id match. */
3604 static const struct pci_device_id myrb_id_table[] = {
3606 PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3607 PCI_DEVICE_ID_DEC_21285,
3608 PCI_VENDOR_ID_MYLEX,
3609 PCI_DEVICE_ID_MYLEX_DAC960_LA),
3610 .driver_data = (unsigned long) &DAC960_LA_privdata,
3613 PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3616 PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3619 PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3624 MODULE_DEVICE_TABLE(pci, myrb_id_table);
/* PCI driver glue binding myrb_probe/myrb_remove to the id table. */
3626 static struct pci_driver myrb_pci_driver = {
3628 .id_table = myrb_id_table,
3629 .probe = myrb_probe,
3630 .remove = myrb_remove,
3633 static int __init myrb_init_module(void)
/* Module init: attach the raid class template first, then register the
 * PCI driver; release the template again if registration fails. */
3637 myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3638 if (!myrb_raid_template)
3641 ret = pci_register_driver(&myrb_pci_driver);
3643 raid_class_release(myrb_raid_template);
3648 static void __exit myrb_cleanup_module(void)
/* Module exit: unregister the PCI driver (detaching all HBAs), then
 * release the raid class template — the reverse of init order. */
3650 pci_unregister_driver(&myrb_pci_driver);
3651 raid_class_release(myrb_raid_template);
3654 module_init(myrb_init_module);
3655 module_exit(myrb_cleanup_module);
3657 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3658 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3659 MODULE_LICENSE("GPL");