// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Marvell. <jyli@marvell.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
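/*
 * The tag pool below is a simple LIFO stack of free command tags:
 * tag_init() fills the stack with tags size-1 .. 0 (so tag 0 is handed
 * out first), tag_get_one() pops from the top, and tag_release_one()
 * pushes a finished tag back.  tag_init() assumes the pool was sized
 * beforehand (BUG_ON otherwise).
 */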
static void tag_init(struct mvumi_tag *st, unsigned short size)
	BUG_ON(size != st->size);
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
	return st->stack[--st->top];

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;

static bool tag_is_empty(struct mvumi_tag *st)

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
				mvumi_unmap_pci_addr(dev, addr_array);

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);

static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");

	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n", size);

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, size = %d.\n",
				size);

		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);

	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);
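/*
 * Every buffer allocated above is linked onto mhba->res_list, so teardown
 * does not need to remember individual allocations:
 * mvumi_release_mem_resource() below walks the list once and frees each
 * entry according to its resource type.
 */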
static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
				res->virt_addr, res->bus_addr);
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
		list_del(&res->entry);
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;

/**
 * mvumi_make_sgl -	Prepares SGL
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command from the mid-layer
 * @sgl_p:		SGL to be filled in
 * @sg_count:		return the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int sgnum = scsi_sg_count(scmd);

	sg = scsi_sglist(scmd);
	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
			       scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
			     scmd->sc_data_direction);

	for (i = 0; i < *sg_count; i++) {
		busaddr = sg_dma_address(&sg[i]);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
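		/*
		 * The last descriptor is tagged with an end-of-table flag.
		 * The bit position (mhba->eot_flag) depends on the firmware:
		 * bit 22 when it advertises compact SG support, bit 27
		 * otherwise (see mvumi_hs_process_page()).
		 */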
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
	struct mvumi_sgl *m_sg;

	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
						unsigned int buf_size)
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
			&cmd->frame_phys, GFP_KERNEL);
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame, size = %d.\n", mhba->ib_max_size);

	if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory"
			" for internal frame\n");
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
			cmd->frame, cmd->frame_phys);

	cmd->frame->sg_counts = 0;

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
	struct mvumi_sgl *m_sg;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);
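			/*
			 * Reassemble the 64-bit DMA address from the two
			 * 32-bit halves kept in the descriptor.  The double
			 * shift by 16 (instead of one shift by 32) keeps
			 * the expression well defined when dma_addr_t is
			 * only 32 bits wide.
			 */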
			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,

		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
			cmd->frame, cmd->frame_phys);

/**
 * mvumi_get_cmd -	Get a command from the free pool
 * @mhba:		Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

/**
 * mvumi_return_cmd -	Return a cmd to free command pool
 * @mhba:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);

/**
 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 * @mhba:		Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))

/**
 * mvumi_alloc_cmds -	Allocates the command packets
 * @mhba:		Adapter soft state
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);

	dev_err(&mhba->pdev->dev,
		"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))

static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;
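	/*
	 * The hardware read pointer packs a slot index (cl_slot_num_mask
	 * bits) together with a toggle bit that flips every time the index
	 * wraps around.  Equal slot indexes with different toggle bits
	 * therefore mean the inbound list is completely full.
	 */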
	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
	return mhba->max_io - atomic_read(&mhba->fw_outstanding);

static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
	count = ioread32(mhba->ib_shadow);

static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	atomic_inc(&mhba->fw_outstanding);

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);

static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
	unsigned short tag, request_id;

	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
			mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev, "request ID from FW:0x%x, "
			"cmd request ID:0x%x\n", request_id,
			mhba->tag_cmd[tag]->request_id);

static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;
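	/*
	 * Re-read the outbound copy pointer until it agrees with its shadow
	 * copy, which the controller keeps updated in host memory via DMA.
	 * Once the two match, every outbound entry up to the pointer is
	 * known to have landed in host memory.
	 */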
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;

static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)

static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
				mhba->tag_cmd[p_outb_frame->tag] == NULL ||
				p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);

static void mvumi_reset(struct mvumi_hba *mhba)
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
	mhba->fw_state = FW_STATE_ABORT;

	if (mvumi_start(mhba))

static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
	struct mvumi_hw_regs *regs = mhba->regs;
	unsigned long before;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);

		tmp = ioread32(regs->arm_to_pciea_msg1);

static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,

static int mvumi_pci_set_master(struct pci_dev *pdev)
	pci_set_master(pdev);
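	/*
	 * Prefer a 64-bit DMA mask and quietly fall back to a 32-bit mask
	 * if the device or platform cannot support it.
	 */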
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)

	return mvumi_wait_for_outstanding(mhba);

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
	return mvumi_wait_for_outstanding(mhba);

static int mvumi_host_reset(struct scsi_cmnd *scmd)
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
			scmd->request->tag, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);

static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
			atomic_read(&cmd->sync_cmd));
	atomic_inc(&cmd->sync_cmd);
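	/*
	 * sync_cmd marks a blocked internal command as in flight: the
	 * completion path (mvumi_complete_internal_cmd) decrements it and
	 * wakes int_cmd_wait_q.  If it is still set after the timed wait
	 * below, the command timed out and its tag and list linkage have
	 * to be torn down by hand.
	 */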
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = NULL;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was not sent!\n");
			list_del_init(&cmd->queue_pointer);
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);

static void mvumi_release_fw(struct mvumi_hba *mhba)
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	pci_release_regions(mhba->pdev);

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
			dev_err(&mhba->pdev->dev, "failed to get memory"
				" for internal flush cache cmd for "
				"device %d", device_id);

		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
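		/*
		 * The flush/shutdown request is not a standard SCSI opcode;
		 * it is issued as a Marvell vendor-specific CDB addressed
		 * to the core module, asking the firmware to flush the
		 * device cache and shut it down.
		 */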
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);

		mvumi_delete_internal_cmd(mhba, cmd);

mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {

static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3;	/* 3 means Linux */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08;	/* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		time = ktime_get_real_seconds();
		local_time = (time - (sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			hs_page4->ob_depth = find_first_bit((unsigned long *)
			hs_page4->ib_depth = find_first_bit((unsigned long *)
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
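		/*
		 * With the new-page capability, the queue depths above are
		 * sent log2-encoded: list_num_io is a power of two, so
		 * find_first_bit() on it yields log2(list_num_io).  Older
		 * firmware takes the raw counts instead.
		 */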
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);

		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
/**
 * mvumi_init_data -	Initialize requested data for FW
 * @mhba:		Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)

	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32) * 2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;

	offset = round_up(p, 128) - p;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame_phys = p;
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	offset = round_up(p, 8) - p;
	mhba->ib_shadow_phys = p;

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		mhba->ob_shadow_phys = p;
		offset = round_up(p, 4) - p;
		mhba->ob_shadow_phys = p;

	offset = round_up(p, 128) - p;
	mhba->ob_list_phys = p;

	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_CACHED_MEMORY, tmp_size);
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);

	tmp_size = sizeof(unsigned short) * mhba->max_io +
				sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_CACHED_MEMORY, tmp_size);
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;

	mvumi_release_mem_resource(mhba);
static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
			hs_page1->fw_ver.ver_build);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
/**
 * mvumi_handshake -	Move the FW to READY state
 * @mhba:		Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states.  If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state.  Otherwise,
 * it has to wait for the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;

		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);

		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
				((struct mvumi_hs_page1 *) hs_header)->total_pages;
			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL - 1;

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL - 1;

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);

		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io - 1) |
						regs->cl_pointer_toggle,
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
						regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
						regs->outb_copy_baseh);

		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
	unsigned int isr_status;
	unsigned long before;

	mvumi_handshake(mhba);
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
				mhba->global_isr, isr_status);

		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
	unsigned long before;

	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
		usleep_range(1000, 2000);
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
				"handshake failed at state 0x%x.\n",
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
static unsigned char mvumi_start(struct mvumi_hba *mhba)
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);

	if (mvumi_check_handshake(mhba))
/**
 * mvumi_complete_cmd -	Completes a command
 * @mhba:		Adapter soft state
 * @cmd:		Command to be completed
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
	struct scsi_cmnd *scmd = cmd->scmd;

	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
		scmd->result |= DID_OK << 16;
		scmd->result |= DID_BUS_BUSY << 16;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |= (DRIVER_SENSE << 24);
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);

	if (scsi_bufflen(scmd))
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);

static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
			memcpy(cmd->data_buf, ob_frame->payload,
				sizeof(struct mvumi_sense_data));

		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
			ptr->sense_data_length);
		for (i = 0; i < ptr->sense_data_length; i++)
			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
		printk(KERN_WARNING "\n");

static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
	struct scsi_device *sdev;

	if (status == DEVICE_OFFLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
	} else if (status == DEVICE_ONLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
			scsi_add_device(mhba->shost, 0, devid, 0);
			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
			scsi_device_put(sdev);
static u64 mvumi_inquiry(struct mvumi_hba *mhba,
	unsigned int id, struct mvumi_cmd *cmd)
	struct mvumi_msg_frame *frame;
	int data_buf_len = 64;

		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);

	memset(cmd->data_buf, 0, data_buf_len);

	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;

	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)

			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);

		mvumi_delete_internal_cmd(mhba, cmd);

static void mvumi_detach_devices(struct mvumi_hba *mhba)
	struct mvumi_device *mv_dev = NULL, *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach Hard Disk */
	list_for_each_entry_safe(mv_dev, dev_next,
			&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);

	/* detach virtual device */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

		scsi_remove_device(sdev);
		scsi_device_put(sdev);

	mutex_unlock(&mhba->device_lock);
static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
		scsi_rescan_device(&sdev->sdev_gendev);
		scsi_device_put(sdev);

static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
	struct mvumi_device *mv_dev = NULL;

	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
		if (mv_dev->wwid == wwid) {
			if (mv_dev->id != id) {
				dev_err(&mhba->pdev->dev,
					"%s has same wwid[%llx],"
					" but different id[%d %d]\n",
					__func__, mv_dev->wwid, mv_dev->id, id);
			if (mhba->pdev->device ==
					PCI_DEVICE_ID_MARVELL_MV9143)
				mvumi_rescan_devices(mhba, id);

static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
	struct mvumi_device *mv_dev = NULL, *dev_next;

	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		if (mv_dev->id == id) {
			dev_dbg(&mhba->pdev->dev,
				"detach device(0:%d:0) wwid(%llx) from HOST\n",
				mv_dev->id, mv_dev->wwid);
			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
			list_del_init(&mv_dev->list);
static int mvumi_probe_devices(struct mvumi_hba *mhba)
	struct mvumi_device *mv_dev = NULL;
	struct mvumi_cmd *cmd = NULL;

	cmd = mvumi_create_internal_cmd(mhba, 64);

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
		maxid = mhba->max_target_id;
		maxid = mhba->max_target_id - 1;

	for (id = 0; id < maxid; id++) {
		wwid = mvumi_inquiry(mhba, id, cmd);
			/* device did not respond, remove it */
			mvumi_remove_devices(mhba, id);
			/* device responded, add it */
			found = mvumi_match_devices(mhba, id, wwid);
				mvumi_remove_devices(mhba, id);
				mv_dev = kzalloc(sizeof(struct mvumi_device),
					dev_err(&mhba->pdev->dev,
						"%s alloc mv_dev failed\n",

				mv_dev->wwid = wwid;
				mv_dev->sdev = NULL;
				INIT_LIST_HEAD(&mv_dev->list);
				list_add_tail(&mv_dev->list,
					      &mhba->mhba_dev_list);
				dev_dbg(&mhba->pdev->dev,
					"probe a new device(0:%d:0)"
					" wwid(%llx)\n", id, mv_dev->wwid);
			} else if (found == -1)

	mvumi_delete_internal_cmd(mhba, cmd);
static int mvumi_rescan_bus(void *data)
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL, *dev_next;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
			list_for_each_entry_safe(mv_dev, dev_next,
						&mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed,"
						" wwid(%llx) already exists\n",
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					list_move_tail(&mv_dev->list,
						&mhba->shost_dev_list);
		mutex_unlock(&mhba->device_lock);
static void mvumi_proc_msg(struct mvumi_hba *mhba,
				struct mvumi_hotplug_event *param)
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
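		/*
		 * The event payload carries two device bitmaps back to back:
		 * the arrival map at the start of param->bitmap and the
		 * removal map starting size/8 bytes further in.  Each is
		 * scanned below and the affected IDs are hotplugged online
		 * or offline accordingly.
		 */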
		mutex_lock(&mhba->sas_discovery_mutex);
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);

			index = find_next_zero_bit(re_bitmap, size, index + 1);
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		mutex_unlock(&mhba->sas_discovery_mutex);

static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
	if (msg == APICDB1_EVENT_GETEVENT) {
		struct mvumi_driver_event *param = NULL;
		struct mvumi_event_req *er = buffer;
		if (count > MAX_EVENTS_RETURNED) {
			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
				" than max event count[0x%x].\n",
				count, MAX_EVENTS_RETURNED);
		for (i = 0; i < count; i++) {
			param = &er->events[i];
			mvumi_show_event(mhba, param);
	} else if (msg == APICDB1_HOST_GETEVENT) {
		mvumi_proc_msg(mhba, buffer);
static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;

	cmd = mvumi_create_internal_cmd(mhba, 512);

	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame->device_id = 0;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = MAX_COMMAND_SIZE;
	frame->data_transfer_length = sizeof(struct mvumi_event_req);
	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
	frame->cdb[0] = APICDB0_EVENT;
	frame->cdb[1] = msg;
	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status != SAM_STAT_GOOD)
		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);

	mvumi_delete_internal_cmd(mhba, cmd);

static void mvumi_scan_events(struct work_struct *work)
	struct mvumi_events_wq *mu_ev =
		container_of(work, struct mvumi_events_wq, work_q);

	mvumi_get_event(mu_ev->mhba, mu_ev->event);

static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
	struct mvumi_events_wq *mu_ev;

	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
		if (isr_status & DRBL_BUS_CHANGE) {
			atomic_inc(&mhba->pnp_count);
			wake_up_process(mhba->dm_thread);
			isr_status &= ~(DRBL_BUS_CHANGE);

		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
			mu_ev->event = APICDB1_EVENT_GETEVENT;
			isr_status &= ~(DRBL_EVENT_NOTIFY);
			mu_ev->param = NULL;
			schedule_work(&mu_ev->work_q);
static void mvumi_handle_clob(struct mvumi_hba *mhba)
	struct mvumi_rsp_frame *ob_frame;
	struct mvumi_cmd *cmd;
	struct mvumi_ob_data *pool;

	while (!list_empty(&mhba->free_ob_list)) {
		pool = list_first_entry(&mhba->free_ob_list,
					struct mvumi_ob_data, list);
		list_del_init(&pool->list);
		list_add_tail(&pool->list, &mhba->ob_data_list);

		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
		cmd = mhba->tag_cmd[ob_frame->tag];

		atomic_dec(&mhba->fw_outstanding);
		mhba->tag_cmd[ob_frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
			mvumi_complete_cmd(mhba, cmd, ob_frame);
			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
	mhba->instancet->fire_cmd(mhba, NULL);
static irqreturn_t mvumi_isr_handler(int irq, void *devp)
	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);
	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
		spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
			mvumi_launch_events(mhba, mhba->isr_status);
		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
			mvumi_handshake(mhba);

	if (mhba->global_isr & mhba->regs->int_comaout)
		mvumi_receive_ob_list_entry(mhba);

	mhba->global_isr = 0;
	mhba->isr_status = 0;
	if (mhba->fw_state == FW_STATE_STARTED)
		mvumi_handle_clob(mhba);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
	struct mvumi_msg_frame *ib_frame;
	unsigned int frame_len;

	ib_frame = cmd->frame;
	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	if (tag_is_empty(&mhba->tag_pool)) {
		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	mvumi_get_ib_list_entry(mhba, &ib_entry);

	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
	cmd->frame->request_id = mhba->io_seq++;
	cmd->request_id = cmd->frame->request_id;
	mhba->tag_cmd[cmd->frame->tag] = cmd;
	frame_len = sizeof(*ib_frame) - 4 +
			ib_frame->sg_counts * sizeof(struct mvumi_sgl);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		struct mvumi_dyn_list_entry *dle;
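		/*
		 * Dynamic source mode: rather than copying the whole frame
		 * into the inbound ring, queue a small descriptor that
		 * points at the command frame (64-bit address plus length
		 * in dwords) and let the hardware fetch the frame itself.
		 */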
			cpu_to_le32(lower_32_bits(cmd->frame_phys));
		dle->src_high_addr =
			cpu_to_le32(upper_32_bits(cmd->frame_phys));
		dle->if_length = (frame_len >> 2) & 0xFFF;
		memcpy(ib_entry, ib_frame, frame_len);

	return MV_QUEUE_COMMAND_RESULT_SENT;
static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
	unsigned short num_of_cl_sent = 0;
	enum mvumi_qc_result result;

		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
	count = mhba->instancet->check_ib_list(mhba);
	if (list_empty(&mhba->waiting_req_list) || !count)

		cmd = list_first_entry(&mhba->waiting_req_list,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
		result = mvumi_send_command(mhba, cmd);
		case MV_QUEUE_COMMAND_RESULT_SENT:
		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
			if (num_of_cl_sent > 0)
				mvumi_send_ib_list_entry(mhba);
	} while (!list_empty(&mhba->waiting_req_list) && count--);

	if (num_of_cl_sent > 0)
		mvumi_send_ib_list_entry(mhba);
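/*
 * Commands are queued in batches: the loop above stuffs as many waiting
 * commands as the inbound list has room for, and the doorbell
 * (mvumi_send_ib_list_entry) is rung once per batch rather than once per
 * command.
 */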
/**
 * mvumi_enable_intr -	Enables interrupts
 * @mhba:		Adapter soft state
 */
static void mvumi_enable_intr(struct mvumi_hba *mhba)
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	mask = ioread32(regs->enpointa_mask_reg);
	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
	iowrite32(mask, regs->enpointa_mask_reg);

/**
 * mvumi_disable_intr -	Disables interrupt
 * @mhba:		Adapter soft state
 */
static void mvumi_disable_intr(struct mvumi_hba *mhba)
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->arm_to_pciea_mask_reg);
	mask = ioread32(regs->enpointa_mask_reg);
	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
	iowrite32(mask, regs->enpointa_mask_reg);
static int mvumi_clear_intr(void *extend)
	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
	unsigned int status, isr_status = 0, tmp = 0;
	struct mvumi_hw_regs *regs = mhba->regs;

	status = ioread32(regs->main_int_cause_reg);
	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
	if (unlikely(status & regs->int_comaerr)) {
		tmp = ioread32(regs->outb_isr_cause);
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
			if (tmp & regs->clic_out_err) {
				iowrite32(tmp & regs->clic_out_err,
						regs->outb_isr_cause);
			if (tmp & (regs->clic_in_err | regs->clic_out_err))
				iowrite32(tmp & (regs->clic_in_err |
						regs->clic_out_err),
						regs->outb_isr_cause);
		status ^= mhba->regs->int_comaerr;
		/* inbound or outbound parity error, command will timeout */

	if (status & regs->int_comaout) {
		tmp = ioread32(regs->outb_isr_cause);
		if (tmp & regs->clic_irq)
			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
	if (status & regs->int_dl_cpu2pciea) {
		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);

	mhba->global_isr = status;
	mhba->isr_status = isr_status;
/**
 * mvumi_read_fw_status_reg - returns the current FW status value
 * @mhba:		Adapter soft state
 */
static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
	unsigned int status;

	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
static struct mvumi_instance_template mvumi_instance_9143 = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
	.check_ib_list = mvumi_check_ib_list_9143,
	.check_ob_list = mvumi_check_ob_list_9143,
	.reset_host = mvumi_reset_host_9143,
};

static struct mvumi_instance_template mvumi_instance_9580 = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
	.check_ib_list = mvumi_check_ib_list_9580,
	.check_ob_list = mvumi_check_ob_list_9580,
	.reset_host = mvumi_reset_host_9580,
};
static int mvumi_slave_configure(struct scsi_device *sdev)
	struct mvumi_hba *mhba;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	mhba = (struct mvumi_hba *) sdev->host->hostdata;
	if (sdev->id >= mhba->max_target_id)

	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
/**
 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
	struct mvumi_msg_frame *pframe;

	cmd->cmd_status = REQ_STATUS_PENDING;
	pframe = cmd->frame;
	pframe->device_id = ((unsigned short) scmd->device->id) |
				(((unsigned short) scmd->device->lun) << 8);
	pframe->cmd_flag = 0;

	switch (scmd->sc_data_direction) {
		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
	case DMA_FROM_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
	case DMA_BIDIRECTIONAL:
		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);

	pframe->cdb_length = scmd->cmd_len;
	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
	pframe->req_function = CL_FUN_SCSI_CMD;
	if (scsi_bufflen(scmd)) {
		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
			&pframe->sg_counts))
		pframe->data_transfer_length = scsi_bufflen(scmd);
		pframe->sg_counts = 0;
		pframe->data_transfer_length = 0;

	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
		SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
/**
 * mvumi_queue_command -	Queue entry point
 * @shost:			Scsi host to queue command on
 * @scmd:			SCSI command to be queued
 */
static int mvumi_queue_command(struct Scsi_Host *shost,
					struct scsi_cmnd *scmd)
	struct mvumi_cmd *cmd;
	struct mvumi_hba *mhba;
	unsigned long irq_flags;

	spin_lock_irqsave(shost->host_lock, irq_flags);

	mhba = (struct mvumi_hba *) shost->hostdata;

	cmd = mvumi_get_cmd(mhba);
	if (unlikely(!cmd)) {
		spin_unlock_irqrestore(shost->host_lock, irq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;

	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
		goto out_return_cmd;

	scmd->SCp.ptr = (char *) cmd;
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);

	mvumi_return_cmd(mhba, cmd);
	scmd->scsi_done(scmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
	struct Scsi_Host *host = scmd->device->host;
	struct mvumi_hba *mhba = shost_priv(host);
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);

	if (mhba->tag_cmd[cmd->frame->tag]) {
		mhba->tag_cmd[cmd->frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
	if (!list_empty(&cmd->queue_pointer))
		list_del_init(&cmd->queue_pointer);
		atomic_dec(&mhba->fw_outstanding);

	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
	scmd->SCp.ptr = NULL;
	if (scsi_bufflen(scmd)) {
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	mvumi_return_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int geom[])
	tmp = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, tmp);

	if (capacity >= 0x200000) {
		tmp = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);

	geom[2] = cylinders;
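	/*
	 * This follows the usual SCSI BIOS geometry convention
	 * (conventionally 64 heads / 32 sectors, switching to 255 / 63 for
	 * capacities of 1 GiB, i.e. 0x200000 512-byte sectors, and up);
	 * cylinders are whatever remains of the capacity after dividing by
	 * heads * sectors.
	 */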
static struct scsi_host_template mvumi_template = {
	.module = THIS_MODULE,
	.name = "Marvell Storage Controller",
	.slave_configure = mvumi_slave_configure,
	.queuecommand = mvumi_queue_command,
	.eh_timed_out = mvumi_timed_out,
	.eh_host_reset_handler = mvumi_host_reset,
	.bios_param = mvumi_bios_param,
	.dma_boundary = PAGE_SIZE - 1,
};
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
	struct mvumi_hw_regs *regs;

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->mmio = mhba->base_addr[0];

		mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
		if (mhba->regs == NULL)

		regs->ctrl_sts_reg = base + 0x20104;
		regs->rstoutn_mask_reg = base + 0x20108;
		regs->sys_soft_rst_reg = base + 0x2010C;
		regs->main_int_cause_reg = base + 0x20200;
		regs->enpointa_mask_reg = base + 0x2020C;
		regs->rstoutn_en_reg = base + 0xF1400;

		regs->pciea_to_arm_drbl_reg = base + 0x20400;
		regs->arm_to_pciea_drbl_reg = base + 0x20408;
		regs->arm_to_pciea_mask_reg = base + 0x2040C;
		regs->pciea_to_arm_msg0 = base + 0x20430;
		regs->pciea_to_arm_msg1 = base + 0x20434;
		regs->arm_to_pciea_msg0 = base + 0x20438;
		regs->arm_to_pciea_msg1 = base + 0x2043C;

		/* For Message Unit */
		regs->inb_aval_count_basel = base + 0x508;
		regs->inb_aval_count_baseh = base + 0x50C;
		regs->inb_write_pointer = base + 0x518;
		regs->inb_read_pointer = base + 0x51C;
		regs->outb_coal_cfg = base + 0x568;
		regs->outb_copy_basel = base + 0x5B0;
		regs->outb_copy_baseh = base + 0x5B4;
		regs->outb_copy_pointer = base + 0x544;
		regs->outb_read_pointer = base + 0x548;
		regs->outb_isr_cause = base + 0x560;
		regs->outb_coal_cfg = base + 0x568;
		/* Bit setting for HW */
		regs->int_comaout = 1 << 8;
		regs->int_comaerr = 1 << 6;
		regs->int_dl_cpu2pciea = 1 << 1;
		regs->cl_pointer_toggle = 1 << 12;
		regs->clic_irq = 1 << 1;
		regs->clic_in_err = 1 << 8;
		regs->clic_out_err = 1 << 12;
		regs->cl_slot_num_mask = 0xFFF;
		regs->int_drbl_int_mask = 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2246 case PCI_DEVICE_ID_MARVELL_MV9580:
2247 mhba->mmio = mhba->base_addr[2];
2250 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2251 if (mhba->regs == NULL)
2256 regs->ctrl_sts_reg = base + 0x20104;
		regs->rstoutn_mask_reg = base + 0x1010C;
		regs->sys_soft_rst_reg = base + 0x10108;
		regs->main_int_cause_reg = base + 0x10200;
		regs->enpointa_mask_reg = base + 0x1020C;
		regs->rstoutn_en_reg = base + 0xF1400;

		/* For Doorbell */
		regs->pciea_to_arm_drbl_reg = base + 0x10460;
		regs->arm_to_pciea_drbl_reg = base + 0x10480;
		regs->arm_to_pciea_mask_reg = base + 0x10484;
		regs->pciea_to_arm_msg0 = base + 0x10400;
		regs->pciea_to_arm_msg1 = base + 0x10404;
		regs->arm_to_pciea_msg0 = base + 0x10420;
		regs->arm_to_pciea_msg1 = base + 0x10424;

		/* For reset */
		regs->reset_request = base + 0x10108;
		regs->reset_enable = base + 0x1010c;

		/* For Message Unit */
		regs->inb_aval_count_basel = base + 0x4008;
		regs->inb_aval_count_baseh = base + 0x400C;
		regs->inb_write_pointer = base + 0x4018;
		regs->inb_read_pointer = base + 0x401C;
		regs->outb_copy_basel = base + 0x4058;
		regs->outb_copy_baseh = base + 0x405C;
		regs->outb_copy_pointer = base + 0x406C;
		regs->outb_read_pointer = base + 0x4070;
		regs->outb_coal_cfg = base + 0x4080;
		regs->outb_isr_cause = base + 0x4088;

		/* Bit setting for HW */
		regs->int_comaout = 1 << 4;
		regs->int_dl_cpu2pciea = 1 << 12;
		regs->int_comaerr = 1 << 29;
		regs->cl_pointer_toggle = 1 << 14;
		regs->cl_slot_num_mask = 0x3FFF;
		regs->clic_irq = 1 << 0;
		regs->clic_out_err = 1 << 1;
		regs->int_drbl_int_mask = 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
		break;
	default:
		return -1;
	}

	return 0;
}
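/*
 * Illustrative sketch only, not driver code: with the offsets configured
 * above, talking to the firmware reduces to MMIO accesses through these
 * cached pointers, roughly
 *
 *	iowrite32(msg, regs->pciea_to_arm_msg1);
 *	iowrite32(handshake_bit, regs->pciea_to_arm_drbl_reg);
 *
 * where handshake_bit stands in for whatever doorbell bit the protocol
 * defines.  The real access sequences live in the per-chip instance ops
 * (mvumi_instance_9143/mvumi_instance_9580) selected in mvumi_init_fw().
 */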
/**
 * mvumi_init_fw -	Initializes the FW
 * @mhba:		Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static int mvumi_init_fw(struct mvumi_hba *mhba)
{
	int ret = 0;

	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto fail_ioremap;
	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->instancet = &mvumi_instance_9143;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		mhba->request_id_enabled = 1;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->instancet = &mvumi_instance_9580;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		break;
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
							mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
							mhba->pdev->device);
	ret = mvumi_cfg_hw_reg(mhba);
	if (ret) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for reg\n");
		ret = -ENOMEM;
		goto fail_alloc_mem;
	}
	mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
			HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
	if (!mhba->handshake_page) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for handshake\n");
		ret = -ENOMEM;
		goto fail_alloc_page;
	}

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;
fail_ready_state:
	mvumi_release_mem_resource(mhba);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
	kfree(mhba->regs);
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}
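/*
 * The fail_* labels above form the usual kernel unwind ladder: each
 * target releases only what was acquired before the goto that reaches
 * it, in reverse order of acquisition, so a failure at any step backs
 * out exactly the setup completed so far.
 */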
/**
 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
 * @mhba:		Adapter soft state
 */
static int mvumi_io_attach(struct mvumi_hba *mhba)
{
	struct Scsi_Host *host = mhba->shost;
	struct scsi_device *sdev = NULL;
	int ret;
	unsigned int max_sg = (mhba->ib_max_size + 4 -
		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
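	/*
	 * max_sg bounds the SG list by what fits in one inbound frame:
	 * the fixed message-frame header is subtracted from ib_max_size
	 * (plus a 4-byte allowance) and the remainder is divided by the
	 * size of one SG descriptor.  With hypothetical numbers - a
	 * 1024-byte frame, 64-byte header and 16-byte descriptors - this
	 * would permit (1024 + 4 - 64) / 16 = 60 entries.
	 */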
	host->irq = mhba->pdev->irq;
	host->unique_id = mhba->unique_id;
	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
	host->max_sectors = mhba->max_transfer_size / 512;
	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->max_id = mhba->max_target_id;
	host->max_cmd_len = MAX_COMMAND_SIZE;
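	/*
	 * can_queue and cmd_per_lun are capped at max_io - 1 (but never
	 * less than 1): one of the firmware's max_io slots is held back,
	 * presumably for the driver's own internal commands, so the
	 * mid-layer cannot exhaust the pool those also draw from.
	 */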
	ret = scsi_add_host(host, &mhba->pdev->dev);
	if (ret) {
		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
		return ret;
	}
	mhba->fw_flag |= MVUMI_FW_ATTACH;
	mutex_lock(&mhba->sas_discovery_mutex);
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
	else
		ret = 0;
	if (ret) {
		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_add_device;
	}
	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
						mhba, "mvumi_scanthread");
	if (IS_ERR(mhba->dm_thread)) {
		dev_err(&mhba->pdev->dev,
			"failed to create device scan thread\n");
		ret = PTR_ERR(mhba->dm_thread);
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_create_thread;
	}
	atomic_set(&mhba->pnp_count, 1);
	wake_up_process(mhba->dm_thread);

	mutex_unlock(&mhba->sas_discovery_mutex);
	return 0;

fail_create_thread:
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
fail_add_device:
	scsi_remove_host(mhba->shost);
	return ret;
}
/**
 * mvumi_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 */
static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;
	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
		pdev->vendor, pdev->device, pdev->subsystem_vendor,
		pdev->subsystem_device);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail_set_dma_mask;

	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	mutex_init(&mhba->device_lock);
	INIT_LIST_HEAD(&mhba->mhba_dev_list);
	INIT_LIST_HEAD(&mhba->shost_dev_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);
	mutex_init(&mhba->sas_discovery_mutex);
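	/*
	 * A rough map of the bookkeeping initialized above: cmd_pool holds
	 * free command descriptors, ob_data_list/free_ob_list track outbound
	 * data buffers (all vs. currently available), res_list anchors the
	 * memory handed out by mvumi_alloc_mem_resource(), and
	 * waiting_req_list queues requests that could not be issued
	 * immediately.
	 */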
	mhba->pdev = pdev;
	mhba->shost = host;
	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;
	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}

	mhba->instancet->enable_intr(mhba);
	pci_set_drvdata(pdev, mhba);
	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;

	mvumi_backup_bar_addr(mhba);
	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
	return 0;

fail_io_attach:
	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);

fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
	host = mhba->shost;

	if (mhba->dm_thread) {
		kthread_stop(mhba->dm_thread);
		mhba->dm_thread = NULL;
	}
	mvumi_detach_devices(mhba);

	scsi_remove_host(mhba->shost);
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	scsi_host_put(host);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver is removed!\n");
}
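/*
 * Teardown mirrors probe in reverse: the rescan thread is stopped first
 * so no devices appear mid-removal, devices and the SCSI host go next,
 * the controller cache is flushed while the hardware is still
 * serviceable, and only then are interrupts silenced and the firmware
 * and PCI resources released.
 */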
/**
 * mvumi_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 */
static void mvumi_shutdown(struct pci_dev *pdev)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);
}
static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
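/*
 * Suspend flushes the write cache while interrupts are still live, then
 * tears the device down to its low-power state.  Everything released
 * here (IRQ, BAR mappings, regions) is rebuilt by mvumi_resume(), which
 * also repeats the firmware handshake via mvumi_start() since the
 * controller cannot be assumed to keep state across the power
 * transition.
 */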
static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
{
	int ret;
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "enable device failed\n");
		return ret;
	}

	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail;
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;
	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
	if (ret)
		goto fail;
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto release_regions;
	if (mvumi_cfg_hw_reg(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	mhba->mmio = mhba->base_addr[0];
	mvumi_reset(mhba);

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto unmap_pci_addr;
	}
	mhba->instancet->enable_intr(mhba);

	return 0;
unmap_pci_addr:
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
release_regions:
	pci_release_regions(pdev);
fail:
	pci_disable_device(pdev);

	return ret;
}
static struct pci_driver mvumi_pci_driver = {
	.name = MV_DRIVER_NAME,
	.id_table = mvumi_pci_table,
	.probe = mvumi_probe_one,
	.remove = mvumi_detach_one,
	.shutdown = mvumi_shutdown,
	.suspend = mvumi_suspend,
	.resume = mvumi_resume,
};

module_pci_driver(mvumi_pci_driver);