/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,
};
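/*
 * Worked sizing example (illustrative only, not used by the code): with a
 * 128-byte CPB and 16-byte APRD, each command tag owns a 1 KiB slot of the
 * per-port DMA area:
 *
 *	NV_ADMA_SGTBL_LEN	 = (1024 - 128) / 16 = 56 external APRDs
 *	NV_ADMA_SGTBL_TOTAL_LEN	 = 56 + 5 = 61 (5 more APRDs live in the CPB)
 *	NV_ADMA_SGTBL_SZ	 = 56 * 16 = 896 bytes
 *	NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896) = 32 KiB per port
 */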
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
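/*
 * Hedged sanity-check sketch (an addition for illustration, not something
 * the driver itself does): the byte offsets annotated above imply the CPB
 * is exactly 128 bytes (NV_ADMA_CPB_SZ), which a build-time check could
 * assert from e.g. nv_adma_port_start():
 *
 *	BUILD_BUG_ON(sizeof(struct nv_adma_cpb) != NV_ADMA_CPB_SZ);
 */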
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32			defer_bits;
	unsigned int		head;
	unsigned int		tail;
	unsigned int		tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd	*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */
	void __iomem		*sactive_block;
	void __iomem		*irq_block;
	void __iomem		*tag_block;
	u32			qc_active;

	unsigned int		last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32			dhfis_bits;
	u32			dmafis_bits;
	u32			sdbfis_bits;

	unsigned int		ncq_flags;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & (1 << (19 + (12 * (PORT)))))
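/*
 * Illustrative expansion (not part of the driver logic): the per-port
 * interrupt flag sits at bit 19 for port 0 and bit 19 + 12 = 31 for port 1
 * of the ADMA general control/status word, so:
 *
 *	NV_ADMA_CHECK_INTR(gen_ctl, 0)  ->  gen_ctl & (1 << 19)
 *	NV_ADMA_CHECK_INTR(gen_ctl, 1)  ->  gen_ctl & (1 << 31)
 */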
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type {
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 *  bko11195 reports that link doesn't come online after hardreset on
 *  generic nv's and there have been several other similar reports on
 *  linux-ide.
 *
 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 *  softreset.
 *
 * NF2/3:
 *
 *  bko3352 reports nf2/3 controllers can't determine device signature
 *  reliably after hardreset.  The following thread reports detection
 *  failure on cold boot with the standard debouncing timing.
 *
 *  http://thread.gmane.org/gmane.linux.ide/34098
 *
 *  bko12176 reports that hardreset fails to bring up the link during
 *  boot on nf2.
 *
 * CK804:
 *
 *  For initial probing after boot and hot plugging, hardreset mostly
 *  works fine on CK804 but curiously, reprobing on the initial port
 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 *  FIS in a somewhat nondeterministic way.
 *
 * MCP5x:
 *
 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 *  hardreset should be used and hardreset can't report proper
 *  signature, which suggests that mcp5x is closer to nf2 as far as
 *  reset quirkiness is concerned.
 *
 *  bko12703 reports that boot probing fails for intel SSD with
 *  hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;
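/*
 * Usage sketch (assumes the module_param_named() hooks that normally expose
 * these bools later in the file, outside this excerpt, under the names
 * "adma", "swncq" and "msi_enable"): the ADMA and SWNCQ backends and MSI
 * can then be toggled at load time, e.g.
 *
 *	modprobe sata_nv swncq=0
 *
 * or on the kernel command line as sata_nv.swncq=0.
 */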
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			      status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping.  If either SCSI device is not allocated
		 * yet, it's OK since that port will discover its correct
		 * setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails.  In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway.  We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
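/*
 * Worked encoding example (illustrative, not part of the driver logic):
 * each CPB taskfile slot written above is a 16-bit word of
 * (register index << 8) | value, plus control bits in the high flag
 * positions.  For instance, nsect = 8 encodes as
 *
 *	(ATA_REG_NSECT << 8) | 0x08
 *
 * The first word carries WNB so the engine waits for not-BSY, the command
 * word carries CMDEND to terminate the list, and the remaining slots of
 * tf[12] are padded with IGN so the device skips them.
 */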
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands.  EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode.  Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status.  Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * pad buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * try to raise the DMA mask to allocate the CPB/APRD table.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			return rc;
	}
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
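/*
 * Worked example (illustrative, not part of the driver logic): for a
 * command with n_elem = 7 and hw_tag = 3, entries 0-4 land in
 * cpb->aprd[0..4] inside the CPB, entries 5-6 land in
 * pp->aprd[NV_ADMA_SGTBL_LEN * 3] and the slot after it, and next_aprd is
 * set to pp->aprd_dma + NV_ADMA_SGTBL_SZ * 3 so the engine can walk the
 * spill-over table for this tag.
 */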
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->hw_tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}

static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
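/*
 * Note on the ring arithmetic above (an observation, not new behaviour):
 * head and tail grow monotonically and are only masked on array access, so
 * tail - head is always the number of queued tags, and the
 * & (ATA_MAX_QUEUE - 1) masking is a cheap modulo that relies on
 * ATA_MAX_QUEUE being a power of two (32).  A hedged helper sketch for the
 * same emptiness test used above:
 *
 *	static inline bool nv_swncq_dq_empty(struct defer_queue *dq)
 *	{
 *		return dq->head == dq->tail;
 *	}
 */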
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}

static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
		     ap->qc_active, ap->link.sactive);
	ata_port_err(ap,
		"SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
		     ap->ops->sff_check_status(ap),
		     ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_err(ap,
			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
			     (pp->dhfis_bits >> i) & 0x1,
			     (pp->dmafis_bits >> i) & 0x1,
			     (pp->sdbfis_bits >> i) & 0x1,
			     (sactive >> i) & 0x1,
			     (err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
#endif

static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}

static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}

static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_bmdma_qc_prep(qc);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	nv_swncq_fill_sg(qc);
}

static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
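/*
 * Worked example of the 64K split above (illustrative): a segment with
 * addr = 0x1f000 and sg_len = 0x3000 crosses a 64 KiB boundary
 * (offset = 0xf000, offset + sg_len = 0x12000 > 0x10000), so it becomes
 * two PRD entries:
 *
 *	prd[idx]   : addr 0x1f000, len 0x1000  (up to the 0x20000 boundary)
 *	prd[idx+1] : addr 0x20000, len 0x2000  (the remainder)
 *
 * The BMDMA engine cannot cross a 64 KiB boundary within one entry, hence
 * the loop.
 */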
2027 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2028 struct ata_queued_cmd *qc)
2030 struct nv_swncq_port_priv *pp = ap->private_data;
2037 writel((1 << qc->hw_tag), pp->sactive_block);
2038 pp->last_issue_tag = qc->hw_tag;
2039 pp->dhfis_bits &= ~(1 << qc->hw_tag);
2040 pp->dmafis_bits &= ~(1 << qc->hw_tag);
2041 pp->qc_active |= (0x1 << qc->hw_tag);
2043 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2044 ap->ops->sff_exec_command(ap, &qc->tf);
2046 DPRINTK("Issued tag %u\n", qc->hw_tag);
2051 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2053 struct ata_port *ap = qc->ap;
2054 struct nv_swncq_port_priv *pp = ap->private_data;
2056 if (qc->tf.protocol != ATA_PROT_NCQ)
2057 return ata_bmdma_qc_issue(qc);
2062 nv_swncq_issue_atacmd(ap, qc);
2064 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2069 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2072 struct ata_eh_info *ehi = &ap->link.eh_info;
2074 ata_ehi_clear_desc(ehi);
2076 /* AHCI needs SError cleared; otherwise, it might lock up */
2077 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2078 sata_scr_write(&ap->link, SCR_ERROR, serror);
2080 /* analyze @irq_stat */
2081 if (fis & NV_SWNCQ_IRQ_ADDED)
2082 ata_ehi_push_desc(ehi, "hot plug");
2083 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2084 ata_ehi_push_desc(ehi, "hot unplug");
2086 ata_ehi_hotplugged(ehi);
2088 /* okay, let's hand over to EH */
2089 ehi->serror |= serror;
2091 ata_port_freeze(ap);
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a Device-to-Host Register FIS,
		 * the driver needs to reissue the command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}

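/*
 * Read the tag of the DMA Setup FIS the controller last received;
 * the 5-bit tag sits in bits 6:2 of the tag register.
 */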
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}

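/*
 * A DMA Setup FIS arrived: point the BMDMA engine at the PRD table built
 * for that tag, set the transfer direction, and start the engine.  The
 * drive, not the driver, picks which queued command's data phase runs next.
 */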
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

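/*
 * Per-port interrupt handling for SWNCQ: hotplug and device errors are
 * checked first, then the FIS status bits (BACKOUT, SDB, D2H Register,
 * DMA Setup) are walked, updating the software tag state and kicking the
 * defer queue or the BMDMA engine as appropriate.
 */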
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is a backout, the driver must issue
		 * the command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the DMA controller with appropriate PRD buffers
		 * and start the DMA transfer for the requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}

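/*
 * Top-level MCP55 interrupt handler.  The controller packs both ports'
 * status into one register, NV_INT_PORT_SHIFT_MCP55 bits per port; ports
 * with NCQ commands in flight take the SWNCQ path, all others fall back
 * to the generic handler.
 */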
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			if (irq_stat)	/* preserve the hotplug bits */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

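/*
 * PCI probe: pick the programming interface (ADMA on CK804, SWNCQ on
 * MCP5x, plain BMDMA otherwise, subject to the module parameters), then
 * map BAR5 and wire up the SCR registers before activating the host.
 */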
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of BARs
	// (NVIDIA SATA controllers will always have six BARs).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_notice(&pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	if (msi_enabled) {
		dev_notice(&pdev->dev, "Using MSI\n");
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}

#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);

			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

module_pci_driver(nv_pci_driver);

module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");

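/*
 * Example: assuming the driver is built as a module, the defaults above
 * can be overridden at load time, e.g.:
 *
 *	modprobe sata_nv adma=1 swncq=0 msi=1
 */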