1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_nv.c - NVIDIA nForce SATA
5 * Copyright 2004 NVIDIA Corp. All rights reserved.
6 * Copyright 2004 Andrew Chew
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
11 * No hardware documentation available outside of NVIDIA.
12 * This driver programs the NVIDIA SATA controller in a similar
13 * fashion as with other PCI IDE BMDMA controllers, with a few
14 * NV-specific details such as register offsets, SATA phy location,
17 * CK804/MCP04 controllers support an alternate programming interface
18 * similar to the ADMA specification (with some modifications).
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20 * sent through the legacy interface.
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/gfp.h>
26 #include <linux/pci.h>
27 #include <linux/blkdev.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <linux/libata.h>
/* Driver identity strings, used by the PCI driver and SCSI host templates. */
35 #define DRV_NAME "sata_nv"
36 #define DRV_VERSION "3.5"
/* The ADMA engine tolerates segments crossing any boundary below 4 GiB. */
38 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
/*
 * Controller register offsets and bit definitions.
 * NOTE(review): this extract is missing the enclosing "enum {" opener,
 * the closing "};", and several enumerators referenced below
 * (NV_INT_DEV, NV_INT_ADDED, NV_ADMA_CPB_SZ, NV_ADMA_APRD_SZ,
 * NV_INT_ALL, NV_MMIO_BAR, NV_ADMA_PORT, NV_ADMA_GEN, NV_ADMA_CTL,
 * NV_ADMA_STAT) — confirm against the full file.
 */
/* Transfer-mode capabilities advertised for all variants. */
44 NV_PIO_MASK = ATA_PIO4,
45 NV_MWDMA_MASK = ATA_MWDMA2,
46 NV_UDMA_MASK = ATA_UDMA6,
/* Per-port SControl/SStatus/SError register windows in BAR5. */
47 NV_PORT0_SCR_REG_OFFSET = 0x00,
48 NV_PORT1_SCR_REG_OFFSET = 0x40,
50 /* INT_STATUS/ENABLE */
53 NV_INT_STATUS_CK804 = 0x440,
54 NV_INT_ENABLE_CK804 = 0x441,
56 /* INT_STATUS/ENABLE bits */
60 NV_INT_REMOVED = 0x08,
62 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
65 NV_INT_MASK = NV_INT_DEV |
66 NV_INT_ADDED | NV_INT_REMOVED,
70 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
72 // For PCI config register 20
73 NV_MCP_SATA_CFG_20 = 0x50,
74 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
75 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
76 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
77 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
78 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
/* ADMA queue geometry: 32 CPBs, each followed by its APRD scatter table. */
80 NV_ADMA_MAX_CPBS = 32,
83 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
/* +5 because the first five SG entries live inside the CPB itself. */
85 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
86 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
87 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
88 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
90 /* BAR5 offset to ADMA general registers */
92 NV_ADMA_GEN_CTL = 0x00,
93 NV_ADMA_NOTIFIER_CLEAR = 0x30,
95 /* BAR5 offset to ADMA ports */
98 /* size of ADMA port register space */
99 NV_ADMA_PORT_SIZE = 0x100,
101 /* ADMA port registers */
103 NV_ADMA_CPB_COUNT = 0x42,
104 NV_ADMA_NEXT_CPB_IDX = 0x43,
106 NV_ADMA_CPB_BASE_LOW = 0x48,
107 NV_ADMA_CPB_BASE_HIGH = 0x4C,
108 NV_ADMA_APPEND = 0x50,
109 NV_ADMA_NOTIFIER = 0x68,
110 NV_ADMA_NOTIFIER_ERROR = 0x6C,
112 /* NV_ADMA_CTL register bits */
113 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
114 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
115 NV_ADMA_CTL_GO = (1 << 7),
116 NV_ADMA_CTL_AIEN = (1 << 8),
117 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
118 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
120 /* CPB response flag bits */
121 NV_CPB_RESP_DONE = (1 << 0),
122 NV_CPB_RESP_ATA_ERR = (1 << 3),
123 NV_CPB_RESP_CMD_ERR = (1 << 4),
124 NV_CPB_RESP_CPB_ERR = (1 << 7),
126 /* CPB control flag bits */
127 NV_CPB_CTL_CPB_VALID = (1 << 0),
128 NV_CPB_CTL_QUEUE = (1 << 1),
129 NV_CPB_CTL_APRD_VALID = (1 << 2),
130 NV_CPB_CTL_IEN = (1 << 3),
131 NV_CPB_CTL_FPDMA = (1 << 4),
/* APRD (scatter/gather entry) flag bits. */
134 NV_APRD_WRITE = (1 << 1),
135 NV_APRD_END = (1 << 2),
136 NV_APRD_CONT = (1 << 3),
138 /* NV_ADMA_STAT flags */
139 NV_ADMA_STAT_TIMEOUT = (1 << 0),
140 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
141 NV_ADMA_STAT_HOTPLUG = (1 << 2),
142 NV_ADMA_STAT_CPBERR = (1 << 4),
143 NV_ADMA_STAT_SERROR = (1 << 5),
144 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
145 NV_ADMA_STAT_IDLE = (1 << 8),
146 NV_ADMA_STAT_LEGACY = (1 << 9),
147 NV_ADMA_STAT_STOPPED = (1 << 10),
148 NV_ADMA_STAT_DONE = (1 << 12),
149 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
150 NV_ADMA_STAT_TIMEOUT,
/* Software state bits kept in nv_adma_port_priv.flags. */
153 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
154 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
156 /* MCP55 reg offset */
157 NV_CTL_MCP55 = 0x400,
158 NV_INT_STATUS_MCP55 = 0x440,
159 NV_INT_ENABLE_MCP55 = 0x444,
160 NV_NCQ_REG_MCP55 = 0x448,
/* MCP55 interrupt bits */
163 NV_INT_ALL_MCP55 = 0xffff,
164 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
165 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
167 /* SWNCQ enable bits */
168 NV_CTL_PRI_SWNCQ = 0x02,
169 NV_CTL_SEC_SWNCQ = 0x04,
171 /* SW NCQ status bits */
172 NV_SWNCQ_IRQ_DEV = (1 << 0),
173 NV_SWNCQ_IRQ_PM = (1 << 1),
174 NV_SWNCQ_IRQ_ADDED = (1 << 2),
175 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
177 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
178 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
179 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
180 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
182 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
183 NV_SWNCQ_IRQ_REMOVED,
187 /* ADMA Physical Region Descriptor - one SG segment */
/* NOTE(review): the struct nv_adma_prd definition (orig. lines 188-195)
 * and the closing "};" of this enum are missing from this extract. */
/* Bits encoded into the 16-bit taskfile words written into a CPB. */
196 enum nv_adma_regbits {
197 CMDEND = (1 << 15), /* end of command list */
198 WNB = (1 << 14), /* wait-not-BSY */
199 IGN = (1 << 13), /* ignore this entry */
200 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
201 DA2 = (1 << (2 + 8)),
202 DA1 = (1 << (1 + 8)),
203 DA0 = (1 << (0 + 8)),
206 /* ADMA Command Parameter Block
207 The first 5 SG segments are stored inside the Command Parameter Block itself.
208 If there are more than 5 segments the remainder are stored in a separate
209 memory area indicated by next_aprd. */
/* NOTE(review): the "struct nv_adma_cpb {" opener and closing "};" are
 * missing from this extract; byte offsets are noted per field. */
211 u8 resp_flags; /* 0 */
212 u8 reserved1; /* 1 */
213 u8 ctl_flags; /* 2 */
214 /* len is length of taskfile in 64 bit words */
217 u8 next_cpb_idx; /* 5 */
218 __le16 reserved2; /* 6-7 */
219 __le16 tf[12]; /* 8-31 */
220 struct nv_adma_prd aprd[5]; /* 32-111 */
221 __le64 next_aprd; /* 112-119 */
222 __le64 reserved3; /* 120-127 */
/* Per-port private state for the ADMA path.
 * NOTE(review): several members (cpb_dma, aprd_dma, flags, adma_dma_mask)
 * and the closing "};" are elided in this extract but are referenced by
 * the functions below. */
226 struct nv_adma_port_priv {
227 struct nv_adma_cpb *cpb;
229 struct nv_adma_prd *aprd;
231 void __iomem *ctl_block;
232 void __iomem *gen_block;
233 void __iomem *notifier_clear_block;
/* Host-wide private state (fragment; body elided in this extract). */
239 struct nv_host_priv {
/* Fragment of struct defer_queue: FIFO of deferred command tags. */
247 unsigned int tag[ATA_MAX_QUEUE];
/* Events observed while analyzing a SWNCQ interrupt. */
250 enum ncq_saw_flag_list {
251 ncq_saw_d2h = (1U << 0),
252 ncq_saw_dmas = (1U << 1),
253 ncq_saw_sdb = (1U << 2),
254 ncq_saw_backout = (1U << 3),
/* Per-port private state for the software-NCQ (SWNCQ) path. */
257 struct nv_swncq_port_priv {
258 struct ata_bmdma_prd *prd; /* our SG list */
259 dma_addr_t prd_dma; /* and its DMA mapping */
260 void __iomem *sactive_block;
261 void __iomem *irq_block;
262 void __iomem *tag_block;
265 unsigned int last_issue_tag;
267 /* fifo circular queue to store deferral command */
268 struct defer_queue defer_queue;
270 /* for NCQ interrupt analysis */
275 unsigned int ncq_flags;
279 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
/* Forward declarations for the per-variant handlers defined later in the
 * file. NOTE(review): the "#endif" matching the CONFIG_PM_SLEEP guard
 * (orig. line 284) is missing from this extract. */
281 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
282 #ifdef CONFIG_PM_SLEEP
283 static int nv_pci_device_resume(struct pci_dev *pdev);
285 static void nv_ck804_host_stop(struct ata_host *host);
286 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
287 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
288 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
289 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
290 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
292 static int nv_hardreset(struct ata_link *link, unsigned int *class,
293 unsigned long deadline);
294 static void nv_nf2_freeze(struct ata_port *ap);
295 static void nv_nf2_thaw(struct ata_port *ap);
296 static void nv_ck804_freeze(struct ata_port *ap);
297 static void nv_ck804_thaw(struct ata_port *ap);
298 static int nv_adma_slave_config(struct scsi_device *sdev);
299 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
300 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
301 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
302 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
303 static void nv_adma_irq_clear(struct ata_port *ap);
304 static int nv_adma_port_start(struct ata_port *ap);
305 static void nv_adma_port_stop(struct ata_port *ap);
307 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
308 static int nv_adma_port_resume(struct ata_port *ap);
310 static void nv_adma_freeze(struct ata_port *ap);
311 static void nv_adma_thaw(struct ata_port *ap);
312 static void nv_adma_error_handler(struct ata_port *ap);
313 static void nv_adma_host_stop(struct ata_host *host);
314 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
315 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
317 static void nv_mcp55_thaw(struct ata_port *ap);
318 static void nv_mcp55_freeze(struct ata_port *ap);
319 static void nv_swncq_error_handler(struct ata_port *ap);
320 static int nv_swncq_slave_config(struct scsi_device *sdev);
321 static int nv_swncq_port_start(struct ata_port *ap);
322 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
323 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
324 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
325 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
326 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
328 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
329 static int nv_swncq_port_resume(struct ata_port *ap);
/* Chip-family enum fragment: NF2 and NF3 share the same quirks here. */
336 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
/* PCI ID table mapping each supported device to its chip family.
 * NOTE(review): the closing "};" (orig. line 360) is missing from this
 * extract. */
343 static const struct pci_device_id nv_pci_tbl[] = {
344 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
345 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
346 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
347 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
348 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
349 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
350 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
351 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
352 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
353 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
354 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
355 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
356 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
357 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
359 { } /* terminate list */
/* PCI driver glue. NOTE(review): the .name field, "#endif", and closing
 * "};" are elided in this extract. */
362 static struct pci_driver nv_pci_driver = {
364 .id_table = nv_pci_tbl,
365 .probe = nv_init_one,
366 #ifdef CONFIG_PM_SLEEP
367 .suspend = ata_pci_device_suspend,
368 .resume = nv_pci_device_resume,
370 .remove = ata_pci_remove_one,
/* Default SCSI host template: stock BMDMA settings, no NCQ. */
373 static struct scsi_host_template nv_sht = {
374 ATA_BMDMA_SHT(DRV_NAME),
/* SCSI host template for the ADMA path: queue depth and SG limits sized
 * to the hardware CPB/APRD layout defined above. */
377 static struct scsi_host_template nv_adma_sht = {
378 __ATA_BASE_SHT(DRV_NAME),
379 .can_queue = NV_ADMA_MAX_CPBS,
380 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
381 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
382 .slave_configure = nv_adma_slave_config,
383 .sdev_groups = ata_ncq_sdev_groups,
384 .change_queue_depth = ata_scsi_change_queue_depth,
385 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
/* SCSI host template for the software-NCQ path: legacy DMA limits but
 * with NCQ-style queueing in software. */
388 static struct scsi_host_template nv_swncq_sht = {
389 __ATA_BASE_SHT(DRV_NAME),
390 .can_queue = ATA_MAX_QUEUE - 1,
391 .sg_tablesize = LIBATA_MAX_PRD,
392 .dma_boundary = ATA_DMA_BOUNDARY,
393 .slave_configure = nv_swncq_slave_config,
394 .sdev_groups = ata_ncq_sdev_groups,
395 .change_queue_depth = ata_scsi_change_queue_depth,
396 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
400 * NV SATA controllers have various different problems with hardreset
401 * protocol depending on the specific controller and device.
405 * bko11195 reports that link doesn't come online after hardreset on
406 * generic nv's and there have been several other similar reports on
409 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
414 * bko3352 reports nf2/3 controllers can't determine device signature
415 * reliably after hardreset. The following thread reports detection
416 * failure on cold boot with the standard debouncing timing.
418 * http://thread.gmane.org/gmane.linux.ide/34098
420 * bko12176 reports that hardreset fails to bring up the link during
425 * For initial probing after boot and hot plugging, hardreset mostly
426 * works fine on CK804 but curiously, reprobing on the initial port
427 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
428 * FIS in a somewhat nondeterministic way.
432 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
433 * hardreset should be used and hardreset can't report proper
434 * signature, which suggests that mcp5x is closer to nf2 as long as
435 * reset quirkiness is concerned.
437 * bko12703 reports that boot probing fails for intel SSD with
438 * hardreset. Link fails to come online. Softreset works fine.
440 * The failures are varied but the following patterns seem true for
443 * - Softreset during boot always works.
445 * - Hardreset during boot sometimes fails to bring up the link on
446 * certain combinations and device signature acquisition is
449 * - Hardreset is often necessary after hotplug.
451 * So, preferring softreset for boot probing and error handling (as
452 * hardreset might bring down the link) but using hardreset for
453 * post-boot probing should work around the above issues in most
454 * cases. Define nv_hardreset() which only kicks in for post-boot
455 * probing and use it for all variants.
/* Base port operations shared by all variants: plain BMDMA plus SCR
 * access and the post-boot-only hardreset described above. */
457 static struct ata_port_operations nv_generic_ops = {
458 .inherits = &ata_bmdma_port_ops,
459 .lost_interrupt = ATA_OP_NULL,
460 .scr_read = nv_scr_read,
461 .scr_write = nv_scr_write,
462 .hardreset = nv_hardreset,
/* nForce2/3 ops: generic ops plus chip-specific freeze (matching .thaw
 * entry is elided in this extract). */
465 static struct ata_port_operations nv_nf2_ops = {
466 .inherits = &nv_generic_ops,
467 .freeze = nv_nf2_freeze,
/* CK804/MCP04 ops: chip-specific interrupt masking and host teardown. */
471 static struct ata_port_operations nv_ck804_ops = {
472 .inherits = &nv_generic_ops,
473 .freeze = nv_ck804_freeze,
474 .thaw = nv_ck804_thaw,
475 .host_stop = nv_ck804_host_stop,
/* ADMA ops: full override of command preparation/issue, interrupt
 * handling, EH and PM hooks for the ADMA engine. */
478 static struct ata_port_operations nv_adma_ops = {
479 .inherits = &nv_ck804_ops,
481 .check_atapi_dma = nv_adma_check_atapi_dma,
482 .sff_tf_read = nv_adma_tf_read,
483 .qc_defer = ata_std_qc_defer,
484 .qc_prep = nv_adma_qc_prep,
485 .qc_issue = nv_adma_qc_issue,
486 .sff_irq_clear = nv_adma_irq_clear,
488 .freeze = nv_adma_freeze,
489 .thaw = nv_adma_thaw,
490 .error_handler = nv_adma_error_handler,
491 .post_internal_cmd = nv_adma_post_internal_cmd,
493 .port_start = nv_adma_port_start,
494 .port_stop = nv_adma_port_stop,
496 .port_suspend = nv_adma_port_suspend,
497 .port_resume = nv_adma_port_resume,
499 .host_stop = nv_adma_host_stop,
/* Software-NCQ ops for MCP51/55: NCQ queueing implemented in the driver
 * on top of the legacy interface. */
502 static struct ata_port_operations nv_swncq_ops = {
503 .inherits = &nv_generic_ops,
505 .qc_defer = ata_std_qc_defer,
506 .qc_prep = nv_swncq_qc_prep,
507 .qc_issue = nv_swncq_qc_issue,
509 .freeze = nv_mcp55_freeze,
510 .thaw = nv_mcp55_thaw,
511 .error_handler = nv_swncq_error_handler,
514 .port_suspend = nv_swncq_port_suspend,
515 .port_resume = nv_swncq_port_resume,
517 .port_start = nv_swncq_port_start,
/* Per-variant private data fragment (struct nv_pi_priv): the interrupt
 * handler and SHT selected for a chip family. */
521 irq_handler_t irq_handler;
522 struct scsi_host_template *sht;
/* Build an anonymous nv_pi_priv for a nv_port_info .private_data entry. */
525 #define NV_PI_PRIV(_irq_handler, _sht) \
526 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
/* Port info table, indexed by the chip-family enum (GENERIC, NFORCE2,
 * CK804, ADMA, MCP5x, SWNCQ). NOTE(review): the per-entry braces and
 * index labels are elided in this extract; entry boundaries below are
 * inferred from repeating .flags fields. */
528 static const struct ata_port_info nv_port_info[] = {
/* generic */
531 .flags = ATA_FLAG_SATA,
532 .pio_mask = NV_PIO_MASK,
533 .mwdma_mask = NV_MWDMA_MASK,
534 .udma_mask = NV_UDMA_MASK,
535 .port_ops = &nv_generic_ops,
536 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
/* nforce2/3 */
540 .flags = ATA_FLAG_SATA,
541 .pio_mask = NV_PIO_MASK,
542 .mwdma_mask = NV_MWDMA_MASK,
543 .udma_mask = NV_UDMA_MASK,
544 .port_ops = &nv_nf2_ops,
545 .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
/* ck804 */
549 .flags = ATA_FLAG_SATA,
550 .pio_mask = NV_PIO_MASK,
551 .mwdma_mask = NV_MWDMA_MASK,
552 .udma_mask = NV_UDMA_MASK,
553 .port_ops = &nv_ck804_ops,
554 .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
/* ADMA (NCQ-capable) */
558 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
559 .pio_mask = NV_PIO_MASK,
560 .mwdma_mask = NV_MWDMA_MASK,
561 .udma_mask = NV_UDMA_MASK,
562 .port_ops = &nv_adma_ops,
563 .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
/* MCP5x without SWNCQ */
567 .flags = ATA_FLAG_SATA,
568 .pio_mask = NV_PIO_MASK,
569 .mwdma_mask = NV_MWDMA_MASK,
570 .udma_mask = NV_UDMA_MASK,
571 .port_ops = &nv_generic_ops,
572 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
/* SWNCQ (NCQ-capable) */
576 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
577 .pio_mask = NV_PIO_MASK,
578 .mwdma_mask = NV_MWDMA_MASK,
579 .udma_mask = NV_UDMA_MASK,
580 .port_ops = &nv_swncq_ops,
581 .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
/* Module metadata and registration of the PCI ID table for autoloading. */
585 MODULE_AUTHOR("NVIDIA");
586 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
587 MODULE_LICENSE("GPL");
588 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
589 MODULE_VERSION(DRV_VERSION);
/* Module parameters: ADMA defaults off, SWNCQ on, MSI off. */
591 static bool adma_enabled;
592 static bool swncq_enabled = true;
593 static bool msi_enabled;
/* Switch the port's ADMA engine back to legacy register mode: wait for
 * IDLE, clear the GO bit, then wait for the LEGACY status bit. No-op if
 * already in register mode. NOTE(review): loop bodies, delays, and local
 * declarations are elided in this extract. */
595 static void nv_adma_register_mode(struct ata_port *ap)
597 struct nv_adma_port_priv *pp = ap->private_data;
598 void __iomem *mmio = pp->ctl_block;
602 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
/* Poll (bounded to 20 iterations) for the engine to go idle. */
605 status = readw(mmio + NV_ADMA_STAT);
606 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
608 status = readw(mmio + NV_ADMA_STAT);
612 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
/* Drop GO, then poll for the LEGACY indication. */
615 tmp = readw(mmio + NV_ADMA_CTL);
616 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
619 status = readw(mmio + NV_ADMA_STAT);
620 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
622 status = readw(mmio + NV_ADMA_STAT);
627 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
630 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
/* Switch the port from legacy register mode into ADMA mode: set GO and
 * wait for LEGACY to clear and IDLE to assert. Must not be entered while
 * the port is pinned to legacy mode for ATAPI (WARN_ON below). */
633 static void nv_adma_mode(struct ata_port *ap)
635 struct nv_adma_port_priv *pp = ap->private_data;
636 void __iomem *mmio = pp->ctl_block;
640 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
643 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
645 tmp = readw(mmio + NV_ADMA_CTL);
646 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
/* Poll (bounded to 20 iterations) for the mode transition to complete. */
648 status = readw(mmio + NV_ADMA_STAT);
649 while (((status & NV_ADMA_STAT_LEGACY) ||
650 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
652 status = readw(mmio + NV_ADMA_STAT);
657 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
660 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
663 static int nv_adma_slave_config(struct scsi_device *sdev)
665 struct ata_port *ap = ata_shost_to_port(sdev->host);
666 struct nv_adma_port_priv *pp = ap->private_data;
667 struct nv_adma_port_priv *port0, *port1;
668 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
669 unsigned long segment_boundary, flags;
670 unsigned short sg_tablesize;
673 u32 current_reg, new_reg, config_mask;
675 rc = ata_scsi_slave_config(sdev);
677 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
678 /* Not a proper libata device, ignore */
681 spin_lock_irqsave(ap->lock, flags);
683 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
685 * NVIDIA reports that ADMA mode does not support ATAPI commands.
686 * Therefore ATAPI commands are sent through the legacy interface.
687 * However, the legacy interface only supports 32-bit DMA.
688 * Restrict DMA parameters as required by the legacy interface
689 * when an ATAPI device is connected.
691 segment_boundary = ATA_DMA_BOUNDARY;
692 /* Subtract 1 since an extra entry may be needed for padding, see
694 sg_tablesize = LIBATA_MAX_PRD - 1;
696 /* Since the legacy DMA engine is in use, we need to disable ADMA
699 nv_adma_register_mode(ap);
701 segment_boundary = NV_ADMA_DMA_BOUNDARY;
702 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
706 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);
708 if (ap->port_no == 1)
709 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
710 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
712 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
713 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
716 new_reg = current_reg | config_mask;
717 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
719 new_reg = current_reg & ~config_mask;
720 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
723 if (current_reg != new_reg)
724 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
726 port0 = ap->host->ports[0]->private_data;
727 port1 = ap->host->ports[1]->private_data;
728 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
729 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
731 * We have to set the DMA mask to 32-bit if either port is in
732 * ATAPI mode, since they are on the same PCI device which is
733 * used for DMA mapping. If either SCSI device is not allocated
734 * yet, it's OK since that port will discover its correct
735 * setting when it does get allocated.
737 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
739 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
742 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
743 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
745 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
746 (unsigned long long)*ap->host->dev->dma_mask,
747 segment_boundary, sg_tablesize);
749 spin_unlock_irqrestore(ap->lock, flags);
/* check_atapi_dma hook: nonzero return rejects ATAPI DMA. DMA is allowed
 * only once the legacy/ATAPI setup from slave_config has completed. */
754 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
756 struct nv_adma_port_priv *pp = qc->ap->private_data;
757 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
/* sff_tf_read hook: drop to register mode (safe here, see comment) before
 * reading the taskfile through the standard SFF path. */
760 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
762 /* Other than when internal or pass-through commands are executed,
763 the only time this function will be called in ADMA mode will be
764 if a command fails. In the failure case we don't care about going
765 into register mode with ADMA commands pending, as the commands will
766 all shortly be aborted anyway. We assume that NCQ commands are not
767 issued via passthrough, which is the only way that switching into
768 ADMA mode could abort outstanding commands. */
769 nv_adma_register_mode(ap);
771 ata_sff_tf_read(ap, tf);
/* Encode an ATA taskfile into the CPB's 16-bit register-write words
 * (register index in the high byte, value in the low byte, plus control
 * bits from enum nv_adma_regbits). Returns the number of words written.
 * NOTE(review): closing braces and the trailing padding loop header are
 * elided in this extract. */
774 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
776 unsigned int idx = 0;
778 if (tf->flags & ATA_TFLAG_ISADDR) {
779 if (tf->flags & ATA_TFLAG_LBA48) {
/* LBA48: write the HOB (previous-content) registers first; WNB on the
 * first word makes the controller wait for not-BSY. */
780 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
781 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
782 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
783 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
785 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
787 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
789 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
790 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
791 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
792 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
795 if (tf->flags & ATA_TFLAG_DEVICE)
796 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
/* Command register last; CMDEND terminates the register-write list. */
798 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
/* Remaining slots are padded with IGN (ignore-this-entry). */
801 cpb[idx++] = cpu_to_le16(IGN);
/* Inspect one CPB's response flags. On error (or when force_err is set by
 * the caller from the notifier-error register) records an EH description
 * and error mask and triggers recovery; otherwise reports whether the
 * CPB completed. NOTE(review): several closing braces and the freeze/
 * return statements are elided in this extract. */
806 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
808 struct nv_adma_port_priv *pp = ap->private_data;
809 u8 flags = pp->cpb[cpb_num].resp_flags;
811 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
813 if (unlikely((force_err ||
814 flags & (NV_CPB_RESP_ATA_ERR |
815 NV_CPB_RESP_CMD_ERR |
816 NV_CPB_RESP_CPB_ERR)))) {
817 struct ata_eh_info *ehi = &ap->link.eh_info;
820 ata_ehi_clear_desc(ehi);
821 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
822 if (flags & NV_CPB_RESP_ATA_ERR) {
823 ata_ehi_push_desc(ehi, "ATA error");
824 ehi->err_mask |= AC_ERR_DEV;
825 } else if (flags & NV_CPB_RESP_CMD_ERR) {
826 ata_ehi_push_desc(ehi, "CMD error");
827 ehi->err_mask |= AC_ERR_DEV;
828 } else if (flags & NV_CPB_RESP_CPB_ERR) {
829 ata_ehi_push_desc(ehi, "CPB error");
830 ehi->err_mask |= AC_ERR_SYSTEM;
833 /* notifier error, but no error in CPB flags? */
834 ata_ehi_push_desc(ehi, "unknown");
835 ehi->err_mask |= AC_ERR_OTHER;
838 /* Kill all commands. EH will determine what actually failed. */
846 if (likely(flags & NV_CPB_RESP_DONE))
/* Handle one port's legacy (non-ADMA) interrupt status byte. Returns
 * nonzero when the interrupt was ours and handled. NOTE(review): the
 * freeze/return bodies for the early cases are elided in this extract. */
851 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
853 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
855 /* freeze if hotplugged */
856 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
861 /* bail out if not our interrupt */
862 if (!(irq_stat & NV_INT_DEV))
865 /* DEV interrupt w/ no active qc? */
866 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
/* Reading status acks the spurious device interrupt. */
867 ata_sff_check_status(ap);
871 /* handle interrupt */
872 return ata_bmdma_port_intr(ap, qc);
/* Shared interrupt handler for ADMA-capable controllers. For each port:
 * fall back to the legacy handler when ADMA is disabled or the port is in
 * register mode; otherwise read/ack the notifier and status registers,
 * record EH info for hotplug/timeout/SError conditions, and complete any
 * finished CPBs. NOTE(review): many intermediate lines (declarations,
 * continue statements, closing braces) are elided in this extract. */
875 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
877 struct ata_host *host = dev_instance;
879 u32 notifier_clears[2];
881 spin_lock(&host->lock);
883 for (i = 0; i < host->n_ports; i++) {
884 struct ata_port *ap = host->ports[i];
885 struct nv_adma_port_priv *pp = ap->private_data;
886 void __iomem *mmio = pp->ctl_block;
889 u32 notifier, notifier_error;
891 notifier_clears[i] = 0;
893 /* if ADMA is disabled, use standard ata interrupt handler */
894 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
895 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
896 >> (NV_INT_PORT_SHIFT * i);
897 handled += nv_host_intr(ap, irq_stat);
901 /* if in ATA register mode, check for standard interrupts */
902 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
903 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
904 >> (NV_INT_PORT_SHIFT * i);
905 if (ata_tag_valid(ap->link.active_tag))
906 /** NV_INT_DEV indication seems unreliable
907 at times at least in ADMA mode. Force it
908 on always when a command is active, to
909 prevent losing interrupts. */
910 irq_stat |= NV_INT_DEV;
911 handled += nv_host_intr(ap, irq_stat);
914 notifier = readl(mmio + NV_ADMA_NOTIFIER);
915 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
916 notifier_clears[i] = notifier | notifier_error;
918 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
/* Nothing pending for this port — skip it. */
920 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
925 status = readw(mmio + NV_ADMA_STAT);
928 * Clear status. Ensure the controller sees the
929 * clearing before we start looking at any of the CPB
930 * statuses, so that any CPB completions after this
931 * point in the handler will raise another interrupt.
933 writew(status, mmio + NV_ADMA_STAT);
934 readw(mmio + NV_ADMA_STAT); /* flush posted write */
937 handled++; /* irq handled if we got here */
939 /* freeze if hotplugged or controller error */
940 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
941 NV_ADMA_STAT_HOTUNPLUG |
942 NV_ADMA_STAT_TIMEOUT |
943 NV_ADMA_STAT_SERROR))) {
944 struct ata_eh_info *ehi = &ap->link.eh_info;
946 ata_ehi_clear_desc(ehi);
947 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
948 if (status & NV_ADMA_STAT_TIMEOUT) {
949 ehi->err_mask |= AC_ERR_SYSTEM;
950 ata_ehi_push_desc(ehi, "timeout");
951 } else if (status & NV_ADMA_STAT_HOTPLUG) {
952 ata_ehi_hotplugged(ehi);
953 ata_ehi_push_desc(ehi, "hotplug");
954 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
955 ata_ehi_hotplugged(ehi);
956 ata_ehi_push_desc(ehi, "hot unplug");
957 } else if (status & NV_ADMA_STAT_SERROR) {
958 /* let EH analyze SError and figure out cause */
959 ata_ehi_push_desc(ehi, "SError");
961 ata_ehi_push_desc(ehi, "unknown");
966 if (status & (NV_ADMA_STAT_DONE |
967 NV_ADMA_STAT_CPBERR |
968 NV_ADMA_STAT_CMD_COMPLETE)) {
969 u32 check_commands = notifier_clears[i];
973 if (status & NV_ADMA_STAT_CPBERR) {
974 /* check all active commands */
975 if (ata_tag_valid(ap->link.active_tag))
976 check_commands = 1 <<
979 check_commands = ap->link.sactive;
982 /* check CPBs for completed commands */
983 while ((pos = ffs(check_commands))) {
985 rc = nv_adma_check_cpb(ap, pos,
986 notifier_error & (1 << pos));
987 done_mask |= 1 << pos;
988 done_mask |= 1 << pos;
989 else if (unlikely(rc < 0))
991 check_commands &= ~(1 << pos);
993 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
997 if (notifier_clears[0] || notifier_clears[1]) {
998 /* Note: Both notifier clear registers must be written
999 if either is set, even if one is zero, according to NVIDIA. */
1000 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1001 writel(notifier_clears[0], pp->notifier_clear_block);
1002 pp = host->ports[1]->private_data;
1003 writel(notifier_clears[1], pp->notifier_clear_block);
1006 spin_unlock(&host->lock);
1008 return IRQ_RETVAL(handled);
/* EH freeze hook: mask CK804-level interrupts, then (unless the port is
 * pinned to the legacy path for ATAPI) ack stale notifications and mask
 * the ADMA interrupt-enable bits. */
1011 static void nv_adma_freeze(struct ata_port *ap)
1013 struct nv_adma_port_priv *pp = ap->private_data;
1014 void __iomem *mmio = pp->ctl_block;
1017 nv_ck804_freeze(ap);
1019 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1022 /* clear any outstanding CK804 notifications */
1023 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1024 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1026 /* Disable interrupt */
1027 tmp = readw(mmio + NV_ADMA_CTL);
1028 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1029 mmio + NV_ADMA_CTL);
1030 readw(mmio + NV_ADMA_CTL); /* flush posted write */
/* EH thaw hook: re-enable the ADMA interrupt bits masked by freeze
 * (skipped when the port is pinned to the legacy path for ATAPI). */
1033 static void nv_adma_thaw(struct ata_port *ap)
1035 struct nv_adma_port_priv *pp = ap->private_data;
1036 void __iomem *mmio = pp->ctl_block;
1041 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1044 /* Enable interrupt */
1045 tmp = readw(mmio + NV_ADMA_CTL);
1046 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1047 mmio + NV_ADMA_CTL);
1048 readw(mmio + NV_ADMA_CTL); /* flush posted write */
/* sff_irq_clear hook: in legacy/ATAPI mode delegate to BMDMA; otherwise
 * ack CK804 notifications, clear all ADMA status bits, and clear this
 * port's notifier (both ports' notifier-clear registers must be written,
 * per the comment below). */
1051 static void nv_adma_irq_clear(struct ata_port *ap)
1053 struct nv_adma_port_priv *pp = ap->private_data;
1054 void __iomem *mmio = pp->ctl_block;
1055 u32 notifier_clears[2];
1057 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1058 ata_bmdma_irq_clear(ap);
1062 /* clear any outstanding CK804 notifications */
1063 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1064 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1066 /* clear ADMA status */
1067 writew(0xffff, mmio + NV_ADMA_STAT);
1069 /* clear notifiers - note both ports need to be written with
1070 something even though we are only clearing on one */
1071 if (ap->port_no == 0) {
1072 notifier_clears[0] = 0xFFFFFFFF;
1073 notifier_clears[1] = 0;
1075 notifier_clears[0] = 0;
1076 notifier_clears[1] = 0xFFFFFFFF;
1078 pp = ap->host->ports[0]->private_data;
1079 writel(notifier_clears[0], pp->notifier_clear_block);
1080 pp = ap->host->ports[1]->private_data;
1081 writel(notifier_clears[1], pp->notifier_clear_block);
/* post_internal_cmd hook: BMDMA cleanup is needed only when the command
 * ran through the legacy engine (port in register mode). */
1084 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1086 struct nv_adma_port_priv *pp = qc->ap->private_data;
1088 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1089 ata_bmdma_post_internal_cmd(qc);
/* port_start hook: allocate BMDMA fallback resources under a 32-bit DMA
 * mask, map the port's ADMA register blocks, raise the mask to 64-bit,
 * allocate the coherent CPB+APRD area, program its base address, and
 * bring the engine up in register mode after a channel reset.
 * NOTE(review): error-return paths, local declarations, and some closing
 * braces are elided in this extract. */
1092 static int nv_adma_port_start(struct ata_port *ap)
1094 struct device *dev = ap->host->dev;
1095 struct nv_adma_port_priv *pp;
1100 struct pci_dev *pdev = to_pci_dev(dev);
1106 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1109 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1113 /* we might fallback to bmdma, allocate bmdma resources */
1114 rc = ata_bmdma_port_start(ap);
1118 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1122 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1123 ap->port_no * NV_ADMA_PORT_SIZE;
1124 pp->ctl_block = mmio;
1125 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1126 pp->notifier_clear_block = pp->gen_block +
1127 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1130 * Now that the legacy PRD and padding buffer are allocated we can
1131 * raise the DMA mask to allocate the CPB/APRD table.
1133 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1135 pp->adma_dma_mask = *dev->dma_mask;
1137 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1138 &mem_dma, GFP_KERNEL);
1143 * First item in chunk of DMA memory:
1144 * 128-byte command parameter block (CPB)
1145 * one for each command tag
1148 pp->cpb_dma = mem_dma;
1150 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
/* Double 16-bit shift avoids UB when dma_addr_t is 32 bits wide. */
1151 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1153 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1154 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1157 * Second item: block of ADMA_SGTBL_LEN s/g entries
1160 pp->aprd_dma = mem_dma;
1162 ap->private_data = pp;
1164 /* clear any outstanding interrupt conditions */
1165 writew(0xffff, mmio + NV_ADMA_STAT);
1167 /* initialize port variables */
1168 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1170 /* clear CPB fetch count */
1171 writew(0, mmio + NV_ADMA_CPB_COUNT);
1173 /* clear GO for register mode, enable interrupt */
1174 tmp = readw(mmio + NV_ADMA_CTL);
1175 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1176 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
/* Pulse CHANNEL_RESET: set, flush, clear, flush. */
1178 tmp = readw(mmio + NV_ADMA_CTL);
1179 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1180 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1182 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1183 readw(mmio + NV_ADMA_CTL); /* flush posted write */
/*
 * nv_adma_port_stop - quiesce the ADMA engine on port teardown.
 * Writing 0 to NV_ADMA_CTL clears GO and all interrupt-enable bits;
 * all memory was devm/dmam-allocated so nothing is freed here.
 */
1188 static void nv_adma_port_stop(struct ata_port *ap)
1190 struct nv_adma_port_priv *pp = ap->private_data;
1191 void __iomem *mmio = pp->ctl_block;
1194 writew(0, mmio + NV_ADMA_CTL);
/*
 * nv_adma_port_suspend - put the ADMA port to sleep.
 * Drops back to register mode (which clears GO), zeroes the CPB fetch
 * count, and disables the engine and its interrupts.
 */
1198 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1200 struct nv_adma_port_priv *pp = ap->private_data;
1201 void __iomem *mmio = pp->ctl_block;
1203 /* Go to register mode - clears GO */
1204 nv_adma_register_mode(ap);
1206 /* clear CPB fetch count */
1207 writew(0, mmio + NV_ADMA_CPB_COUNT);
1209 /* disable interrupt, shut down port */
1210 writew(0, mmio + NV_ADMA_CTL);
/*
 * nv_adma_port_resume - reverse of nv_adma_port_suspend.
 * Re-programs the CPB table base (lost across suspend), clears stale
 * status, and repeats the register-mode + channel-reset bring-up
 * sequence from nv_adma_port_start.
 */
1217 static int nv_adma_port_resume(struct ata_port *ap)
1219 struct nv_adma_port_priv *pp = ap->private_data;
1220 void __iomem *mmio = pp->ctl_block;
1222 /* set CPB block location */
/* (dma >> 16) >> 16 instead of >> 32: safe when dma_addr_t is 32-bit. */
1223 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1224 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1226 /* clear any outstanding interrupt conditions */
1227 writew(0xffff, mmio + NV_ADMA_STAT);
1229 /* initialize port variables */
1230 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1232 /* clear CPB fetch count */
1233 writew(0, mmio + NV_ADMA_CPB_COUNT);
1235 /* clear GO for register mode, enable interrupt */
1236 tmp = readw(mmio + NV_ADMA_CTL);
1237 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1238 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
/* Pulse CHANNEL_RESET, flushing each posted write with a readback. */
1240 tmp = readw(mmio + NV_ADMA_CTL);
1241 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1242 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1244 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1245 readw(mmio + NV_ADMA_CTL); /* flush posted write */
/*
 * nv_adma_setup_port - point the taskfile I/O addresses at the ADMA
 * MMIO register space. In ADMA mode the shadow taskfile registers live
 * in MMIO at 4-byte strides, so each ATA_REG_* offset is scaled by 4;
 * the control/altstatus register sits at fixed offset 0x20.
 */
1250 static void nv_adma_setup_port(struct ata_port *ap)
1252 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1253 struct ata_ioports *ioport = &ap->ioaddr;
1257 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1259 ioport->cmd_addr = mmio;
1260 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1261 ioport->error_addr =
1262 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1263 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1264 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1265 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1266 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1267 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1268 ioport->status_addr =
1269 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1270 ioport->altstatus_addr =
1271 ioport->ctl_addr = mmio + 0x20;
/*
 * nv_adma_host_init - host-wide ADMA enable.
 * Sets the per-port ADMA enable and "post write buffer" bits in PCI
 * config register NV_MCP_SATA_CFG_20, then rewires every port's
 * taskfile addresses into the ADMA MMIO space.
 */
1274 static int nv_adma_host_init(struct ata_host *host)
1276 struct pci_dev *pdev = to_pci_dev(host->dev);
1280 /* enable ADMA on the ports */
1281 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1282 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1283 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1284 NV_MCP_SATA_CFG_20_PORT1_EN |
1285 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1287 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1289 for (i = 0; i < host->n_ports; i++)
1290 nv_adma_setup_port(host->ports[i]);
1295 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1296 struct scatterlist *sg,
1298 struct nv_adma_prd *aprd)
1301 if (qc->tf.flags & ATA_TFLAG_WRITE)
1302 flags |= NV_APRD_WRITE;
1303 if (idx == qc->n_elem - 1)
1304 flags |= NV_APRD_END;
1306 flags |= NV_APRD_CONT;
1308 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1309 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1310 aprd->flags = flags;
1311 aprd->packet_len = 0;
/*
 * nv_adma_fill_sg - build the scatter/gather list for a CPB.
 * The first 5 entries live inline in the CPB itself; any overflow goes
 * into this tag's slice of the shared APRD table, which next_aprd then
 * points at (or 0 when everything fit inline).
 */
1314 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1316 struct nv_adma_port_priv *pp = qc->ap->private_data;
1317 struct nv_adma_prd *aprd;
1318 struct scatterlist *sg;
1321 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1322 aprd = (si < 5) ? &cpb->aprd[si] :
1323 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1324 nv_adma_fill_aprd(qc, sg, si, aprd);
1327 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1329 cpb->next_aprd = cpu_to_le64(0);
/*
 * nv_adma_use_reg_mode - decide whether a command must bypass the ADMA
 * engine and use legacy register mode. True for ATAPI-configured ports
 * and polled commands; DMA-mapped and NODATA commands stay on ADMA
 * (the elided lines carry the actual return statements).
 */
1332 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1334 struct nv_adma_port_priv *pp = qc->ap->private_data;
1336 /* ADMA engine can only be used for non-ATAPI DMA commands,
1337 or interrupt-driven no-data commands. */
1338 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1339 (qc->tf.flags & ATA_TFLAG_POLLING))
1342 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1343 (qc->tf.protocol == ATA_PROT_NODATA))
/*
 * nv_adma_qc_prep - fill in the CPB for a queued command, or fall back
 * to BMDMA preparation for commands that must run in register mode.
 * The CPB is fully populated before ctl_flags (with CPB_VALID) is
 * stored, so the controller never fetches a half-built CPB.
 */
1349 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1351 struct nv_adma_port_priv *pp = qc->ap->private_data;
1352 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1353 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1356 if (nv_adma_use_reg_mode(qc)) {
/* A DMA-mapped command in register mode is only legal on ATAPI ports. */
1357 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1358 (qc->flags & ATA_QCFLAG_DMAMAP));
1359 nv_adma_register_mode(qc->ap);
1360 ata_bmdma_qc_prep(qc);
1364 cpb->resp_flags = NV_CPB_RESP_DONE;
1370 cpb->tag = qc->hw_tag;
1371 cpb->next_cpb_idx = 0;
1373 /* turn on NCQ flags for NCQ commands */
1374 if (qc->tf.protocol == ATA_PROT_NCQ)
1375 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1377 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1379 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1381 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1382 nv_adma_fill_sg(qc, cpb);
1383 ctl_flags |= NV_CPB_CTL_APRD_VALID;
/* Non-DMA command: zero the inline APRDs so stale entries can't be used. */
1385 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1387 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1388 until we are finished filling in all of the contents */
1390 cpb->ctl_flags = ctl_flags;
1392 cpb->resp_flags = 0;
/*
 * nv_adma_qc_issue - hand a prepared command to the hardware.
 * Register-mode commands are delegated to the BMDMA issue path; ADMA
 * commands are kicked off by writing the tag to the APPEND register.
 * NCQ with RESULT_TF is rejected because reading the taskfile would
 * force a mode switch that aborts in-flight commands.
 */
1399 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1401 struct nv_adma_port_priv *pp = qc->ap->private_data;
1402 void __iomem *mmio = pp->ctl_block;
1403 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1405 /* We can't handle result taskfile with NCQ commands, since
1406 retrieving the taskfile switches us out of ADMA mode and would abort
1407 existing commands. */
1408 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1409 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1410 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1411 return AC_ERR_SYSTEM;
1414 if (nv_adma_use_reg_mode(qc)) {
1415 /* use ATA register mode */
1416 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1417 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1418 (qc->flags & ATA_QCFLAG_DMAMAP));
1419 nv_adma_register_mode(qc->ap);
1420 return ata_bmdma_qc_issue(qc);
1422 nv_adma_mode(qc->ap);
1424 /* write append register, command tag in lower 8 bits
1425 and (number of cpbs to append -1) in top 8 bits */
/* Hardware quirk: switching between NCQ and non-NCQ back-to-back times out. */
1428 if (curr_ncq != pp->last_issue_ncq) {
1429 /* Seems to need some delay before switching between NCQ and
1430 non-NCQ commands, else we get command timeouts and such. */
1432 pp->last_issue_ncq = curr_ncq;
1435 writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1437 DPRINTK("Issued tag %u\n", qc->hw_tag);
/*
 * nv_generic_interrupt - IRQ handler for the plain (non-ADMA, non-SWNCQ)
 * flavors. For each port with an active non-polled command, run the
 * BMDMA port interrupt; otherwise read the status register to ack any
 * spurious interrupt.
 */
1442 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1444 struct ata_host *host = dev_instance;
1446 unsigned int handled = 0;
1447 unsigned long flags;
1449 spin_lock_irqsave(&host->lock, flags);
1451 for (i = 0; i < host->n_ports; i++) {
1452 struct ata_port *ap = host->ports[i];
1453 struct ata_queued_cmd *qc;
1455 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1456 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1457 handled += ata_bmdma_port_intr(ap, qc);
1460 * No request pending? Clear interrupt status
1461 * anyway, in case there's one pending.
1463 ap->ops->sff_check_status(ap);
1467 spin_unlock_irqrestore(&host->lock, flags);
1469 return IRQ_RETVAL(handled);
1472 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1476 for (i = 0; i < host->n_ports; i++) {
1477 handled += nv_host_intr(host->ports[i], irq_stat);
1478 irq_stat >>= NV_INT_PORT_SHIFT;
1481 return IRQ_RETVAL(handled);
/*
 * nv_nf2_interrupt - nForce2/3 IRQ entry: the interrupt status byte is
 * read via port 0's SCR window, then dispatched per-port.
 */
1484 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1486 struct ata_host *host = dev_instance;
1490 spin_lock(&host->lock);
1491 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1492 ret = nv_do_interrupt(host, irq_stat);
1493 spin_unlock(&host->lock);
/*
 * nv_ck804_interrupt - CK804 IRQ entry: identical to the nf2 handler
 * except the status byte lives in the MMIO BAR at NV_INT_STATUS_CK804.
 */
1498 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1500 struct ata_host *host = dev_instance;
1504 spin_lock(&host->lock);
1505 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1506 ret = nv_do_interrupt(host, irq_stat);
1507 spin_unlock(&host->lock);
1512 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1514 if (sc_reg > SCR_CONTROL)
1517 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1521 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1523 if (sc_reg > SCR_CONTROL)
1526 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
/*
 * nv_hardreset - conditional hardreset.
 * Only hardreset during post-boot probing of an empty port (controllers
 * here lock up if an occupied port is hardreset); otherwise just resume
 * the link so it is online for the following softreset. The elided
 * lines carry the return values.
 */
1530 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1531 unsigned long deadline)
1533 struct ata_eh_context *ehc = &link->eh_context;
1535 /* Do hardreset iff it's post-boot probing, please read the
1536 * comment above port ops for details.
1538 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1539 !ata_dev_enabled(link->device))
1540 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1543 const unsigned long *timing = sata_ehc_deb_timing(ehc);
1546 if (!(ehc->i.flags & ATA_EHI_QUIET))
1548 "nv: skipping hardreset on occupied port\n");
1550 /* make sure the link is online */
1551 rc = sata_link_resume(link, timing, deadline);
1552 /* whine about phy resume failure but proceed */
1553 if (rc && rc != -EOPNOTSUPP)
1554 ata_link_warn(link, "failed to resume link (errno=%d)\n",
1558 /* device signature acquisition is unreliable */
1562 static void nv_nf2_freeze(struct ata_port *ap)
1564 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1565 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1568 mask = ioread8(scr_addr + NV_INT_ENABLE);
1569 mask &= ~(NV_INT_ALL << shift);
1570 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1573 static void nv_nf2_thaw(struct ata_port *ap)
1575 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1576 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1579 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1581 mask = ioread8(scr_addr + NV_INT_ENABLE);
1582 mask |= (NV_INT_MASK << shift);
1583 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1586 static void nv_ck804_freeze(struct ata_port *ap)
1588 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1589 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1592 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1593 mask &= ~(NV_INT_ALL << shift);
1594 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1597 static void nv_ck804_thaw(struct ata_port *ap)
1599 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1600 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1603 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1605 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1606 mask |= (NV_INT_MASK << shift);
1607 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1610 static void nv_mcp55_freeze(struct ata_port *ap)
1612 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1613 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1616 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1618 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1619 mask &= ~(NV_INT_ALL_MCP55 << shift);
1620 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1623 static void nv_mcp55_thaw(struct ata_port *ap)
1625 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1626 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1629 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1631 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1632 mask |= (NV_INT_MASK_MCP55 << shift);
1633 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
/*
 * nv_adma_error_handler - EH entry for ADMA ports.
 * If EH fires while the engine is still in ADMA mode, dump the ADMA
 * state for diagnostics, force the port back to register mode,
 * invalidate all CPBs, and pulse a channel reset before delegating to
 * the standard BMDMA error handler.
 */
1636 static void nv_adma_error_handler(struct ata_port *ap)
1638 struct nv_adma_port_priv *pp = ap->private_data;
1639 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1640 void __iomem *mmio = pp->ctl_block;
/* Only dump state when a command was actually in flight. */
1644 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1645 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1646 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1647 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1648 u32 status = readw(mmio + NV_ADMA_STAT);
1649 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1650 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1653 "EH in ADMA mode, notifier 0x%X "
1654 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1655 "next cpb count 0x%X next cpb idx 0x%x\n",
1656 notifier, notifier_error, gen_ctl, status,
1657 cpb_count, next_cpb_idx);
/* Log the CPB flags of every active/outstanding tag. */
1659 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1660 struct nv_adma_cpb *cpb = &pp->cpb[i];
1661 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1662 ap->link.sactive & (1 << i))
1664 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1665 i, cpb->ctl_flags, cpb->resp_flags);
1669 /* Push us back into port register mode for error handling. */
1670 nv_adma_register_mode(ap);
1672 /* Mark all of the CPBs as invalid to prevent them from
1674 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1675 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1677 /* clear CPB fetch count */
1678 writew(0, mmio + NV_ADMA_CPB_COUNT);
/* Pulse CHANNEL_RESET, flushing each posted write with a readback. */
1681 tmp = readw(mmio + NV_ADMA_CTL);
1682 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1683 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1685 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1686 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1689 ata_bmdma_error_handler(ap);
1692 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1694 struct nv_swncq_port_priv *pp = ap->private_data;
1695 struct defer_queue *dq = &pp->defer_queue;
1698 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1699 dq->defer_bits |= (1 << qc->hw_tag);
1700 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1703 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1705 struct nv_swncq_port_priv *pp = ap->private_data;
1706 struct defer_queue *dq = &pp->defer_queue;
1709 if (dq->head == dq->tail) /* null queue */
1712 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1713 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1714 WARN_ON(!(dq->defer_bits & (1 << tag)));
1715 dq->defer_bits &= ~(1 << tag);
1717 return ata_qc_from_tag(ap, tag);
1720 static void nv_swncq_fis_reinit(struct ata_port *ap)
1722 struct nv_swncq_port_priv *pp = ap->private_data;
1725 pp->dmafis_bits = 0;
1726 pp->sdbfis_bits = 0;
1730 static void nv_swncq_pp_reinit(struct ata_port *ap)
1732 struct nv_swncq_port_priv *pp = ap->private_data;
1733 struct defer_queue *dq = &pp->defer_queue;
1739 pp->last_issue_tag = ATA_TAG_POISON;
1740 nv_swncq_fis_reinit(ap);
1743 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1745 struct nv_swncq_port_priv *pp = ap->private_data;
1747 writew(fis, pp->irq_block);
1750 static void __ata_bmdma_stop(struct ata_port *ap)
1752 struct ata_queued_cmd qc;
1755 ata_bmdma_stop(&qc);
/*
 * nv_swncq_ncq_stop - halt SWNCQ activity ahead of error handling.
 * Dumps the complete software NCQ state (per-tag FIS bitmaps vs. the
 * hardware SActive register) for diagnostics, then reinitializes the
 * port state, clears interrupts, and stops the BMDMA engine.
 */
1758 static void nv_swncq_ncq_stop(struct ata_port *ap)
1760 struct nv_swncq_port_priv *pp = ap->private_data;
1765 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
1766 ap->qc_active, ap->link.sactive);
1768 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1769 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1770 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1771 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1773 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1774 ap->ops->sff_check_status(ap),
1775 ioread8(ap->ioaddr.error_addr));
1777 sactive = readl(pp->sactive_block);
/* Tags set in qc_active but clear in SActive have completed on-device. */
1778 done_mask = pp->qc_active ^ sactive;
1780 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1781 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1783 if (pp->qc_active & (1 << i))
1785 else if (done_mask & (1 << i))
1791 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1792 (pp->dhfis_bits >> i) & 0x1,
1793 (pp->dmafis_bits >> i) & 0x1,
1794 (pp->sdbfis_bits >> i) & 0x1,
1795 (sactive >> i) & 0x1,
1796 (err ? "error! tag doesn't exit" : " "));
1799 nv_swncq_pp_reinit(ap);
1800 ap->ops->sff_irq_clear(ap);
1801 __ata_bmdma_stop(ap);
1802 nv_swncq_irq_clear(ap, 0xffff);
1805 static void nv_swncq_error_handler(struct ata_port *ap)
1807 struct ata_eh_context *ehc = &ap->link.eh_context;
1809 if (ap->link.sactive) {
1810 nv_swncq_ncq_stop(ap);
1811 ehc->i.action |= ATA_EH_RESET;
1814 ata_bmdma_error_handler(ap);
1818 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1820 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1824 writel(~0, mmio + NV_INT_STATUS_MCP55);
1827 writel(0, mmio + NV_INT_ENABLE_MCP55);
1830 tmp = readl(mmio + NV_CTL_MCP55);
1831 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1832 writel(tmp, mmio + NV_CTL_MCP55);
1837 static int nv_swncq_port_resume(struct ata_port *ap)
1839 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1843 writel(~0, mmio + NV_INT_STATUS_MCP55);
1846 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1849 tmp = readl(mmio + NV_CTL_MCP55);
1850 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1856 static void nv_swncq_host_init(struct ata_host *host)
1859 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1860 struct pci_dev *pdev = to_pci_dev(host->dev);
1863 /* disable ECO 398 */
1864 pci_read_config_byte(pdev, 0x7f, ®val);
1865 regval &= ~(1 << 7);
1866 pci_write_config_byte(pdev, 0x7f, regval);
1869 tmp = readl(mmio + NV_CTL_MCP55);
1870 VPRINTK("HOST_CTL:0x%X\n", tmp);
1871 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1873 /* enable irq intr */
1874 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1875 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1876 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1878 /* clear port irq */
1879 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
/*
 * nv_swncq_slave_config - SCSI slave_config hook with a Maxtor quirk.
 * After the generic libata slave config, drop the queue depth to 1
 * (disabling SWNCQ) for Maxtor drives on MCP51, and — per the elided
 * revision check — on early MCP55 revisions.
 */
1884 static int nv_swncq_slave_config(struct scsi_device *sdev)
1885 struct ata_port *ap = ata_shost_to_port(sdev->host);
1886 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1887 struct ata_device *dev;
1889 u8 check_maxtor = 0;
1890 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1892 rc = ata_scsi_slave_config(sdev);
1893 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1894 /* Not a proper libata device, ignore */
1897 dev = &ap->link.device[sdev->id];
1898 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1901 /* if MCP51 and Maxtor, then disable ncq */
1902 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1903 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1906 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1907 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1908 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1909 pci_read_config_byte(pdev, 0x8, &rev);
/* Identify the drive by its IDENTIFY product string. */
1917 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1919 if (strncmp(model_num, "Maxtor", 6) == 0) {
1920 ata_scsi_change_queue_depth(sdev, 1);
1921 ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1928 static int nv_swncq_port_start(struct ata_port *ap)
1930 struct device *dev = ap->host->dev;
1931 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1932 struct nv_swncq_port_priv *pp;
1935 /* we might fallback to bmdma, allocate bmdma resources */
1936 rc = ata_bmdma_port_start(ap);
1940 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1944 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1945 &pp->prd_dma, GFP_KERNEL);
1949 ap->private_data = pp;
1950 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1951 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1952 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1957 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1959 if (qc->tf.protocol != ATA_PROT_NCQ) {
1960 ata_bmdma_qc_prep(qc);
1964 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1967 nv_swncq_fill_sg(qc);
/*
 * nv_swncq_fill_sg - build this tag's BMDMA PRD table from the qc's
 * scatterlist. Each sg element is split so no PRD entry crosses a
 * 64KB boundary (the elided inner loop advances addr/len); the final
 * entry is marked with ATA_PRD_EOT.
 */
1974 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1975 struct ata_port *ap = qc->ap;
1976 struct scatterlist *sg;
1977 struct nv_swncq_port_priv *pp = ap->private_data;
1978 struct ata_bmdma_prd *prd;
1979 unsigned int si, idx;
/* Each tag owns its own ATA_MAX_PRD-entry slice of the PRD pool. */
1980 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1983 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1987 addr = (u32)sg_dma_address(sg);
1988 sg_len = sg_dma_len(sg);
1991 offset = addr & 0xffff;
/* Clamp the entry so it ends at the next 64KB boundary. */
1993 if ((offset + sg_len) > 0x10000)
1994 len = 0x10000 - offset;
1996 prd[idx].addr = cpu_to_le32(addr);
1997 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2005 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
/*
 * nv_swncq_issue_atacmd - actually issue one NCQ command to the drive.
 * Marks the tag active in the software SActive shadow and bookkeeping
 * bitmaps, then loads and executes the taskfile via the SFF ops.
 */
2008 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2009 struct ata_queued_cmd *qc)
2011 struct nv_swncq_port_priv *pp = ap->private_data;
/* Set this tag in the controller's SActive mirror before issuing. */
2018 writel((1 << qc->hw_tag), pp->sactive_block);
2019 pp->last_issue_tag = qc->hw_tag;
2020 pp->dhfis_bits &= ~(1 << qc->hw_tag);
2021 pp->dmafis_bits &= ~(1 << qc->hw_tag);
2022 pp->qc_active |= (0x1 << qc->hw_tag);
2024 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2025 ap->ops->sff_exec_command(ap, &qc->tf);
2027 DPRINTK("Issued tag %u\n", qc->hw_tag);
/*
 * nv_swncq_qc_issue - issue hook for SWNCQ ports.
 * Non-NCQ commands go straight to BMDMA issue. NCQ commands are issued
 * immediately when the port is idle, otherwise deferred (the elided
 * condition branches on pp->qc_active).
 */
2034 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2035 struct ata_port *ap = qc->ap;
2036 struct nv_swncq_port_priv *pp = ap->private_data;
2037 if (qc->tf.protocol != ATA_PROT_NCQ)
2038 return ata_bmdma_qc_issue(qc);
2043 nv_swncq_issue_atacmd(ap, qc);
2045 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
/*
 * nv_swncq_hotplug - handle a hotplug notification from the interrupt
 * path: clear SError (read-then-write-back), record whether the event
 * was a plug or unplug, and freeze the port so EH takes over.
 */
2050 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2053 struct ata_eh_info *ehi = &ap->link.eh_info;
2055 ata_ehi_clear_desc(ehi);
2057 /* AHCI needs SError cleared; otherwise, it might lock up */
2058 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2059 sata_scr_write(&ap->link, SCR_ERROR, serror);
2061 /* analyze @irq_stat */
2062 if (fis & NV_SWNCQ_IRQ_ADDED)
2063 ata_ehi_push_desc(ehi, "hot plug");
2064 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2065 ata_ehi_push_desc(ehi, "hot unplug");
2067 ata_ehi_hotplugged(ehi);
2069 /* okay, let's hand over to EH */
2070 ehi->serror |= serror;
2072 ata_port_freeze(ap);
/*
 * nv_swncq_sdbfis - process a Set Device Bits FIS (NCQ completion).
 * Computes which tags completed by XORing the software active mask with
 * the hardware SActive register, completes them, and then either
 * re-issues the last command (if its D2H FIS was lost) or launches the
 * next deferred command. Negative return (elided lines) signals an
 * error that freezes the port.
 */
2077 static int nv_swncq_sdbfis(struct ata_port *ap)
2078 struct ata_queued_cmd *qc;
2079 struct nv_swncq_port_priv *pp = ap->private_data;
2080 struct ata_eh_info *ehi = &ap->link.eh_info;
2085 host_stat = ap->ops->bmdma_status(ap);
2086 if (unlikely(host_stat & ATA_DMA_ERR)) {
2087 /* error when transferring data to/from memory */
2088 ata_ehi_clear_desc(ehi);
2089 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2090 ehi->err_mask |= AC_ERR_HOST_BUS;
2091 ehi->action |= ATA_EH_RESET;
2095 ap->ops->sff_irq_clear(ap);
2096 __ata_bmdma_stop(ap);
2098 sactive = readl(pp->sactive_block);
/* Bits set in qc_active but cleared in SActive are finished commands. */
2099 done_mask = pp->qc_active ^ sactive;
2101 pp->qc_active &= ~done_mask;
2102 pp->dhfis_bits &= ~done_mask;
2103 pp->dmafis_bits &= ~done_mask;
2104 pp->sdbfis_bits |= done_mask;
2105 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2107 if (!ap->qc_active) {
2109 nv_swncq_pp_reinit(ap);
2113 if (pp->qc_active & pp->dhfis_bits)
2116 if ((pp->ncq_flags & ncq_saw_backout) ||
2117 (pp->qc_active ^ pp->dhfis_bits))
2118 /* if the controller can't get a device to host register FIS,
2119 * The driver needs to reissue the new command.
2123 DPRINTK("id 0x%x QC: qc_active 0x%llx,"
2124 "SWNCQ:qc_active 0x%X defer_bits %X "
2125 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2126 ap->print_id, ap->qc_active, pp->qc_active,
2127 pp->defer_queue.defer_bits, pp->dhfis_bits,
2128 pp->dmafis_bits, pp->last_issue_tag);
2130 nv_swncq_fis_reinit(ap);
/* Re-issue the command whose issue was backed out / D2H FIS lost. */
2133 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2134 nv_swncq_issue_atacmd(ap, qc);
2138 if (pp->defer_queue.defer_bits) {
2139 /* send deferral queue command */
2140 qc = nv_swncq_qc_from_dq(ap);
2141 WARN_ON(qc == NULL);
2142 nv_swncq_issue_atacmd(ap, qc);
2148 static inline u32 nv_swncq_tag(struct ata_port *ap)
2150 struct nv_swncq_port_priv *pp = ap->private_data;
2153 tag = readb(pp->tag_block) >> 2;
2154 return (tag & 0x1f);
/*
 * nv_swncq_dmafis - respond to a DMA Setup FIS: program the BMDMA
 * engine with the PRD table for the tag the controller selected, set
 * the transfer direction, and start the DMA engine.
 */
2157 static void nv_swncq_dmafis(struct ata_port *ap)
2159 struct ata_queued_cmd *qc;
2163 struct nv_swncq_port_priv *pp = ap->private_data;
2165 __ata_bmdma_stop(ap);
/* The hardware tells us which queued tag it chose to service. */
2166 tag = nv_swncq_tag(ap);
2168 DPRINTK("dma setup tag 0x%x\n", tag);
2169 qc = ata_qc_from_tag(ap, tag);
2174 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2176 /* load PRD table addr. */
2177 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2178 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2180 /* specify data direction, triple-check start bit is clear */
2181 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2182 dmactl &= ~ATA_DMA_WR;
2184 dmactl |= ATA_DMA_WR;
2186 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/*
 * nv_swncq_host_interrupt - per-port SWNCQ interrupt processing.
 * Handles, in order: hotplug events, device errors (freeze + EH),
 * BACKOUT / SDB / D2H-register / DMA-setup FIS notifications, updating
 * the bookkeeping bitmaps and kicking deferred commands as needed.
 * Unrecognized FIS bits fall through to a port freeze at the bottom.
 */
2191 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2193 struct nv_swncq_port_priv *pp = ap->private_data;
2194 struct ata_queued_cmd *qc;
2195 struct ata_eh_info *ehi = &ap->link.eh_info;
2199 ata_stat = ap->ops->sff_check_status(ap);
2200 nv_swncq_irq_clear(ap, fis);
2204 if (ap->pflags & ATA_PFLAG_FROZEN)
2207 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2208 nv_swncq_hotplug(ap, fis);
/* Snapshot and clear SError for the error-reporting path below. */
2215 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2217 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2219 if (ata_stat & ATA_ERR) {
2220 ata_ehi_clear_desc(ehi);
2221 ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2222 ehi->err_mask |= AC_ERR_DEV;
2223 ehi->serror |= serror;
2224 ehi->action |= ATA_EH_RESET;
2225 ata_port_freeze(ap);
2229 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2230 /* If the IRQ is backout, driver must issue
2231 * the new command again some time later.
2233 pp->ncq_flags |= ncq_saw_backout;
2236 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2237 pp->ncq_flags |= ncq_saw_sdb;
2238 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2239 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2240 ap->print_id, pp->qc_active, pp->dhfis_bits,
2241 pp->dmafis_bits, readl(pp->sactive_block));
2242 if (nv_swncq_sdbfis(ap) < 0)
2246 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2247 /* The interrupt indicates the new command
2248 * was transmitted correctly to the drive.
2250 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2251 pp->ncq_flags |= ncq_saw_d2h;
/* A D2H register FIS after SDB/BACKOUT means the protocol derailed. */
2252 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2253 ata_ehi_push_desc(ehi, "illegal fis transaction");
2254 ehi->err_mask |= AC_ERR_HSM;
2255 ehi->action |= ATA_EH_RESET;
2259 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2260 !(pp->ncq_flags & ncq_saw_dmas)) {
2261 ata_stat = ap->ops->sff_check_status(ap);
2262 if (ata_stat & ATA_BUSY)
2265 if (pp->defer_queue.defer_bits) {
2266 DPRINTK("send next command\n");
2267 qc = nv_swncq_qc_from_dq(ap);
2268 nv_swncq_issue_atacmd(ap, qc);
2273 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2274 /* program the dma controller with appropriate PRD buffers
2275 * and start the DMA transfer for requested command.
2277 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2278 pp->ncq_flags |= ncq_saw_dmas;
2279 nv_swncq_dmafis(ap);
/* Anything not handled above is unexpected: record and freeze. */
2285 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2286 ata_port_freeze(ap);
/*
 * nv_swncq_interrupt - top-level MCP55 SWNCQ IRQ handler.
 * Reads the combined status dword once, then routes each port's slice:
 * ports with active NCQ commands go through the SWNCQ state machine,
 * idle ports through the generic host interrupt (after clearing
 * non-hotplug bits).
 */
2290 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2292 struct ata_host *host = dev_instance;
2294 unsigned int handled = 0;
2295 unsigned long flags;
2298 spin_lock_irqsave(&host->lock, flags);
2300 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2302 for (i = 0; i < host->n_ports; i++) {
2303 struct ata_port *ap = host->ports[i];
2305 if (ap->link.sactive) {
2306 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2309 if (irq_stat) /* reserve Hotplug */
2310 nv_swncq_irq_clear(ap, 0xfff0);
2312 handled += nv_host_intr(ap, (u8)irq_stat);
2314 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2317 spin_unlock_irqrestore(&host->lock, flags);
2319 return IRQ_RETVAL(handled);
/*
 * nv_init_one - PCI probe.
 * Rejects IDE-mode controllers (SATA parts expose all six BARs),
 * selects ADMA/SWNCQ/plain operation based on chip type and module
 * parameters, maps the MMIO BAR, wires up SCR access, enables the SATA
 * register space on CK804+, and activates the host. Error-return lines
 * are elided in this view.
 */
2320 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2322 const struct ata_port_info *ppi[] = { NULL, NULL };
2323 struct nv_pi_priv *ipriv;
2324 struct ata_host *host;
2325 struct nv_host_priv *hpriv;
2329 unsigned long type = ent->driver_data;
2331 // Make sure this is a SATA controller by counting the number of bars
2332 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2333 // it's an IDE controller and we ignore it.
2334 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2335 if (pci_resource_start(pdev, bar) == 0)
2338 ata_print_version_once(&pdev->dev, DRV_VERSION);
2340 rc = pcim_enable_device(pdev);
2344 /* determine type and allocate host */
2345 if (type == CK804 && adma_enabled) {
2346 dev_notice(&pdev->dev, "Using ADMA mode\n");
2348 } else if (type == MCP5x && swncq_enabled) {
2349 dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2353 ppi[0] = &nv_port_info[type];
2354 ipriv = ppi[0]->private_data;
2355 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2359 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2363 host->private_data = hpriv;
2365 /* request and iomap NV_MMIO_BAR */
2366 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2370 /* configure SCR access */
2371 base = host->iomap[NV_MMIO_BAR];
2372 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2373 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2375 /* enable SATA space for CK804 */
2376 if (type >= CK804) {
/* NOTE(review): "®val" below looks like mojibake for "&regval" — confirm against upstream sata_nv.c. */
2379 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2380 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2381 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2386 rc = nv_adma_host_init(host);
2389 } else if (type == SWNCQ)
2390 nv_swncq_host_init(host);
2393 dev_notice(&pdev->dev, "Using MSI\n");
2394 pci_enable_msi(pdev);
2397 pci_set_master(pdev);
2398 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2401 #ifdef CONFIG_PM_SLEEP
/*
 * nv_pci_device_resume - PCI resume callback.
 * After the generic PCI resume, and only when coming back from a real
 * suspend, re-enables the SATA register space (CK804+) and restores the
 * per-port ADMA enable bits according to each port's ATAPI state, then
 * resumes the libata host.
 */
2402 static int nv_pci_device_resume(struct pci_dev *pdev)
2404 struct ata_host *host = pci_get_drvdata(pdev);
2405 struct nv_host_priv *hpriv = host->private_data;
2408 rc = ata_pci_device_do_resume(pdev);
2412 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2413 if (hpriv->type >= CK804) {
/* NOTE(review): "®val" below looks like mojibake for "&regval" — confirm against upstream sata_nv.c. */
2416 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2417 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2418 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2420 if (hpriv->type == ADMA) {
2422 struct nv_adma_port_priv *pp;
2423 /* enable/disable ADMA on the ports appropriately */
2424 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
/* ATAPI ports must run in legacy mode, so drop their ADMA bits. */
2426 pp = host->ports[0]->private_data;
2427 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2428 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2429 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2431 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2432 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2433 pp = host->ports[1]->private_data;
2434 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2435 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2436 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2438 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2439 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2441 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2445 ata_host_resume(host);
2451 static void nv_ck804_host_stop(struct ata_host *host)
2453 struct pci_dev *pdev = to_pci_dev(host->dev);
2456 /* disable SATA space for CK804 */
2457 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2458 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2459 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2462 static void nv_adma_host_stop(struct ata_host *host)
2464 struct pci_dev *pdev = to_pci_dev(host->dev);
2467 /* disable ADMA on the ports */
2468 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2469 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2470 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2471 NV_MCP_SATA_CFG_20_PORT1_EN |
2472 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2476 nv_ck804_host_stop(host);
/* Register the PCI driver and expose the mode-selection module parameters
 * (all read-only after load, mode 0444). */
2479 module_pci_driver(nv_pci_driver);
2481 module_param_named(adma, adma_enabled, bool, 0444);
2482 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2483 module_param_named(swncq, swncq_enabled, bool, 0444);
2484 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2485 module_param_named(msi, msi_enabled, bool, 0444);
2486 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");