/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
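
	/*
	 * Editorial note (not in the original source): with
	 * NV_ADMA_CPB_SZ = 128 and NV_ADMA_APRD_SZ = 16, NV_ADMA_SGTBL_LEN
	 * works out to (1024 - 128) / 16 = 56 external APRDs per command.
	 * Together with the 5 APRDs embedded in the CPB itself that gives
	 * NV_ADMA_SGTBL_TOTAL_LEN = 61 usable SG entries, and each CPB plus
	 * its external table fills exactly 1 KiB, so the per-port buffer is
	 * NV_ADMA_MAX_CPBS * 1 KiB = 32 KiB.
	 */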

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;	/* pad to NV_ADMA_APRD_SZ (16 bytes) */
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32			defer_bits;
	unsigned int		head;
	unsigned int		tail;
	unsigned int		tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd	*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */
	void __iomem		*sactive_block;
	void __iomem		*irq_block;
	void __iomem		*tag_block;
	u32			qc_active;

	unsigned int		last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue	defer_queue;

	/* for NCQ interrupt analysis */
	u32			dhfis_bits;
	u32			dmafis_bits;
	u32			sdbfis_bits;

	unsigned int		ncq_flags;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
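
/*
 * Editorial note: in the general-control word read from NV_ADMA_GEN_CTL the
 * per-port interrupt-pending flags are 12 bits apart, so this macro tests
 * bit 19 for port 0 and bit 31 for port 1.
 */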

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 *  bko11195 reports that link doesn't come online after hardreset on
 *  generic nv's and there have been several other similar reports on
 *  linux-ide.
 *
 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 *  softreset.
 *
 * NF2/3:
 *
 *  bko3352 reports nf2/3 controllers can't determine device signature
 *  reliably after hardreset.  The following thread reports detection
 *  failure on cold boot with the standard debouncing timing.
 *
 *  http://thread.gmane.org/gmane.linux.ide/34098
 *
 *  bko12176 reports that hardreset fails to bring up the link during
 *  boot on nf2.
 *
 * CK804:
 *
 *  For initial probing after boot and hot plugging, hardreset mostly
 *  works fine on CK804 but curiously, reprobing on the initial port
 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 *  FIS in a somewhat nondeterministic way.
 *
 * MCP5x:
 *
 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 *  hardreset should be used and hardreset can't report proper
 *  signature, which suggests that mcp5x is closer to nf2 as far as
 *  reset quirkiness is concerned.
 *
 *  bko12703 reports that boot probing fails for intel SSD with
 *  hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
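
/*
 * Editorial note: NV_PI_PRIV takes the address of a C99 compound literal;
 * at file scope the literal has static storage duration, so the pointer
 * stored in ata_port_info.private_data below stays valid for the driver's
 * lifetime.  It simply bundles the per-flavor IRQ handler with its SCSI
 * host template.
 */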

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		dma_set_mask(&pdev->dev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
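
/*
 * Editorial note on the encoding above: each 16-bit CPB taskfile entry
 * carries the shadow-register address in its upper byte and the register
 * value in its lower byte, optionally OR'd with the nv_adma_regbits control
 * flags (WNB on the first entry, CMDEND on the command write, IGN for
 * unused slots), filling the 12-word tf[] array in struct nv_adma_cpb.
 */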

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode. Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}
		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
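
/*
 * Editorial note on the buffer carved out above: the single coherent
 * allocation holds all 32 CPBs back to back (32 * 128 bytes, the block the
 * controller fetches from NV_ADMA_CPB_BASE_LOW/HIGH), immediately followed
 * by one 56-entry external APRD table per tag (32 * 896 bytes), which is
 * what the aprd/aprd_dma pointers index in nv_adma_fill_sg().
 */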

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
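
/*
 * Editorial note: the first five scatter/gather segments land in the APRDs
 * embedded in the CPB itself; only when a command has more than five does
 * next_aprd point at that tag's external table, located at
 * aprd_dma + NV_ADMA_SGTBL_SZ * tag inside the buffer set up in
 * nv_adma_port_start().
 */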

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;

	return AC_ERR_OK;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
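
/*
 * Editorial note: the freeze/thaw pairs here and below all follow the same
 * shape: freeze masks the port's bits in the interrupt-enable register,
 * while thaw first acks anything pending in the status register and only
 * then unmasks, so a stale event cannot re-fire the moment interrupts are
 * re-enabled.
 */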

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}

static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}
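
/*
 * Editorial note: head and tail above only ever increase and are reduced
 * modulo ATA_MAX_QUEUE (a power of two) via the "& (ATA_MAX_QUEUE - 1)"
 * masks, so "tail - head" is always the number of queued entries and the
 * WARN_ON fires exactly when the ring is full.
 */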

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}

static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}

static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
		     ap->qc_active, ap->link.sactive);
	ata_port_err(ap,
		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
		     ap->ops->sff_check_status(ap),
		     ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_err(ap,
			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
			     (pp->dhfis_bits >> i) & 0x1,
			     (pp->dmafis_bits >> i) & 0x1,
			     (pp->sdbfis_bits >> i) & 0x1,
			     (sactive >> i) & 0x1,
			     (err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
#endif

static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}

static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}

static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	nv_swncq_fill_sg(qc);

	return AC_ERR_OK;
}

static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
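
/*
 * Editorial note: the inner loop above exists because a legacy BMDMA PRD
 * entry cannot cross a 64 KiB boundary; a segment whose start offset plus
 * length would pass one is split so each entry covers at most
 * 0x10000 - (addr & 0xffff) bytes, and ATA_PRD_EOT then marks the last
 * entry of the table.
 */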

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_bmdma_qc_issue(qc);

	DPRINTK("Enter\n");

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}

static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}
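
/*
 * A DMA Setup FIS arrived: point the BMDMA engine at the PRD table for
 * the tag the controller reported, set the transfer direction, and
 * start the transfer.
 */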
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
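
/*
 * Per-port interrupt handler for SWNCQ mode.  Decodes this port's slice
 * of the MCP55 interrupt status: hotplug events, device errors, backout,
 * SDB FIS completion, D2H register FIS and DMA Setup FIS, updating the
 * software NCQ state machine accordingly.
 */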
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* the command was backed out; the driver must
		 * reissue it some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
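
/*
 * Top-level SWNCQ interrupt handler.  Reads the shared MCP55 status
 * register once and hands each port its 16-bit slice; a port with no
 * NCQ commands in flight falls back to the generic handler.
 */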
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			if (irq_stat)	/* reserve Hotplug */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
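
/*
 * PCI probe: choose ADMA, SWNCQ or generic operation based on the
 * controller type and module parameters, map BAR5, set up SCR access
 * and activate the host.
 */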
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_notice(&pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	if (msi_enabled) {
		dev_notice(&pdev->dev, "Using MSI\n");
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
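
/*
 * On resume from suspend the chip loses the SATA space enable and the
 * per-port ADMA enable bits in NV_MCP_SATA_CFG_20, so restore them
 * before resuming the ATA host.
 */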
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
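
/*
 * Host stop callbacks: CK804 undoes the SATA space enable performed at
 * probe time; ADMA additionally clears the per-port ADMA enable bits
 * before falling through to the CK804 teardown.
 */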
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
module_pci_driver(nv_pci_driver);

module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
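
/*
 * The defaults above can be overridden at module load time, e.g.:
 *
 *	modprobe sata_nv adma=1 swncq=0 msi=1
 */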