1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_sx4.c - Promise SATA
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
9 * Copyright 2003-2004 Red Hat, Inc.
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
14 * Hardware documentation available under NDA.
21 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
22 engine, DIMM memory, and four ATA engines (one per SATA port).
23 Data is copied to/from DIMM memory by the HDMA engine, before
24 handing off to one (or more) of the ATA engines. The ATA
25 engines operate solely on DIMM memory.
27 The SX4 behaves like a PATA chip, with no SATA controls or
28 knowledge whatsoever, leading to the presumption that
29 PATA<->SATA bridges exist on SX4 boards, external to the
32 The chip is quite capable, supporting an XOR engine and linked
33 hardware commands (permits a string of transactions to be
34 submitted and waited-on as a single unit), and an optional
37 The limiting factor is largely software. This Linux driver was
38 written to multiplex the single HDMA engine to copy disk
39 transactions into a fixed DIMM memory space, from where an ATA
40 engine takes over. As a result, each WRITE looks like this:
42 submit HDMA packet to hardware
43 hardware copies data from system memory to DIMM
44 hardware raises interrupt
46 submit ATA packet to hardware
47 hardware executes ATA WRITE command, w/ data in DIMM
48 hardware raises interrupt
50 and each READ looks like this:
52 submit ATA packet to hardware
53 hardware executes ATA READ command, w/ data in DIMM
54 hardware raises interrupt
56 submit HDMA packet to hardware
57 hardware copies data from DIMM to system memory
58 hardware raises interrupt
60 This is a very slow, lock-step way of doing things that can
61 certainly be improved by motivated kernel hackers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/slab.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/device.h>
73 #include <scsi/scsi_host.h>
74 #include <scsi/scsi_cmnd.h>
75 #include <linux/libata.h>
76 #include "sata_promise.h"
78 #define DRV_NAME "sata_sx4"
79 #define DRV_VERSION "0.12"
86 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
88 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
89 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
90 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
91 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
93 PDC_CTLSTAT = 0x60, /* IDEn control / status */
95 PDC_20621_SEQCTL = 0x400,
96 PDC_20621_SEQMASK = 0x480,
97 PDC_20621_GENERAL_CTL = 0x484,
98 PDC_20621_PAGE_SIZE = (32 * 1024),
100 /* chosen, not constant, values; we design our own DIMM mem map */
101 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
102 PDC_20621_DIMM_BASE = 0x00200000,
103 PDC_20621_DIMM_DATA = (64 * 1024),
104 PDC_DIMM_DATA_STEP = (256 * 1024),
105 PDC_DIMM_WINDOW_STEP = (8 * 1024),
106 PDC_DIMM_HOST_PRD = (6 * 1024),
107 PDC_DIMM_HOST_PKT = (128 * 0),
108 PDC_DIMM_HPKT_PRD = (128 * 1),
109 PDC_DIMM_ATA_PKT = (128 * 2),
110 PDC_DIMM_APKT_PRD = (128 * 3),
111 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
112 PDC_PAGE_WINDOW = 0x40,
113 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
114 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
115 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
117 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
119 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
122 board_20621 = 0, /* FastTrak S150 SX4 */
124 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
125 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
126 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
129 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
131 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
132 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
133 PDC_I2C_CONTROL = 0x48,
134 PDC_I2C_ADDR_DATA = 0x4C,
135 PDC_DIMM0_CONTROL = 0x80,
136 PDC_DIMM1_CONTROL = 0x84,
137 PDC_SDRAM_CONTROL = 0x88,
138 PDC_I2C_WRITE = 0, /* master -> slave */
139 PDC_I2C_READ = (1 << 6), /* master <- slave */
140 PDC_I2C_START = (1 << 7), /* start I2C proto */
141 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
142 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
143 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
144 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
145 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
146 PDC_DIMM_SPD_ROW_NUM = 3,
147 PDC_DIMM_SPD_COLUMN_NUM = 4,
148 PDC_DIMM_SPD_MODULE_ROW = 5,
149 PDC_DIMM_SPD_TYPE = 11,
150 PDC_DIMM_SPD_FRESH_RATE = 12,
151 PDC_DIMM_SPD_BANK_NUM = 17,
152 PDC_DIMM_SPD_CAS_LATENCY = 18,
153 PDC_DIMM_SPD_ATTRIBUTE = 21,
154 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
155 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
156 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
157 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
158 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
159 PDC_CTL_STATUS = 0x08,
160 PDC_DIMM_WINDOW_CTLR = 0x0C,
161 PDC_TIME_CONTROL = 0x3C,
162 PDC_TIME_PERIOD = 0x40,
163 PDC_TIME_COUNTER = 0x44,
164 PDC_GENERAL_CTLR = 0x484,
165 PCI_PLL_INIT = 0x8A531824,
166 PCI_X_TCOUNT = 0xEE1E5CFF,
168 /* PDC_TIME_CONTROL bits */
169 PDC_TIMER_BUZZER = (1 << 10),
170 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
171 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
172 PDC_TIMER_ENABLE = (1 << 7),
173 PDC_TIMER_MASK_INT = (1 << 5),
174 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
175 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
180 #define ECC_ERASE_BUF_SZ (128 * 1024)
182 struct pdc_port_priv {
183 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
188 struct pdc_host_priv {
189 unsigned int doing_hdma;
190 unsigned int hdma_prod;
191 unsigned int hdma_cons;
193 struct ata_queued_cmd *qc;
195 unsigned long pkt_ofs;
200 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
201 static void pdc_error_handler(struct ata_port *ap);
202 static void pdc_freeze(struct ata_port *ap);
203 static void pdc_thaw(struct ata_port *ap);
204 static int pdc_port_start(struct ata_port *ap);
205 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
206 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
207 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
208 static unsigned int pdc20621_dimm_init(struct ata_host *host);
209 static int pdc20621_detect_dimm(struct ata_host *host);
210 static unsigned int pdc20621_i2c_read(struct ata_host *host,
211 u32 device, u32 subaddr, u32 *pdata);
212 static int pdc20621_prog_dimm0(struct ata_host *host);
213 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
214 #ifdef ATA_VERBOSE_DEBUG
215 static void pdc20621_get_from_dimm(struct ata_host *host,
216 void *psource, u32 offset, u32 size);
218 static void pdc20621_put_to_dimm(struct ata_host *host,
219 void *psource, u32 offset, u32 size);
220 static void pdc20621_irq_clear(struct ata_port *ap);
221 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
222 static int pdc_softreset(struct ata_link *link, unsigned int *class,
223 unsigned long deadline);
224 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
225 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
228 static struct scsi_host_template pdc_sata_sht = {
229 ATA_BASE_SHT(DRV_NAME),
230 .sg_tablesize = LIBATA_MAX_PRD,
231 .dma_boundary = ATA_DMA_BOUNDARY,
234 /* TODO: inherit from base port_ops after converting to new EH */
235 static struct ata_port_operations pdc_20621_ops = {
236 .inherits = &ata_sff_port_ops,
238 .check_atapi_dma = pdc_check_atapi_dma,
239 .qc_prep = pdc20621_qc_prep,
240 .qc_issue = pdc20621_qc_issue,
242 .freeze = pdc_freeze,
244 .softreset = pdc_softreset,
245 .error_handler = pdc_error_handler,
246 .lost_interrupt = ATA_OP_NULL,
247 .post_internal_cmd = pdc_post_internal_cmd,
249 .port_start = pdc_port_start,
251 .sff_tf_load = pdc_tf_load_mmio,
252 .sff_exec_command = pdc_exec_command_mmio,
253 .sff_irq_clear = pdc20621_irq_clear,
256 static const struct ata_port_info pdc_port_info[] = {
259 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
260 ATA_FLAG_PIO_POLLING,
261 .pio_mask = ATA_PIO4,
262 .mwdma_mask = ATA_MWDMA2,
263 .udma_mask = ATA_UDMA6,
264 .port_ops = &pdc_20621_ops,
269 static const struct pci_device_id pdc_sata_pci_tbl[] = {
270 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
272 { } /* terminate list */
275 static struct pci_driver pdc_sata_pci_driver = {
277 .id_table = pdc_sata_pci_tbl,
278 .probe = pdc_sata_init_one,
279 .remove = ata_pci_remove_one,
283 static int pdc_port_start(struct ata_port *ap)
285 struct device *dev = ap->host->dev;
286 struct pdc_port_priv *pp;
288 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
292 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
296 ap->private_data = pp;
301 static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
302 unsigned int total_len)
305 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
306 __le32 *buf32 = (__le32 *) buf;
308 /* output ATA packet S/G table */
309 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
310 (PDC_DIMM_DATA_STEP * portno);
311 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
312 buf32[dw] = cpu_to_le32(addr);
313 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
315 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
316 PDC_20621_DIMM_BASE +
317 (PDC_DIMM_WINDOW_STEP * portno) +
319 buf32[dw], buf32[dw + 1]);
322 static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
323 unsigned int total_len)
326 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
327 __le32 *buf32 = (__le32 *) buf;
329 /* output Host DMA packet S/G table */
330 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
331 (PDC_DIMM_DATA_STEP * portno);
333 buf32[dw] = cpu_to_le32(addr);
334 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
336 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
337 PDC_20621_DIMM_BASE +
338 (PDC_DIMM_WINDOW_STEP * portno) +
340 buf32[dw], buf32[dw + 1]);
343 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
344 unsigned int devno, u8 *buf,
348 __le32 *buf32 = (__le32 *) buf;
351 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
352 (PDC_DIMM_WINDOW_STEP * portno) +
354 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
356 i = PDC_DIMM_ATA_PKT;
361 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
362 buf[i++] = PDC_PKT_READ;
363 else if (tf->protocol == ATA_PROT_NODATA)
364 buf[i++] = PDC_PKT_NODATA;
367 buf[i++] = 0; /* reserved */
368 buf[i++] = portno + 1; /* seq. id */
369 buf[i++] = 0xff; /* delay seq. id */
371 /* dimm dma S/G, and next-pkt */
373 if (tf->protocol == ATA_PROT_NODATA)
376 buf32[dw] = cpu_to_le32(dimm_sg);
381 dev_reg = ATA_DEVICE_OBS;
383 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
386 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
389 /* device control register */
390 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
396 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
401 __le32 *buf32 = (__le32 *) buf;
403 unsigned int host_sg = PDC_20621_DIMM_BASE +
404 (PDC_DIMM_WINDOW_STEP * portno) +
406 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
407 (PDC_DIMM_WINDOW_STEP * portno) +
409 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
410 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
412 dw = PDC_DIMM_HOST_PKT >> 2;
415 * Set up Host DMA packet
417 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
421 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
422 tmp |= (0xff << 24); /* delay seq. id */
423 buf32[dw + 0] = cpu_to_le32(tmp);
424 buf32[dw + 1] = cpu_to_le32(host_sg);
425 buf32[dw + 2] = cpu_to_le32(dimm_sg);
428 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
429 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
437 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
439 struct scatterlist *sg;
440 struct ata_port *ap = qc->ap;
441 struct pdc_port_priv *pp = ap->private_data;
442 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
443 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
444 unsigned int portno = ap->port_no;
445 unsigned int i, si, idx, total_len = 0, sgt_len;
446 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
448 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
450 VPRINTK("ata%u: ENTER\n", ap->print_id);
452 /* hard-code chip #0 */
453 mmio += PDC_CHIP0_OFS;
459 for_each_sg(qc->sg, sg, qc->n_elem, si) {
460 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
461 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
462 total_len += sg_dma_len(sg);
464 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
468 * Build ATA, host DMA packets
470 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
471 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
473 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
474 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
476 if (qc->tf.flags & ATA_TFLAG_LBA48)
477 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
479 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
481 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
483 /* copy three S/G tables and two packets to DIMM MMIO window */
484 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
485 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
486 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
488 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
490 /* force host FIFO dump */
491 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
493 readl(dimm_mmio); /* MMIO PCI posting flush */
495 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
498 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
500 struct ata_port *ap = qc->ap;
501 struct pdc_port_priv *pp = ap->private_data;
502 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
503 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
504 unsigned int portno = ap->port_no;
507 VPRINTK("ata%u: ENTER\n", ap->print_id);
509 /* hard-code chip #0 */
510 mmio += PDC_CHIP0_OFS;
512 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
514 if (qc->tf.flags & ATA_TFLAG_LBA48)
515 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
517 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
519 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
521 /* copy three S/G tables and two packets to DIMM MMIO window */
522 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
523 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
525 /* force host FIFO dump */
526 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
528 readl(dimm_mmio); /* MMIO PCI posting flush */
530 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
533 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
535 switch (qc->tf.protocol) {
537 pdc20621_dma_prep(qc);
539 case ATA_PROT_NODATA:
540 pdc20621_nodata_prep(qc);
549 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
553 struct ata_port *ap = qc->ap;
554 struct ata_host *host = ap->host;
555 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
557 /* hard-code chip #0 */
558 mmio += PDC_CHIP0_OFS;
560 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
561 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
563 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
564 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
567 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
571 struct ata_port *ap = qc->ap;
572 struct pdc_host_priv *pp = ap->host->private_data;
573 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
575 if (!pp->doing_hdma) {
576 __pdc20621_push_hdma(qc, seq, pkt_ofs);
581 pp->hdma[idx].qc = qc;
582 pp->hdma[idx].seq = seq;
583 pp->hdma[idx].pkt_ofs = pkt_ofs;
587 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
589 struct ata_port *ap = qc->ap;
590 struct pdc_host_priv *pp = ap->host->private_data;
591 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
593 /* if nothing on queue, we're done */
594 if (pp->hdma_prod == pp->hdma_cons) {
599 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
600 pp->hdma[idx].pkt_ofs);
604 #ifdef ATA_VERBOSE_DEBUG
605 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
607 struct ata_port *ap = qc->ap;
608 unsigned int port_no = ap->port_no;
609 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
611 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
612 dimm_mmio += PDC_DIMM_HOST_PKT;
614 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
615 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
616 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
617 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
620 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
621 #endif /* ATA_VERBOSE_DEBUG */
623 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
625 struct ata_port *ap = qc->ap;
626 struct ata_host *host = ap->host;
627 unsigned int port_no = ap->port_no;
628 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
629 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
630 u8 seq = (u8) (port_no + 1);
631 unsigned int port_ofs;
633 /* hard-code chip #0 */
634 mmio += PDC_CHIP0_OFS;
636 VPRINTK("ata%u: ENTER\n", ap->print_id);
638 wmb(); /* flush PRD, pkt writes */
640 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
642 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
643 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
646 pdc20621_dump_hdma(qc);
647 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
648 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
649 port_ofs + PDC_DIMM_HOST_PKT,
650 port_ofs + PDC_DIMM_HOST_PKT,
653 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
654 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
656 writel(port_ofs + PDC_DIMM_ATA_PKT,
657 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
658 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
659 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
660 port_ofs + PDC_DIMM_ATA_PKT,
661 port_ofs + PDC_DIMM_ATA_PKT,
666 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
668 switch (qc->tf.protocol) {
669 case ATA_PROT_NODATA:
670 if (qc->tf.flags & ATA_TFLAG_POLLING)
674 pdc20621_packet_start(qc);
685 return ata_sff_qc_issue(qc);
688 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
689 struct ata_queued_cmd *qc,
690 unsigned int doing_hdma,
693 unsigned int port_no = ap->port_no;
694 unsigned int port_ofs =
695 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
697 unsigned int handled = 0;
701 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
702 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
704 /* step two - DMA from DIMM to host */
706 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
707 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
708 /* get drive status; clear intr; complete txn */
709 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
711 pdc20621_pop_hdma(qc);
714 /* step one - exec ATA command */
716 u8 seq = (u8) (port_no + 1 + 4);
717 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
718 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* submit hdma pkt */
721 pdc20621_dump_hdma(qc);
722 pdc20621_push_hdma(qc, seq,
723 port_ofs + PDC_DIMM_HOST_PKT);
727 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
729 /* step one - DMA from host to DIMM */
731 u8 seq = (u8) (port_no + 1);
732 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
733 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
736 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
737 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
738 writel(port_ofs + PDC_DIMM_ATA_PKT,
739 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
740 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
743 /* step two - execute ATA command */
745 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
746 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
747 /* get drive status; clear intr; complete txn */
748 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
750 pdc20621_pop_hdma(qc);
754 /* command completion, but no data xfer */
755 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
757 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
758 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
759 qc->err_mask |= ac_err_mask(status);
764 ap->stats.idle_irq++;
770 static void pdc20621_irq_clear(struct ata_port *ap)
772 ioread8(ap->ioaddr.status_addr);
775 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
777 struct ata_host *host = dev_instance;
780 unsigned int i, tmp, port_no;
781 unsigned int handled = 0;
782 void __iomem *mmio_base;
786 if (!host || !host->iomap[PDC_MMIO_BAR]) {
787 VPRINTK("QUICK EXIT\n");
791 mmio_base = host->iomap[PDC_MMIO_BAR];
793 /* reading should also clear interrupts */
794 mmio_base += PDC_CHIP0_OFS;
795 mask = readl(mmio_base + PDC_20621_SEQMASK);
796 VPRINTK("mask == 0x%x\n", mask);
798 if (mask == 0xffffffff) {
799 VPRINTK("QUICK EXIT 2\n");
802 mask &= 0xffff; /* only 16 tags possible */
804 VPRINTK("QUICK EXIT 3\n");
808 spin_lock(&host->lock);
810 for (i = 1; i < 9; i++) {
814 if (port_no >= host->n_ports)
817 ap = host->ports[port_no];
818 tmp = mask & (1 << i);
819 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
821 struct ata_queued_cmd *qc;
823 qc = ata_qc_from_tag(ap, ap->link.active_tag);
824 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
825 handled += pdc20621_host_intr(ap, qc, (i > 4),
830 spin_unlock(&host->lock);
832 VPRINTK("mask == 0x%x\n", mask);
836 return IRQ_RETVAL(handled);
839 static void pdc_freeze(struct ata_port *ap)
841 void __iomem *mmio = ap->ioaddr.cmd_addr;
844 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
846 tmp = readl(mmio + PDC_CTLSTAT);
848 tmp &= ~PDC_DMA_ENABLE;
849 writel(tmp, mmio + PDC_CTLSTAT);
850 readl(mmio + PDC_CTLSTAT); /* flush */
853 static void pdc_thaw(struct ata_port *ap)
855 void __iomem *mmio = ap->ioaddr.cmd_addr;
858 /* FIXME: start HDMA engine, if zero ATA engines running */
861 ioread8(ap->ioaddr.status_addr);
863 /* turn IRQ back on */
864 tmp = readl(mmio + PDC_CTLSTAT);
865 tmp &= ~PDC_MASK_INT;
866 writel(tmp, mmio + PDC_CTLSTAT);
867 readl(mmio + PDC_CTLSTAT); /* flush */
870 static void pdc_reset_port(struct ata_port *ap)
872 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
876 /* FIXME: handle HDMA copy engine */
878 for (i = 11; i > 0; i--) {
891 readl(mmio); /* flush */
894 static int pdc_softreset(struct ata_link *link, unsigned int *class,
895 unsigned long deadline)
897 pdc_reset_port(link->ap);
898 return ata_sff_softreset(link, class, deadline);
901 static void pdc_error_handler(struct ata_port *ap)
903 if (!(ap->pflags & ATA_PFLAG_FROZEN))
906 ata_sff_error_handler(ap);
909 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
911 struct ata_port *ap = qc->ap;
913 /* make DMA engine forget about the failed command */
914 if (qc->flags & ATA_QCFLAG_FAILED)
918 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
920 u8 *scsicmd = qc->scsicmd->cmnd;
921 int pio = 1; /* atapi dma off by default */
923 /* Whitelist commands that may use DMA. */
924 switch (scsicmd[0]) {
931 case 0xad: /* READ_DVD_STRUCTURE */
932 case 0xbe: /* READ_CD */
935 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
936 if (scsicmd[0] == WRITE_10) {
942 if (lba >= 0xFFFF4FA2)
948 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
950 WARN_ON(tf->protocol == ATA_PROT_DMA ||
951 tf->protocol == ATAPI_PROT_DMA);
952 ata_sff_tf_load(ap, tf);
956 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
958 WARN_ON(tf->protocol == ATA_PROT_DMA ||
959 tf->protocol == ATAPI_PROT_DMA);
960 ata_sff_exec_command(ap, tf);
964 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
966 port->cmd_addr = base;
967 port->data_addr = base;
969 port->error_addr = base + 0x4;
970 port->nsect_addr = base + 0x8;
971 port->lbal_addr = base + 0xc;
972 port->lbam_addr = base + 0x10;
973 port->lbah_addr = base + 0x14;
974 port->device_addr = base + 0x18;
976 port->status_addr = base + 0x1c;
977 port->altstatus_addr =
978 port->ctl_addr = base + 0x38;
982 #ifdef ATA_VERBOSE_DEBUG
983 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
984 u32 offset, u32 size)
990 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
991 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
993 /* hard-code chip #0 */
994 mmio += PDC_CHIP0_OFS;
997 window_size = 0x2000 * 4; /* 32K byte uchar size */
998 idx = (u16) (offset / window_size);
1000 writel(0x01, mmio + PDC_GENERAL_CTLR);
1001 readl(mmio + PDC_GENERAL_CTLR);
1002 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1005 offset -= (idx * window_size);
1007 dist = min(size, window_size - offset);
1008 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
1012 for (; (long) size >= (long) window_size ;) {
1013 writel(0x01, mmio + PDC_GENERAL_CTLR);
1014 readl(mmio + PDC_GENERAL_CTLR);
1015 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017 memcpy_fromio(psource, dimm_mmio, window_size / 4);
1018 psource += window_size;
1019 size -= window_size;
1024 writel(0x01, mmio + PDC_GENERAL_CTLR);
1025 readl(mmio + PDC_GENERAL_CTLR);
1026 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1027 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1028 memcpy_fromio(psource, dimm_mmio, size / 4);
1034 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1035 u32 offset, u32 size)
1041 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1042 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1044 /* hard-code chip #0 */
1045 mmio += PDC_CHIP0_OFS;
1048 window_size = 0x2000 * 4; /* 32K byte uchar size */
1049 idx = (u16) (offset / window_size);
1051 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1052 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1053 offset -= (idx * window_size);
1055 dist = min(size, window_size - offset);
1056 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1057 writel(0x01, mmio + PDC_GENERAL_CTLR);
1058 readl(mmio + PDC_GENERAL_CTLR);
1062 for (; (long) size >= (long) window_size ;) {
1063 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1064 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1065 memcpy_toio(dimm_mmio, psource, window_size / 4);
1066 writel(0x01, mmio + PDC_GENERAL_CTLR);
1067 readl(mmio + PDC_GENERAL_CTLR);
1068 psource += window_size;
1069 size -= window_size;
1074 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1075 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1076 memcpy_toio(dimm_mmio, psource, size / 4);
1077 writel(0x01, mmio + PDC_GENERAL_CTLR);
1078 readl(mmio + PDC_GENERAL_CTLR);
1083 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1084 u32 subaddr, u32 *pdata)
1086 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1091 /* hard-code chip #0 */
1092 mmio += PDC_CHIP0_OFS;
1094 i2creg |= device << 24;
1095 i2creg |= subaddr << 16;
1097 /* Set the device and subaddress */
1098 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1099 readl(mmio + PDC_I2C_ADDR_DATA);
1101 /* Write Control to perform read operation, mask int */
1102 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1103 mmio + PDC_I2C_CONTROL);
1105 for (count = 0; count <= 1000; count ++) {
1106 status = readl(mmio + PDC_I2C_CONTROL);
1107 if (status & PDC_I2C_COMPLETE) {
1108 status = readl(mmio + PDC_I2C_ADDR_DATA);
1110 } else if (count == 1000)
1114 *pdata = (status >> 8) & 0x000000ff;
1119 static int pdc20621_detect_dimm(struct ata_host *host)
1122 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1123 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1129 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1139 static int pdc20621_prog_dimm0(struct ata_host *host)
1145 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1146 static const struct {
1149 } pdc_i2c_read_data [] = {
1150 { PDC_DIMM_SPD_TYPE, 11 },
1151 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1152 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1153 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1154 { PDC_DIMM_SPD_ROW_NUM, 3 },
1155 { PDC_DIMM_SPD_BANK_NUM, 17 },
1156 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1157 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1158 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1159 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1160 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1161 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1164 /* hard-code chip #0 */
1165 mmio += PDC_CHIP0_OFS;
1167 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1168 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1169 pdc_i2c_read_data[i].reg,
1170 &spd0[pdc_i2c_read_data[i].ofs]);
1172 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1173 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1174 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1175 data |= (((((spd0[29] > spd0[28])
1176 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1177 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1179 if (spd0[18] & 0x08)
1180 data |= ((0x03) << 14);
1181 else if (spd0[18] & 0x04)
1182 data |= ((0x02) << 14);
1183 else if (spd0[18] & 0x01)
1184 data |= ((0x01) << 14);
1189 Calculate the size of bDIMMSize (power of 2) and
1190 merge the DIMM size by program start/end address.
1193 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1194 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1195 data |= (((size / 16) - 1) << 16);
1198 writel(data, mmio + PDC_DIMM0_CONTROL);
1199 readl(mmio + PDC_DIMM0_CONTROL);
1204 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1208 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1210 /* hard-code chip #0 */
1211 mmio += PDC_CHIP0_OFS;
1214 Set To Default : DIMM Module Global Control Register (0x022259F1)
1215 DIMM Arbitration Disable (bit 20)
1216 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1217 Refresh Enable (bit 17)
1221 writel(data, mmio + PDC_SDRAM_CONTROL);
1222 readl(mmio + PDC_SDRAM_CONTROL);
1224 /* Turn on for ECC */
1225 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1226 PDC_DIMM_SPD_TYPE, &spd0)) {
1227 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1228 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1232 data |= (0x01 << 16);
1233 writel(data, mmio + PDC_SDRAM_CONTROL);
1234 readl(mmio + PDC_SDRAM_CONTROL);
1235 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1238 /* DIMM Initialization Select/Enable (bit 18/19) */
1241 writel(data, mmio + PDC_SDRAM_CONTROL);
1244 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1245 data = readl(mmio + PDC_SDRAM_CONTROL);
1246 if (!(data & (1<<19))) {
/*
 * pdc20621_dimm_init - probe and initialize the board-local DIMM.
 *
 * Visible sequence: measure the PCI bus clock using the chip's internal
 * down-counting timer, derive a PLL "F" parameter from the measured rate
 * (or fall back to a canned PLL value on a slow bus), detect the DIMM via
 * SPD reads over I2C, program the DIMM module control registers, run an
 * optional verbose-debug write/read-back self test, and zero-fill the
 * whole DIMM when the SPD type byte indicates ECC support.
 *
 * Returns 0 on success, 1 on any failure (no DIMM detected, programming
 * error, etc.).
 *
 * NOTE(review): the embedded original line numbers jump in several places
 * (e.g. 1260->1265, 1382->1385), so statements such as local declarations,
 * the mdelay/wait, the kzalloc NULL check, the matching kfree, and closing
 * braces are NOT visible in this extraction. Comments below describe only
 * the visible code -- confirm the gaps against the upstream file.
 */
1256 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1258 int speed, size, length;
1259 u32 addr, spd0, pci_status;
1260 u32 time_period = 0;
1265 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* All register accesses below go through chip #0's register window. */
1267 /* hard-code chip #0 */
1268 mmio += PDC_CHIP0_OFS;
1270 /* Initialize PLL based upon PCI Bus Frequency */
1272 /* Initialize Time Period Register */
1273 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1274 time_period = readl(mmio + PDC_TIME_PERIOD);
1275 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
/* Arm the timer; the ~3 second wait between arming and sampling the
 * counter happens in lines not visible here. */
1278 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1279 readl(mmio + PDC_TIME_CONTROL);
1281 /* Wait 3 seconds */
1285 When timer is enabled, counter is decreased every internal
1289 tcount = readl(mmio + PDC_TIME_COUNTER);
1290 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1293 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1294 register should be >= (0xffffffff - 3x10^8).
/* A large remaining count implies a fast (PCI-X) bus: compute the PLL F
 * parameter from the measured tick count.  Otherwise (else-branch below,
 * whose "else" line is not visible) use the fixed PCI_PLL_INIT value. */
1296 if (tcount >= PCI_X_TCOUNT) {
1297 ticks = (time_period - tcount);
1298 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
/* ticks elapsed over the ~3 s window; the /300000 and *33 scaling yields
 * 10x the internal clock -- presumably in units the 1400000 constant
 * below matches.  TODO(review): confirm units against the full source. */
1300 clock = (ticks / 300000);
1301 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1303 clock = (clock * 33);
1304 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1306 /* PLL F Param (bit 22:16) */
1307 fparam = (1400000 / clock) - 2;
1308 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1310 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1311 pci_status = (0x8a001824 | (fparam << 16));
/* Slow (plain PCI) bus: canned PLL initialization value. */
1313 pci_status = PCI_PLL_INIT;
1315 /* Initialize PLL. */
1316 VPRINTK("pci_status: 0x%x\n", pci_status);
1317 writel(pci_status, mmio + PDC_CTL_STATUS);
1318 readl(mmio + PDC_CTL_STATUS); /* read-back flushes the posted write */
1321 Read SPD of DIMM by I2C interface,
1322 and program the DIMM Module Controller.
/* Zero speed means no DIMM answered on I2C -- fatal for this adapter. */
1324 if (!(speed = pdc20621_detect_dimm(host))) {
1325 printk(KERN_ERR "Detect Local DIMM Fail\n");
1326 return 1; /* DIMM error */
1328 VPRINTK("Local DIMM Speed = %d\n", speed);
1330 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1331 size = pdc20621_prog_dimm0(host); /* DIMM size in MB */
1332 VPRINTK("Local DIMM Size = %dMB\n", size);
1334 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1335 if (pdc20621_prog_dimm_global(host)) {
1336 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
/* Verbose-debug only: write a signature pattern into two DIMM windows and
 * read it back, sanity-checking the put_to/get_from data path. */
1340 #ifdef ATA_VERBOSE_DEBUG
1342 u8 test_parttern1[40] =
1343 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1344 'N','o','t',' ','Y','e','t',' ',
1345 'D','e','f','i','n','e','d',' ',
1347 '9','8','0','3','1','6','1','2',0,0};
1348 u8 test_parttern2[40] = {0};
/* Clear both test windows first so stale data cannot fake a pass. */
1350 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1351 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
/* Write the pattern at 0x10040 only; the 0x40 read below is expected to
 * still show zeroes (checks the windows are independent). */
1353 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1354 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1355 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1356 test_parttern2[1], &(test_parttern2[2]));
1357 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1359 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1360 test_parttern2[1], &(test_parttern2[2]));
1362 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1363 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1364 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1365 test_parttern2[1], &(test_parttern2[2]));
1369 /* ECC initiliazation. */
/* Re-read the SPD "type" byte; a failed I2C read aborts ECC setup (the
 * return statement sits in lines not visible here). */
1371 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1372 PDC_DIMM_SPD_TYPE, &spd0)) {
1373 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1374 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1379 VPRINTK("Start ECC initialization\n");
/* Zero-fill the entire DIMM in ECC_ERASE_BUF_SZ strides so every ECC
 * word gets written once.  NOTE(review): the kzalloc NULL check and the
 * matching kfree must be among the missing lines -- verify upstream. */
1381 length = size * 1024 * 1024;
1382 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1385 while (addr < length) {
1386 pdc20621_put_to_dimm(host, buf, addr,
1388 addr += ECC_ERASE_BUF_SZ;
1391 VPRINTK("Finish ECC initialization\n");
/*
 * pdc_20621_init - one-time controller setup, run after DIMM init.
 *
 * Selects DIMM page 0x40 as the host-visible 32k window, then performs
 * two read-modify-write passes on the HDMA control/status register; the
 * read-back after each write flushes the posted PCI write.
 *
 * NOTE(review): the embedded original line numbers jump (1397->1400,
 * 1415->1417, 1422->1424), so local declarations and the bit operations
 * applied to tmp between read and write are not visible here -- this is
 * presumably a disable/enable toggle of the HDMA engine, but confirm
 * against the full source.
 */
1397 static void pdc_20621_init(struct ata_host *host)
1400 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1402 /* hard-code chip #0 */
1403 mmio += PDC_CHIP0_OFS;
1406 * Select page 0x40 for our 32k DIMM window
/* Keep the high 16 bits of the register, replace the page selector. */
1408 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1409 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1410 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
/* First HDMA control/status pass (modification line not visible). */
1415 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1417 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1418 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
/* Second HDMA control/status pass (modification line not visible). */
1422 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1424 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1425 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
/*
 * pdc_sata_init_one - PCI probe routine for the PDC20621 (SX4).
 *
 * Allocates a 4-port ata_host plus the driver-private pdc_host_priv,
 * enables the PCI device, maps the MMIO and DIMM BARs, wires up each
 * port's taskfile registers at base + 0x200 + i*0x80, sets the DMA mask,
 * initializes the DIMM and controller, and finally activates the host
 * with a shared IRQ handler.
 *
 * Returns 0 on success or a negative errno from
 * ata_host_activate()/earlier setup steps.
 *
 * NOTE(review): the embedded original line numbers jump (e.g. 1445->1447,
 * 1448->1452, 1477->1479), so the rc checks/early returns, -ENOMEM paths
 * and some declarations are not visible in this extraction.
 */
1428 static int pdc_sata_init_one(struct pci_dev *pdev,
1429 const struct pci_device_id *ent)
/* Same port info for all four ports, chosen by the PCI table entry. */
1431 const struct ata_port_info *ppi[] =
1432 { &pdc_port_info[ent->driver_data], NULL };
1433 struct ata_host *host;
1434 struct pdc_host_priv *hpriv;
1437 ata_print_version_once(&pdev->dev, DRV_VERSION);
/* Both allocations are device-managed; failure of either aborts probe. */
1440 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1441 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1442 if (!host || !hpriv)
1445 host->private_data = hpriv;
1447 /* acquire resources and fill host */
1448 rc = pcim_enable_device(pdev);
/* Map the register BAR and the DIMM window BAR together; on failure the
 * device is pinned so a later driver attach can't re-enable it badly. */
1452 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1455 pcim_pin_device(pdev);
1458 host->iomap = pcim_iomap_table(pdev);
/* Per-port taskfile setup: ports live at fixed 0x80-byte strides inside
 * chip #0's register window. */
1460 for (i = 0; i < 4; i++) {
1461 struct ata_port *ap = host->ports[i];
1462 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1463 unsigned int offset = 0x200 + i * 0x80;
1465 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1467 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1468 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1469 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1472 /* configure and activate */
1473 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
/* DIMM bring-up must succeed before the controller is initialized; the
 * error return for the failure branch is in lines not visible here. */
1477 if (pdc20621_dimm_init(host))
1479 pdc_20621_init(host);
1481 pci_set_master(pdev);
1482 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1483 IRQF_SHARED, &pdc_sata_sht);
/* Register the PCI driver (expands to module init/exit) and declare the
 * standard module metadata; the device table enables autoloading via
 * MODALIAS when a matching PCI ID is hotplugged. */
1486 module_pci_driver(pdc_sata_pci_driver);
1488 MODULE_AUTHOR("Jeff Garzik");
1489 MODULE_DESCRIPTION("Promise SATA low-level driver");
1490 MODULE_LICENSE("GPL");
1491 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1492 MODULE_VERSION(DRV_VERSION);