1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_sx4.c - Promise SATA
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
9 * Copyright 2003-2004 Red Hat, Inc.
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
14 * Hardware documentation available under NDA.
21 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
22 engine, DIMM memory, and four ATA engines (one per SATA port).
23 Data is copied to/from DIMM memory by the HDMA engine, before
24 handing off to one (or more) of the ATA engines. The ATA
25 engines operate solely on DIMM memory.
27 The SX4 behaves like a PATA chip, with no SATA controls or
28 knowledge whatsoever, leading to the presumption that
29 PATA<->SATA bridges exist on SX4 boards, external to the
32 The chip is quite capable, supporting an XOR engine and linked
33 hardware commands (permits a string of transactions to be
34 submitted and waited-on as a single unit), and an optional
37 The limiting factor is largely software. This Linux driver was
38 written to multiplex the single HDMA engine to copy disk
39 transactions into a fixed DIMM memory space, from where an ATA
40 engine takes over. As a result, each WRITE looks like this:
42 submit HDMA packet to hardware
43 hardware copies data from system memory to DIMM
44 hardware raises interrupt
46 submit ATA packet to hardware
47 hardware executes ATA WRITE command, w/ data in DIMM
48 hardware raises interrupt
50 and each READ looks like this:
52 submit ATA packet to hardware
53 hardware executes ATA READ command, w/ data in DIMM
54 hardware raises interrupt
56 submit HDMA packet to hardware
57 hardware copies data from DIMM to system memory
58 hardware raises interrupt
60 This is a very slow, lock-step way of doing things that can
61 certainly be improved by motivated kernel hackers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/slab.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/device.h>
73 #include <scsi/scsi_host.h>
74 #include <scsi/scsi_cmnd.h>
75 #include <linux/libata.h>
76 #include "sata_promise.h"
78 #define DRV_NAME "sata_sx4"
79 #define DRV_VERSION "0.12"
82 module_param(dimm_test, int, 0644);
83 MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
89 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
91 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
92 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
93 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
94 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
96 PDC_CTLSTAT = 0x60, /* IDEn control / status */
98 PDC_20621_SEQCTL = 0x400,
99 PDC_20621_SEQMASK = 0x480,
100 PDC_20621_GENERAL_CTL = 0x484,
101 PDC_20621_PAGE_SIZE = (32 * 1024),
103 /* chosen, not constant, values; we design our own DIMM mem map */
104 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
105 PDC_20621_DIMM_BASE = 0x00200000,
106 PDC_20621_DIMM_DATA = (64 * 1024),
107 PDC_DIMM_DATA_STEP = (256 * 1024),
108 PDC_DIMM_WINDOW_STEP = (8 * 1024),
109 PDC_DIMM_HOST_PRD = (6 * 1024),
110 PDC_DIMM_HOST_PKT = (128 * 0),
111 PDC_DIMM_HPKT_PRD = (128 * 1),
112 PDC_DIMM_ATA_PKT = (128 * 2),
113 PDC_DIMM_APKT_PRD = (128 * 3),
114 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
115 PDC_PAGE_WINDOW = 0x40,
116 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
117 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
118 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
120 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
122 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
125 board_20621 = 0, /* FastTrak S150 SX4 */
127 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
128 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
129 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
132 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
134 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
135 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
136 PDC_I2C_CONTROL = 0x48,
137 PDC_I2C_ADDR_DATA = 0x4C,
138 PDC_DIMM0_CONTROL = 0x80,
139 PDC_DIMM1_CONTROL = 0x84,
140 PDC_SDRAM_CONTROL = 0x88,
141 PDC_I2C_WRITE = 0, /* master -> slave */
142 PDC_I2C_READ = (1 << 6), /* master <- slave */
143 PDC_I2C_START = (1 << 7), /* start I2C proto */
144 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
145 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
146 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
147 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
148 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
149 PDC_DIMM_SPD_ROW_NUM = 3,
150 PDC_DIMM_SPD_COLUMN_NUM = 4,
151 PDC_DIMM_SPD_MODULE_ROW = 5,
152 PDC_DIMM_SPD_TYPE = 11,
153 PDC_DIMM_SPD_FRESH_RATE = 12,
154 PDC_DIMM_SPD_BANK_NUM = 17,
155 PDC_DIMM_SPD_CAS_LATENCY = 18,
156 PDC_DIMM_SPD_ATTRIBUTE = 21,
157 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
158 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
159 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
160 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
161 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
162 PDC_CTL_STATUS = 0x08,
163 PDC_DIMM_WINDOW_CTLR = 0x0C,
164 PDC_TIME_CONTROL = 0x3C,
165 PDC_TIME_PERIOD = 0x40,
166 PDC_TIME_COUNTER = 0x44,
167 PDC_GENERAL_CTLR = 0x484,
168 PCI_PLL_INIT = 0x8A531824,
169 PCI_X_TCOUNT = 0xEE1E5CFF,
171 /* PDC_TIME_CONTROL bits */
172 PDC_TIMER_BUZZER = (1 << 10),
173 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
174 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
175 PDC_TIMER_ENABLE = (1 << 7),
176 PDC_TIMER_MASK_INT = (1 << 5),
177 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
178 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
183 #define ECC_ERASE_BUF_SZ (128 * 1024)
185 struct pdc_port_priv {
186 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
191 struct pdc_host_priv {
192 unsigned int doing_hdma;
193 unsigned int hdma_prod;
194 unsigned int hdma_cons;
196 struct ata_queued_cmd *qc;
198 unsigned long pkt_ofs;
203 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
204 static void pdc_error_handler(struct ata_port *ap);
205 static void pdc_freeze(struct ata_port *ap);
206 static void pdc_thaw(struct ata_port *ap);
207 static int pdc_port_start(struct ata_port *ap);
208 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
209 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
210 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
211 static unsigned int pdc20621_dimm_init(struct ata_host *host);
212 static int pdc20621_detect_dimm(struct ata_host *host);
213 static unsigned int pdc20621_i2c_read(struct ata_host *host,
214 u32 device, u32 subaddr, u32 *pdata);
215 static int pdc20621_prog_dimm0(struct ata_host *host);
216 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
217 static void pdc20621_get_from_dimm(struct ata_host *host,
218 void *psource, u32 offset, u32 size);
219 static void pdc20621_put_to_dimm(struct ata_host *host,
220 void *psource, u32 offset, u32 size);
221 static void pdc20621_irq_clear(struct ata_port *ap);
222 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
223 static int pdc_softreset(struct ata_link *link, unsigned int *class,
224 unsigned long deadline);
225 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
226 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
229 static const struct scsi_host_template pdc_sata_sht = {
230 ATA_BASE_SHT(DRV_NAME),
231 .sg_tablesize = LIBATA_MAX_PRD,
232 .dma_boundary = ATA_DMA_BOUNDARY,
235 static struct ata_port_operations pdc_20621_ops = {
236 .inherits = &ata_sff_port_ops,
238 .check_atapi_dma = pdc_check_atapi_dma,
239 .qc_prep = pdc20621_qc_prep,
240 .qc_issue = pdc20621_qc_issue,
242 .freeze = pdc_freeze,
244 .softreset = pdc_softreset,
245 .error_handler = pdc_error_handler,
246 .lost_interrupt = ATA_OP_NULL,
247 .post_internal_cmd = pdc_post_internal_cmd,
249 .port_start = pdc_port_start,
251 .sff_tf_load = pdc_tf_load_mmio,
252 .sff_exec_command = pdc_exec_command_mmio,
253 .sff_irq_clear = pdc20621_irq_clear,
256 static const struct ata_port_info pdc_port_info[] = {
259 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
260 ATA_FLAG_PIO_POLLING,
261 .pio_mask = ATA_PIO4,
262 .mwdma_mask = ATA_MWDMA2,
263 .udma_mask = ATA_UDMA6,
264 .port_ops = &pdc_20621_ops,
269 static const struct pci_device_id pdc_sata_pci_tbl[] = {
270 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
272 { } /* terminate list */
275 static struct pci_driver pdc_sata_pci_driver = {
277 .id_table = pdc_sata_pci_tbl,
278 .probe = pdc_sata_init_one,
279 .remove = ata_pci_remove_one,
283 static int pdc_port_start(struct ata_port *ap)
285 struct device *dev = ap->host->dev;
286 struct pdc_port_priv *pp;
288 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
292 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
296 ap->private_data = pp;
301 static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
302 unsigned int total_len)
305 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
306 __le32 *buf32 = (__le32 *) buf;
308 /* output ATA packet S/G table */
309 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
310 (PDC_DIMM_DATA_STEP * portno);
312 buf32[dw] = cpu_to_le32(addr);
313 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
316 static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
317 unsigned int total_len)
320 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
321 __le32 *buf32 = (__le32 *) buf;
323 /* output Host DMA packet S/G table */
324 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
325 (PDC_DIMM_DATA_STEP * portno);
327 buf32[dw] = cpu_to_le32(addr);
328 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
/*
 * pdc20621_ata_pkt - build the ATA command packet for one command in the
 * per-port staging buffer @buf.  Returns the next free byte index so the
 * caller can append taskfile registers (LBA28/LBA48) and the footer.
 */
331 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
332 unsigned int devno, u8 *buf,
336 __le32 *buf32 = (__le32 *) buf;
/* DIMM-space address of this port's ATA-packet S/G table */
339 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
340 (PDC_DIMM_WINDOW_STEP * portno) +
/* packet body starts at a fixed offset inside the staging buffer */
343 i = PDC_DIMM_ATA_PKT;
/* opcode byte: READ for DMA-from-device, NODATA for non-data commands */
348 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
349 buf[i++] = PDC_PKT_READ;
350 else if (tf->protocol == ATA_PROT_NODATA)
351 buf[i++] = PDC_PKT_NODATA;
354 buf[i++] = 0; /* reserved */
355 buf[i++] = portno + 1; /* seq. id */
356 buf[i++] = 0xff; /* delay seq. id */
358 /* dimm dma S/G, and next-pkt */
360 if (tf->protocol == ATA_PROT_NODATA)
363 buf32[dw] = cpu_to_le32(dimm_sg);
/* device register: select device 0 or 1 (obsolete bits kept set) */
368 dev_reg = ATA_DEVICE_OBS;
370 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
/* register write: count (1) in bits 7:5, then register selector */
373 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
376 /* device control register */
377 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
/*
 * pdc20621_host_pkt - build the Host DMA (HDMA) packet for one command.
 *
 * The HDMA engine copies between system memory (described by the host
 * S/G table) and DIMM memory (described by the DIMM S/G table); the
 * direction is encoded in the first dword based on the taskfile.
 */
383 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
388 __le32 *buf32 = (__le32 *) buf;
/* DIMM-space addresses of this port's host and HDMA S/G tables */
390 unsigned int host_sg = PDC_20621_DIMM_BASE +
391 (PDC_DIMM_WINDOW_STEP * portno) +
393 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
394 (PDC_DIMM_WINDOW_STEP * portno) +
/* HDMA packet lives at a fixed dword offset in the staging buffer */
397 dw = PDC_DIMM_HOST_PKT >> 2;
400 * Set up Host DMA packet
402 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
/* HDMA sequence ids are the ATA ids offset by 4 (see interrupt demux) */
406 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
407 tmp |= (0xff << 24); /* delay seq. id */
408 buf32[dw + 0] = cpu_to_le32(tmp);
409 buf32[dw + 1] = cpu_to_le32(host_sg);
410 buf32[dw + 2] = cpu_to_le32(dimm_sg);
/*
 * pdc20621_dma_prep - prepare a DMA command: build the host PRD table,
 * the ATA and HDMA packets, and their S/G tables in the per-port staging
 * buffer, then copy everything into this port's DIMM MMIO window.
 */
414 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
416 struct scatterlist *sg;
417 struct ata_port *ap = qc->ap;
418 struct pdc_port_priv *pp = ap->private_data;
419 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
420 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
421 unsigned int portno = ap->port_no;
422 unsigned int i, si, idx, total_len = 0, sgt_len;
/* PRD entries are appended after the fixed packet/S/G header area */
423 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
425 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
427 /* hard-code chip #0 */
428 mmio += PDC_CHIP0_OFS;
/* build S/G table (PRD): one addr/len dword pair per segment */
434 for_each_sg(qc->sg, sg, qc->n_elem, si) {
435 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
436 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
437 total_len += sg_dma_len(sg);
/* mark the final PRD entry as end-of-table */
439 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
443 * Build ATA, host DMA packets
445 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
446 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
448 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
449 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
451 if (qc->tf.flags & ATA_TFLAG_LBA48)
452 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
454 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
456 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
458 /* copy three S/G tables and two packets to DIMM MMIO window */
459 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
460 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
461 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
463 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
465 /* force host FIFO dump */
466 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
468 readl(dimm_mmio); /* MMIO PCI posting flush */
470 ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
/*
 * pdc20621_nodata_prep - prepare a non-data command: build only the ATA
 * packet (no PRD table, no HDMA packet) and copy the header area into
 * this port's DIMM MMIO window.
 */
474 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
476 struct ata_port *ap = qc->ap;
477 struct pdc_port_priv *pp = ap->private_data;
478 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
479 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
480 unsigned int portno = ap->port_no;
483 /* hard-code chip #0 */
484 mmio += PDC_CHIP0_OFS;
486 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
/* append taskfile registers in LBA48 or LBA28 layout */
488 if (qc->tf.flags & ATA_TFLAG_LBA48)
489 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
491 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
493 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
495 /* copy three S/G tables and two packets to DIMM MMIO window */
496 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
497 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
499 /* force host FIFO dump */
500 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
502 readl(dimm_mmio); /* MMIO PCI posting flush */
504 ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
507 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
509 switch (qc->tf.protocol) {
511 pdc20621_dma_prep(qc);
513 case ATA_PROT_NODATA:
514 pdc20621_nodata_prep(qc);
523 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
527 struct ata_port *ap = qc->ap;
528 struct ata_host *host = ap->host;
529 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
531 /* hard-code chip #0 */
532 mmio += PDC_CHIP0_OFS;
534 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
535 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
537 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
538 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
541 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
545 struct ata_port *ap = qc->ap;
546 struct pdc_host_priv *pp = ap->host->private_data;
547 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
549 if (!pp->doing_hdma) {
550 __pdc20621_push_hdma(qc, seq, pkt_ofs);
555 pp->hdma[idx].qc = qc;
556 pp->hdma[idx].seq = seq;
557 pp->hdma[idx].pkt_ofs = pkt_ofs;
561 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
563 struct ata_port *ap = qc->ap;
564 struct pdc_host_priv *pp = ap->host->private_data;
565 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
567 /* if nothing on queue, we're done */
568 if (pp->hdma_prod == pp->hdma_cons) {
573 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
574 pp->hdma[idx].pkt_ofs);
578 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
580 struct ata_port *ap = qc->ap;
581 unsigned int port_no = ap->port_no;
582 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
584 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
585 dimm_mmio += PDC_DIMM_HOST_PKT;
587 ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
588 readl(dimm_mmio), readl(dimm_mmio + 4),
589 readl(dimm_mmio + 8), readl(dimm_mmio + 12));
/*
 * pdc20621_packet_start - kick off a prepared command.
 *
 * Writes: step one is HDMA (host memory -> DIMM), so the HDMA packet is
 * pushed first; the ATA packet is submitted later from the interrupt
 * handler.  Reads and non-data commands submit the ATA packet directly.
 */
592 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
594 struct ata_port *ap = qc->ap;
595 struct ata_host *host = ap->host;
596 unsigned int port_no = ap->port_no;
597 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
598 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
/* ATA-engine sequence id for this port (HDMA ids are seq + 4) */
599 u8 seq = (u8) (port_no + 1);
600 unsigned int port_ofs;
602 /* hard-code chip #0 */
603 mmio += PDC_CHIP0_OFS;
605 wmb(); /* flush PRD, pkt writes */
607 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
609 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
610 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
613 pdc20621_dump_hdma(qc);
614 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
615 ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
616 port_ofs + PDC_DIMM_HOST_PKT,
617 port_ofs + PDC_DIMM_HOST_PKT,
/* non-write path: arm the sequence slot and submit the ATA packet now */
620 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
621 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
623 writel(port_ofs + PDC_DIMM_ATA_PKT,
624 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
625 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
626 ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
627 port_ofs + PDC_DIMM_ATA_PKT,
628 port_ofs + PDC_DIMM_ATA_PKT,
633 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
635 switch (qc->tf.protocol) {
636 case ATA_PROT_NODATA:
637 if (qc->tf.flags & ATA_TFLAG_POLLING)
641 pdc20621_packet_start(qc);
652 return ata_sff_qc_issue(qc);
/*
 * pdc20621_host_intr - per-port interrupt service for a completed
 * ATA or HDMA step.
 *
 * Implements the two-step READ (ATA then HDMA) and WRITE (HDMA then ATA)
 * state machines described in the file header; @doing_hdma tells which
 * step just finished.  Returns nonzero if the interrupt was handled.
 */
655 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
656 struct ata_queued_cmd *qc,
657 unsigned int doing_hdma,
660 unsigned int port_no = ap->port_no;
661 unsigned int port_ofs =
662 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
664 unsigned int handled = 0;
666 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
667 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
669 /* step two - DMA from DIMM to host */
671 ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
672 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
673 /* get drive status; clear intr; complete txn */
674 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
/* free the shared HDMA engine for the next queued transaction */
676 pdc20621_pop_hdma(qc);
679 /* step one - exec ATA command */
/* READ uses the HDMA sequence id (port seq + 4) for step two */
681 u8 seq = (u8) (port_no + 1 + 4);
682 ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
683 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
685 /* submit hdma pkt */
686 pdc20621_dump_hdma(qc);
687 pdc20621_push_hdma(qc, seq,
688 port_ofs + PDC_DIMM_HOST_PKT);
692 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
694 /* step one - DMA from host to DIMM */
696 u8 seq = (u8) (port_no + 1);
697 ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
698 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT))
/* HDMA copy done: arm sequence slot and submit the ATA packet */
701 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
702 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
703 writel(port_ofs + PDC_DIMM_ATA_PKT,
704 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
705 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
708 /* step two - execute ATA command */
710 ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
711 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
712 /* get drive status; clear intr; complete txn */
713 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
715 pdc20621_pop_hdma(qc);
719 /* command completion, but no data xfer */
720 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
722 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
723 ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
724 qc->err_mask |= ac_err_mask(status);
/* unexpected protocol/state: count as a spurious (idle) interrupt */
729 ap->stats.idle_irq++;
735 static void pdc20621_irq_clear(struct ata_port *ap)
737 ioread8(ap->ioaddr.status_addr);
/*
 * pdc20621_interrupt - shared interrupt handler.
 *
 * Reads the SEQ mask register (which also acknowledges the interrupts)
 * and demultiplexes by sequence id: ids 1-4 are the four ATA engines,
 * ids 5-8 are the corresponding HDMA completions for the same ports.
 */
740 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
742 struct ata_host *host = dev_instance;
745 unsigned int i, tmp, port_no;
746 unsigned int handled = 0;
747 void __iomem *mmio_base;
/* guard against interrupts arriving before/after resource setup */
749 if (!host || !host->iomap[PDC_MMIO_BAR])
752 mmio_base = host->iomap[PDC_MMIO_BAR];
754 /* reading should also clear interrupts */
755 mmio_base += PDC_CHIP0_OFS;
756 mask = readl(mmio_base + PDC_20621_SEQMASK);
/* all-ones means the device is gone (e.g. hot-unplugged) */
758 if (mask == 0xffffffff)
761 mask &= 0xffff; /* only 16 tags possible */
765 spin_lock(&host->lock);
767 for (i = 1; i < 9; i++) {
771 if (port_no >= host->n_ports)
774 ap = host->ports[port_no];
775 tmp = mask & (1 << i);
777 ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
779 struct ata_queued_cmd *qc;
781 qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* i > 4 <=> HDMA sequence id range; pass step info to the handler */
782 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
783 handled += pdc20621_host_intr(ap, qc, (i > 4),
788 spin_unlock(&host->lock);
790 return IRQ_RETVAL(handled);
793 static void pdc_freeze(struct ata_port *ap)
795 void __iomem *mmio = ap->ioaddr.cmd_addr;
798 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
800 tmp = readl(mmio + PDC_CTLSTAT);
802 tmp &= ~PDC_DMA_ENABLE;
803 writel(tmp, mmio + PDC_CTLSTAT);
804 readl(mmio + PDC_CTLSTAT); /* flush */
807 static void pdc_thaw(struct ata_port *ap)
809 void __iomem *mmio = ap->ioaddr.cmd_addr;
812 /* FIXME: start HDMA engine, if zero ATA engines running */
815 ioread8(ap->ioaddr.status_addr);
817 /* turn IRQ back on */
818 tmp = readl(mmio + PDC_CTLSTAT);
819 tmp &= ~PDC_MASK_INT;
820 writel(tmp, mmio + PDC_CTLSTAT);
821 readl(mmio + PDC_CTLSTAT); /* flush */
/*
 * pdc_reset_port - reset this port's ATA engine via the control/status
 * register, polling briefly for the reset to take effect.
 */
824 static void pdc_reset_port(struct ata_port *ap)
826 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
830 /* FIXME: handle HDMA copy engine */
/* bounded retry loop; NOTE(review): loop body elided in this listing */
832 for (i = 11; i > 0; i--) {
845 readl(mmio); /* flush */
848 static int pdc_softreset(struct ata_link *link, unsigned int *class,
849 unsigned long deadline)
851 pdc_reset_port(link->ap);
852 return ata_sff_softreset(link, class, deadline);
/*
 * pdc_error_handler - libata ->error_handler hook.
 *
 * If EH was not invoked via a frozen port, reset the packet engine
 * before running the standard SFF error handler.
 */
static void pdc_error_handler(struct ata_port *ap)
{
	if (!ata_port_is_frozen(ap))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}
863 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
865 struct ata_port *ap = qc->ap;
867 /* make DMA engine forget about the failed command */
868 if (qc->flags & ATA_QCFLAG_EH)
/*
 * pdc_check_atapi_dma - decide whether an ATAPI command may use DMA.
 * Returns nonzero to force PIO; only whitelisted commands run via DMA.
 */
872 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
874 u8 *scsicmd = qc->scsicmd->cmnd;
875 int pio = 1; /* atapi dma off by default */
877 /* Whitelist commands that may use DMA. */
878 switch (scsicmd[0]) {
885 case 0xad: /* READ_DVD_STRUCTURE */
886 case 0xbe: /* READ_CD */
889 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
890 if (scsicmd[0] == WRITE_10) {
/* lba presumably assembled from scsicmd[2..5] above — elided here */
896 if (lba >= 0xFFFF4FA2)
902 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
904 WARN_ON(tf->protocol == ATA_PROT_DMA ||
905 tf->protocol == ATAPI_PROT_DMA);
906 ata_sff_tf_load(ap, tf);
910 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
912 WARN_ON(tf->protocol == ATA_PROT_DMA ||
913 tf->protocol == ATAPI_PROT_DMA);
914 ata_sff_exec_command(ap, tf);
918 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
920 port->cmd_addr = base;
921 port->data_addr = base;
923 port->error_addr = base + 0x4;
924 port->nsect_addr = base + 0x8;
925 port->lbal_addr = base + 0xc;
926 port->lbam_addr = base + 0x10;
927 port->lbah_addr = base + 0x14;
928 port->device_addr = base + 0x18;
930 port->status_addr = base + 0x1c;
931 port->altstatus_addr =
932 port->ctl_addr = base + 0x38;
/*
 * pdc20621_get_from_dimm - copy @size bytes at DIMM @offset into
 * @psource, paging the 32K DIMM MMIO window as needed: a possibly
 * partial first chunk, whole-window chunks, then the remainder.
 */
936 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
937 u32 offset, u32 size)
943 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
944 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
946 /* hard-code chip #0 */
947 mmio += PDC_CHIP0_OFS;
950 window_size = 0x2000 * 4; /* 32K byte uchar size */
/* select the window page containing @offset */
951 idx = (u16) (offset / window_size);
953 writel(0x01, mmio + PDC_GENERAL_CTLR);
954 readl(mmio + PDC_GENERAL_CTLR);
955 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
956 readl(mmio + PDC_DIMM_WINDOW_CTLR);
958 offset -= (idx * window_size);
/* first chunk: up to the end of the current window */
960 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
961 (long) (window_size - offset);
962 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
/* middle chunks: full windows */
966 for (; (long) size >= (long) window_size ;) {
967 writel(0x01, mmio + PDC_GENERAL_CTLR);
968 readl(mmio + PDC_GENERAL_CTLR);
969 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
970 readl(mmio + PDC_DIMM_WINDOW_CTLR);
971 memcpy_fromio(psource, dimm_mmio, window_size / 4);
972 psource += window_size;
/* final partial chunk, if any */
978 writel(0x01, mmio + PDC_GENERAL_CTLR);
979 readl(mmio + PDC_GENERAL_CTLR);
980 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
981 readl(mmio + PDC_DIMM_WINDOW_CTLR);
982 memcpy_fromio(psource, dimm_mmio, size / 4);
/*
 * pdc20621_put_to_dimm - copy @size bytes from @psource to DIMM @offset;
 * mirror of pdc20621_get_from_dimm() with the same three-phase window
 * paging, using memcpy_toio and a FIFO-dump write after each chunk.
 */
987 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
988 u32 offset, u32 size)
994 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
995 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
997 /* hard-code chip #0 */
998 mmio += PDC_CHIP0_OFS;
1001 window_size = 0x2000 * 4; /* 32K byte uchar size */
1002 idx = (u16) (offset / window_size);
1004 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1005 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1006 offset -= (idx * window_size);
/* first chunk: up to the end of the current window */
1008 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1009 (long) (window_size - offset);
1010 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1011 writel(0x01, mmio + PDC_GENERAL_CTLR);
1012 readl(mmio + PDC_GENERAL_CTLR);
/* middle chunks: full windows */
1016 for (; (long) size >= (long) window_size ;) {
1017 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1018 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1019 memcpy_toio(dimm_mmio, psource, window_size / 4);
1020 writel(0x01, mmio + PDC_GENERAL_CTLR);
1021 readl(mmio + PDC_GENERAL_CTLR);
1022 psource += window_size;
1023 size -= window_size;
/* final partial chunk, if any */
1028 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1029 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1030 memcpy_toio(dimm_mmio, psource, size / 4);
1031 writel(0x01, mmio + PDC_GENERAL_CTLR);
1032 readl(mmio + PDC_GENERAL_CTLR);
/*
 * pdc20621_i2c_read - read one byte from an I2C device (DIMM SPD
 * EEPROM) at @device/@subaddr into *@pdata, polling for completion.
 * Returns nonzero on success; NOTE(review): failure paths elided here.
 */
1037 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1038 u32 subaddr, u32 *pdata)
1040 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1045 /* hard-code chip #0 */
1046 mmio += PDC_CHIP0_OFS;
/* device address in bits 31:24, subaddress in bits 23:16 */
1048 i2creg |= device << 24;
1049 i2creg |= subaddr << 16;
1051 /* Set the device and subaddress */
1052 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1053 readl(mmio + PDC_I2C_ADDR_DATA);
1055 /* Write Control to perform read operation, mask int */
1056 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1057 mmio + PDC_I2C_CONTROL);
/* busy-poll for completion, bounded at ~1000 iterations */
1059 for (count = 0; count <= 1000; count ++) {
1060 status = readl(mmio + PDC_I2C_CONTROL);
1061 if (status & PDC_I2C_COMPLETE) {
1062 status = readl(mmio + PDC_I2C_ADDR_DATA);
1064 } else if (count == 1000)
/* returned data byte lives in bits 15:8 of the data register */
1068 *pdata = (status >> 8) & 0x000000ff;
/*
 * pdc20621_detect_dimm - probe the DIMM's SPD EEPROM over I2C to derive
 * its speed grade.  Returns the detected speed, or 0 on failure
 * (NOTE(review): the branch bodies/returns are elided in this listing).
 */
1073 static int pdc20621_detect_dimm(struct ata_host *host)
1076 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1077 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
/* SPD byte 9: SDRAM cycle time — used when byte 126 is inconclusive */
1083 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
/*
 * pdc20621_prog_dimm0 - read the DIMM's SPD parameters over I2C and
 * program the DIMM0 module control register accordingly.  Returns the
 * computed DIMM size in MB.
 */
1093 static int pdc20621_prog_dimm0(struct ata_host *host)
1099 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* table of SPD register -> spd0[] slot; ofs doubles as the SPD byte # */
1100 static const struct {
1103 } pdc_i2c_read_data [] = {
1104 { PDC_DIMM_SPD_TYPE, 11 },
1105 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1106 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1107 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1108 { PDC_DIMM_SPD_ROW_NUM, 3 },
1109 { PDC_DIMM_SPD_BANK_NUM, 17 },
1110 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1111 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1112 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1113 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1114 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1115 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1118 /* hard-code chip #0 */
1119 mmio += PDC_CHIP0_OFS;
1121 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1122 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1123 pdc_i2c_read_data[i].reg,
1124 &spd0[pdc_i2c_read_data[i].ofs]);
/* pack geometry/timing fields into the control register value */
1126 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1127 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1128 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1129 data |= (((((spd0[29] > spd0[28])
1130 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1131 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
/* CAS latency: prefer CL3, then CL2, then CL1 (SPD byte 18 bitmask) */
1133 if (spd0[18] & 0x08)
1134 data |= ((0x03) << 14);
1135 else if (spd0[18] & 0x04)
1136 data |= ((0x02) << 14);
1137 else if (spd0[18] & 0x01)
1138 data |= ((0x01) << 14);
1143 Calculate the size of bDIMMSize (power of 2) and
1144 merge the DIMM size by program start/end address.
1147 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1148 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1149 data |= (((size / 16) - 1) << 16);
1152 writel(data, mmio + PDC_DIMM0_CONTROL);
1153 readl(mmio + PDC_DIMM0_CONTROL);
/*
 * pdc20621_prog_dimm_global - program the DIMM module global control
 * register: defaults, ECC enable (if the SPD reports ECC), then DIMM
 * initialization with a bounded completion poll.
 */
1158 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1162 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1164 /* hard-code chip #0 */
1165 mmio += PDC_CHIP0_OFS;
1168 Set To Default : DIMM Module Global Control Register (0x022259F1)
1169 DIMM Arbitration Disable (bit 20)
1170 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1171 Refresh Enable (bit 17)
1175 writel(data, mmio + PDC_SDRAM_CONTROL);
1176 readl(mmio + PDC_SDRAM_CONTROL);
1178 /* Turn on for ECC */
1179 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1180 PDC_DIMM_SPD_TYPE, &spd0)) {
1182 "Failed in i2c read: device=%#x, subaddr=%#x\n",
1183 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1187 data |= (0x01 << 16);
1188 writel(data, mmio + PDC_SDRAM_CONTROL);
1189 readl(mmio + PDC_SDRAM_CONTROL);
1190 dev_err(host->dev, "Local DIMM ECC Enabled\n");
1193 /* DIMM Initialization Select/Enable (bit 18/19) */
1196 writel(data, mmio + PDC_SDRAM_CONTROL);
/* wait for hardware to clear the init-busy bit (bit 19) */
1199 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1200 data = readl(mmio + PDC_SDRAM_CONTROL);
1201 if (!(data & (1<<19))) {
/*
 * pdc20621_dimm_init - one-time board bring-up of the on-card DIMM:
 * calibrate the PLL from the measured PCI bus clock, detect/program the
 * DIMM over I2C, optionally run a read/write pattern test, and erase
 * the DIMM to initialize ECC.  Returns 0 on success, nonzero on error.
 */
1211 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1213 int speed, size, length;
1214 u32 addr, spd0, pci_status;
1215 u32 time_period = 0;
1220 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1222 /* hard-code chip #0 */
1223 mmio += PDC_CHIP0_OFS;
1225 /* Initialize PLL based upon PCI Bus Frequency */
1227 /* Initialize Time Period Register */
1228 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1229 time_period = readl(mmio + PDC_TIME_PERIOD);
1230 dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
1233 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1234 readl(mmio + PDC_TIME_CONTROL);
1236 /* Wait 3 seconds */
1240 When timer is enabled, counter is decreased every internal
1244 tcount = readl(mmio + PDC_TIME_COUNTER);
1245 dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
1248 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1249 register should be >= (0xffffffff - 3x10^8).
1251 if (tcount >= PCI_X_TCOUNT) {
1252 ticks = (time_period - tcount);
1253 dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
/* derive internal clock from ticks elapsed over the 3s window */
1255 clock = (ticks / 300000);
1256 dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
1259 clock = (clock * 33);
1260 dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
1263 /* PLL F Param (bit 22:16) */
1264 fparam = (1400000 / clock) - 2;
1265 dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
1267 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1268 pci_status = (0x8a001824 | (fparam << 16));
/* plain PCI bus: use the fixed default PLL setting */
1270 pci_status = PCI_PLL_INIT;
1272 /* Initialize PLL. */
1273 dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
1274 writel(pci_status, mmio + PDC_CTL_STATUS);
1275 readl(mmio + PDC_CTL_STATUS);
1278 Read SPD of DIMM by I2C interface,
1279 and program the DIMM Module Controller.
1281 if (!(speed = pdc20621_detect_dimm(host))) {
1282 dev_err(host->dev, "Detect Local DIMM Fail\n");
1283 return 1; /* DIMM error */
1285 dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
1287 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1288 size = pdc20621_prog_dimm0(host);
1289 dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
1291 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1292 if (pdc20621_prog_dimm_global(host)) {
1294 "Programming DIMM Module Global Control Register Fail\n");
/* optional DIMM self-test, gated by the dimm_test module parameter */
1299 u8 test_parttern1[40] =
1300 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1301 'N','o','t',' ','Y','e','t',' ',
1302 'D','e','f','i','n','e','d',' ',
1304 '9','8','0','3','1','6','1','2',0,0};
1305 u8 test_parttern2[40] = {0};
1307 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1308 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1310 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1311 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1312 dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
1313 test_parttern2[1], &(test_parttern2[2]));
1314 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1316 dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
1318 test_parttern2[1], &(test_parttern2[2]));
1320 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1321 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1322 dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
1324 test_parttern2[1], &(test_parttern2[2]));
1327 /* ECC initialization. */
1329 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1330 PDC_DIMM_SPD_TYPE, &spd0)) {
1332 "Failed in i2c read: device=%#x, subaddr=%#x\n",
1333 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1338 dev_dbg(host->dev, "Start ECC initialization\n");
/* zero the whole DIMM in ECC_ERASE_BUF_SZ chunks to seed ECC bits */
1340 length = size * 1024 * 1024;
1341 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1344 while (addr < length) {
1345 pdc20621_put_to_dimm(host, buf, addr,
1347 addr += ECC_ERASE_BUF_SZ;
1350 dev_dbg(host->dev, "Finish ECC initialization\n");
/*
 * pdc_20621_init - final chip setup: select the DIMM window page and
 * pulse the HDMA engine's reset bit (set then clear).
 */
1356 static void pdc_20621_init(struct ata_host *host)
1359 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1361 /* hard-code chip #0 */
1362 mmio += PDC_CHIP0_OFS;
1365 * Select page 0x40 for our 32k DIMM window
1367 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1368 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1369 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
/* reset the HDMA engine: assert... */
1374 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1376 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1377 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
/* ...then deassert */
1381 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1383 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1384 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
/*
 * pdc_sata_init_one - PCI probe: allocate the 4-port host, map BARs,
 * set up per-port taskfile addresses, initialize the DIMM and chip,
 * and activate the host with the shared interrupt handler.
 */
1387 static int pdc_sata_init_one(struct pci_dev *pdev,
1388 const struct pci_device_id *ent)
1390 const struct ata_port_info *ppi[] =
1391 { &pdc_port_info[ent->driver_data], NULL };
1392 struct ata_host *host;
1393 struct pdc_host_priv *hpriv;
1396 ata_print_version_once(&pdev->dev, DRV_VERSION);
1399 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1400 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1401 if (!host || !hpriv)
1404 host->private_data = hpriv;
1406 /* acquire resources and fill host */
1407 rc = pcim_enable_device(pdev);
1411 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
/* keep the device enabled even if mapping failed (managed pinning) */
1414 pcim_pin_device(pdev);
1417 host->iomap = pcim_iomap_table(pdev);
/* ports live at 0x80-byte strides starting at chip offset 0x200 */
1419 for (i = 0; i < 4; i++) {
1420 struct ata_port *ap = host->ports[i];
1421 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1422 unsigned int offset = 0x200 + i * 0x80;
1424 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1426 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1427 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1428 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1431 /* configure and activate */
1432 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1436 if (pdc20621_dimm_init(host))
1438 pdc_20621_init(host);
1440 pci_set_master(pdev);
1441 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1442 IRQF_SHARED, &pdc_sata_sht);
1445 module_pci_driver(pdc_sata_pci_driver);
1447 MODULE_AUTHOR("Jeff Garzik");
1448 MODULE_DESCRIPTION("Promise SATA low-level driver");
1449 MODULE_LICENSE("GPL");
1450 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1451 MODULE_VERSION(DRV_VERSION);