// SPDX-License-Identifier: GPL-2.0-only
 * sata_mv.c - Marvell SATA support
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 * --> Develop a low-power-consumption strategy, and implement it.
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards? If so,
 *     creating LibATA target mode support would be very interesting.
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 * 80x1-B2 errata PCI#11:
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher. The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
#define DRV_VERSION "1.28"
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");
static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
	MV_IO_BAR = 2, /* offset 0x18: IO space */
	MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
	MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT = 255, /* completed I/O count */
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	COAL_REG_BASE = 0x18000,
	IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
	IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
	 * Registers for the (unused here) transaction coalescing feature:
	TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
	SATAHC0_REG_BASE = 0x20000,
	GPIO_PORT_CTL = 0x104f0,
	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
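	/*
	 * Worked sizes for the above, assuming MV_MAX_Q_DEPTH == 32 (its
	 * value in this driver): MV_CRQB_Q_SZ = 32 * 32 = 1024 B and
	 * MV_CRPB_Q_SZ = 8 * 32 = 256 B, so each queue's total size equals
	 * its required alignment, which is what lets the dma_pool
	 * allocations in mv_port_start() hand back correctly aligned queues.
	 */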
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
	MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
	MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
	MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
		ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
	MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
	CRQB_FLAG_READ = (1 << 0),
	CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),
	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
	EPRD_FLAG_END_OF_TBL = (1 << 31),
	/* PCI interface registers */
	MV_PCI_COMMAND = 0xc00,
	MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
	PCI_MAIN_CMD_STS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),
	MV_PCI_MODE_MASK = 0x30,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,
	PCI_IRQ_CAUSE = 0x1d58,
	PCI_IRQ_MASK = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
	PCIE_IRQ_CAUSE = 0x1900,
	PCIE_IRQ_MASK = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
	SOC_HC_MAIN_IRQ_MASK = 0x20024,
	ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
	DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
	HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
	HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
	TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
	TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
	/* SATAHC registers */
	DMA_IRQ = (1 << 0), /* shift by port # */
	HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
	DEV_IRQ = (1 << 8), /* shift by port # */
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
	SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
	/* with dev activity LED */
	/* Shadow block registers */
	SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
	SATA_STATUS = 0x300, /* ctrl, err regs follow status */
	FIS_IRQ_CAUSE = 0x364,
	FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
	LTMODE = 0x30c, /* requires read-after-write */
	LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
	PHY_MODE4 = 0x314, /* requires read-after-write */
	PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
	PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
	PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
	SATA_TESTCTL = 0x348,
	VENDOR_UNIQUE_FIS = 0x35c,
	FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
	FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
	PHY_MODE9_GEN2 = 0x398,
	PHY_MODE9_GEN1 = 0x39c,
	PHYCFG_OFS = 0x3a0, /* only in 65n devices */
	LP_PHY_CTL_PIN_PU_PLL = (1 << 0),
	LP_PHY_CTL_PIN_PU_RX = (1 << 1),
	LP_PHY_CTL_PIN_PU_TX = (1 << 2),
	LP_PHY_CTL_GEN_TX_3G = (1 << 5),
	LP_PHY_CTL_GEN_RX_3G = (1 << 9),
	MV_M2_PREAMP_MASK = 0x7e0,
	EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
	EDMA_ERR_IRQ_CAUSE = 0x8,
	EDMA_ERR_IRQ_MASK = 0xc,
	EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2), /* device error */
	EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
	EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
	EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
	EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
	EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
	EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),
	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
		EDMA_ERR_LNK_CTRL_RX_1 |
		EDMA_ERR_LNK_CTRL_RX_3 |
		EDMA_ERR_LNK_CTRL_TX,
	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
		EDMA_ERR_LNK_CTRL_RX_2 |
		EDMA_ERR_LNK_DATA_RX |
		EDMA_ERR_LNK_DATA_TX |
		EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
		EDMA_ERR_UNDERRUN_5 |
		EDMA_ERR_SELF_DIS_5 |
	EDMA_REQ_Q_BASE_HI = 0x10,
	EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,
	EDMA_RSP_Q_BASE_HI = 0x1c,
	EDMA_RSP_Q_IN_PTR = 0x20,
	EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,
	EDMA_CMD = 0x28, /* EDMA command register */
	EDMA_EN = (1 << 0), /* enable EDMA */
	EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
	EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
	EDMA_STATUS = 0x30, /* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
	EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
	EDMA_IORDY_TMOUT = 0x34,
	EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
	BMDMA_CMD = 0x224, /* bmdma command register */
	BMDMA_STATUS = 0x228, /* bmdma status register */
	BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
	MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
	MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	MV_DMA_BOUNDARY = 0xffffU,
	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
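	/*
	 * These masks follow from the alignment rules above: a 1KB-aligned
	 * request queue leaves address bits 9:0 zero, and a 256B-aligned
	 * response queue leaves bits 7:0 zero. The IN/OUT pointer registers
	 * reuse that low region for the queue index (see mv_set_edma_ptrs(),
	 * which ORs "index" into the BASE_LO value).
	 */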
/* Command ReQuest Block: 32B */
/* Command ResPonse Block: 8B */
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
struct mv_cached_regs {
struct mv_port_priv {
	struct mv_crqb *crqb;
	struct mv_crpb *crpb;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
	unsigned int req_idx;
	unsigned int resp_idx;
	struct mv_cached_regs cached;
	unsigned int delayed_eh_pmp_map;
struct mv_port_signal {
struct mv_host_priv {
	unsigned int board_idx;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	void __iomem *main_irq_cause_addr;
	void __iomem *main_irq_mask_addr;
	u32 irq_cause_offset;
	 * Needed on some devices that require their clocks to be enabled.
	 * These are optional: if the platform device does not have any
	 * clocks, they won't be used. Also, if the underlying hardware
	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
	 * all the clock operations become no-ops (see clk.h).
	struct clk **port_clks;
	 * Some devices have a SATA PHY which can be enabled/disabled
	 * in order to save power. These are optional: if the platform
	 * device does not have any PHYs, they won't be used.
	struct phy **port_phys;
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);
static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,
	.lost_interrupt = ATA_OP_NULL,
	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.freeze = mv_eh_freeze,
	.hardreset = mv_hardreset,
	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,
	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
static struct ata_port_operations mv6_ops = {
	.inherits = &ata_bmdma_port_ops,
	.lost_interrupt = ATA_OP_NULL,
	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.dev_config = mv6_dev_config,
	.freeze = mv_eh_freeze,
	.hardreset = mv_hardreset,
	.softreset = mv_softreset,
	.pmp_hardreset = mv_pmp_hardreset,
	.pmp_softreset = mv_softreset,
	.error_handler = mv_pmp_error_handler,
	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,
	.sff_check_status = mv_sff_check_status,
	.sff_irq_clear = mv_sff_irq_clear,
	.check_atapi_dma = mv_check_atapi_dma,
	.bmdma_setup = mv_bmdma_setup,
	.bmdma_start = mv_bmdma_start,
	.bmdma_stop = mv_bmdma_stop,
	.bmdma_status = mv_bmdma_status,
	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.dev_config = ATA_OP_NULL,
	.qc_prep = mv_qc_prep_iie,
static const struct ata_port_info mv_port_info[] = {
		.flags = MV_GEN_I_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
		.flags = MV_GEN_II_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
		.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata = mv_soc_65n_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
static inline void writelfl(unsigned long data, void __iomem *addr)
	(void) readl(addr); /* flush to avoid PCI posted write */
static inline unsigned int mv_hc_from_port(unsigned int port)
	return port >> MV_PORT_HC_SHIFT;
static inline unsigned int mv_hardport_from_port(unsigned int port)
	return port & MV_PORT_MASK;
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 * Note that port and hardport may be the same variable in some cases.
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
	shift = mv_hc_from_port(port) * HC_SHIFT; \
	hardport = mv_hardport_from_port(port); \
	shift += hardport * 2;
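/*
 * Illustrative helper (not part of the original driver): using the macro
 * above for, e.g., port 5 gives hardport = 5 & 3 = 1 and
 * shift = (5 >> 2) * HC_SHIFT + 1 * 2 = 11, so port 5's ERR_IRQ and
 * DONE_IRQ land at bits 11 and 12 of the main cause/mask registers.
 */
static inline u32 mv_example_port_irq_bits(unsigned int port)
{
	unsigned int shift, hardport;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
	return (DONE_IRQ | ERR_IRQ) << shift;
}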
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
	return mv_hc_base(base, mv_hc_from_port(port));
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
	return hc_mmio + ofs;
static inline void __iomem *mv_host_base(struct ata_host *host)
	struct mv_host_priv *hpriv = host->private_data;
static inline void __iomem *mv_ap_base(struct ata_port *ap)
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
static inline int mv_get_hc_count(unsigned long port_flags)
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 * mv_save_cached_regs - (re-)initialize cached port registers
 * @ap: the port whose registers we are caching
 * Initialize the local cache of port registers,
 * so that reading them over and over again can
 * be avoided on the hotter paths of this driver.
 * This saves a few microseconds each time we switch
 * to/from EDMA mode to perform (eg.) a drive cache flush.
static void mv_save_cached_regs(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 * mv_write_cached_reg - write to a cached port register
 * @addr: hardware address of the register
 * @old: pointer to cached value of the register
 * @new: new value for the register
 * Write a new value to a cached register,
 * but only if the value is different from before.
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		laddr = (unsigned long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
		writel(new, addr); /* unaffected by the errata */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
	 * initialize request queue
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
	 * initialize response queue
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
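/*
 * A note on the "(x >> 16) >> 16" idiom above: dma_addr_t may be only
 * 32 bits wide, and shifting a 32-bit value by 32 is undefined behaviour
 * in C, so the shift is split in two. A hypothetical helper making the
 * intent explicit:
 */
static inline u32 mv_example_dma_hi32(dma_addr_t addr)
{
	/* high 32 bits of a DMA address, UB-free even for 32-bit dma_addr_t */
	return (addr >> 16) >> 16;
}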
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;
	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;
	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
	mv_enable_port_irqs(ap, port_irqs);
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
		ALL_PORTS_COAL_DONE;
	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);
	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	 * All chips: independent thresholds for each HC on the chip.
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	coal_enable |= PORTS_0_3_COAL_DONE;
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		coal_enable |= PORTS_4_7_COAL_DONE;
	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
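/*
 * Worked example of the threshold math above: with the module parameter
 * irq_coalescing_usecs=100, clks = 100 * COAL_CLOCKS_PER_USEC = 15000
 * internal clocks, well below MAX_COAL_TIME_THRESHOLD ((1 << 24) - 1 =
 * 16777215); usecs would have to exceed roughly 111848 before the clamp
 * engages.
 */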
 * mv_start_edma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 * Verify the local cache of the eDMA state is accurate with a
 * Inherited from caller.
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
	int want_ncq = (protocol == ATA_PROT_NCQ);
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		mv_edma_cfg(ap, want_ncq, 1);
		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use. So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
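/*
 * Worked numbers for the wait loop above: with per_loop = 5 (usecs per
 * iteration, the delay call itself being elided here), timeout =
 * 15 * 1000 / 5 = 3000 iterations, bounding the wait at roughly 15 msec
 * before we give up and proceed anyway.
 */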
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 * Inherited from caller.
static int mv_stop_edma_engine(void __iomem *port_mmio)
	/* Disable eDMA. The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
static int mv_stop_edma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
	mv_edma_cfg(ap, 0, 0);
static void mv_dump_mem(void __iomem *start, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
	void __iomem *hc_base = mv_hc_base(mmio_base,
				port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;
		start_hc = start_port = 0;
		num_ports = 8; /* should be benign for 4-port devs */
		start_hc = port >> MV_PORT_HC_SHIFT;
		num_ports = num_hcs = 1;
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		ofs = SATA_ACTIVE; /* active is not with the others */
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);
	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);
	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		struct mv_host_priv *hpriv = link->ap->host->private_data;
		if (sc_reg_in == SCR_CONTROL) {
			 * Workaround for 88SX60x1 FEr SATA#26:
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
		if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
			void __iomem *lp_phy_addr =
				mv_ap_base(link->ap) + LP_PHY_CTL;
			 * Set PHY speed according to SControl speed.
				LP_PHY_CTL_PIN_PU_PLL |
				LP_PHY_CTL_PIN_PU_RX |
				LP_PHY_CTL_PIN_PU_TX;
			if ((val & 0xf0) != 0x10)
					LP_PHY_CTL_GEN_TX_3G |
					LP_PHY_CTL_GEN_RX_3G;
			writelfl(lp_phy_val, lp_phy_addr);
		writelfl(val, addr);
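		/*
		 * Worked example of the SATA#26 workaround above: an SControl
		 * write of 0x301 (COMRESET, DET=1) matches "(val & 0xf) == 1",
		 * so bits 12..15 get set (presumably via val |= 0xf000 in the
		 * elided branch) and something like 0xf301 goes out on the
		 * wire; the follow-up DET=0 write gets the same treatment.
		 */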
static void mv6_dev_config(struct ata_device *adev)
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 * Gen-II does not support NCQ over a port multiplier
	 * (no FIS-based switching).
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
				"NCQ disabled for command-based switching\n");
static int mv_qc_defer(struct ata_queued_cmd *qc)
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return ATA_DEFER_PORT;
	 * If the port is completely idle, then allow the new qc.
	if (ap->nr_active_links == 0)
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands. EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			ap->excl_link = link;
			return ATA_DEFER_PORT;
	return ATA_DEFER_PORT;
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;
	u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
	u32 ltmode, *old_ltmode = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
	ltmode = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
			haltcond &= ~EDMA_ERR_DEV;
			fiscfg |= FISCFG_WAIT_DEV_ERR;
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
	struct mv_host_priv *hpriv = ap->host->private_data;
	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
		new = old | (1 << 22);
		new = old & ~(1 << 22);
	writel(new, hpriv->base + GPIO_PORT_CTL);
 * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 * @ap: Port being initialized
 * There are two DMA modes on these chips: basic DMA, and EDMA.
 * Bit-0 of the "EDMA RESERVED" register enables/disables use
 * of basic DMA on the GEN_IIE versions of the chips.
 * This bit survives EDMA resets, and must be set for basic DMA
 * to function, and should be cleared when EDMA is active.
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 * Note that this code assumes that an SOC never has more than one HC onboard.
static void mv_soc_led_blink_enable(struct ata_port *ap)
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
static void mv_soc_led_blink_disable(struct ata_port *ap)
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;
		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
		~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8); /* enab config burst size mask */
	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);
	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		 * Possible future enhancement:
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		want_fbs &= want_ncq;
		mv_config_fbs(ap, want_ncq, want_fbs);
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22); /* enab 4-entry host queue cache */
			cfg |= (1 << 18); /* enab early completion */
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);
		if (IS_SOC(hpriv)) {
				mv_soc_led_blink_enable(ap);
				mv_soc_led_blink_disable(ap);
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	writelfl(cfg, port_mmio + EDMA_CFG);
static void mv_port_free_dma_mem(struct ata_port *ap)
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 * Allocate and point to DMA memory, init port private memory,
 * Inherited from caller.
static int mv_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	ap->private_data = pp;
	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
		goto out_port_free_dma_mem;
	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);
out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 * Stop DMA, cleanup port memory.
 * This routine uses the host lock to protect the DMA stop.
static void mv_port_stop(struct ata_port *ap)
	unsigned long flags;
	spin_lock_irqsave(ap->lock, flags);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 * Populate the SG list and mark the last entry.
 * Inherited from caller.
static void mv_fill_sg(struct ata_queued_cmd *qc)
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	mv_sg = pp->sg_tbl[qc->hw_tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);
			u32 offset = addr & 0xffff;
			if (offset + len > 0x10000)
				len = 0x10000 - offset;
			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
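/*
 * Worked example of the 64K-boundary split above: a 12 KiB segment at bus
 * address 0x1234ff00 has offset 0xff00, so its first ePRD entry is clipped
 * to 0x10000 - 0xff00 = 0x100 bytes; the remaining 0x2f00 bytes are carried
 * by subsequent entries in the (elided) remainder of the loop. This is also
 * why .dma_boundary is MV_DMA_BOUNDARY and .sg_tablesize is only
 * MV_MAX_SG_CT / 2 in the host templates above.
 */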
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
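/*
 * Example encoding from mv_crqb_pack_cmd() above: packing command byte 0xc8
 * (READ DMA) at register address 7 (ATA_REG_CMD) with last=1 yields
 * 0xc8 | (7 << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | CRQB_CMD_LAST
 * = 0xc8 | 0x700 | 0x1000 | 0x8000 = 0x97c8.
 */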
1799 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1800 * @ap: Port associated with this ATA transaction.
1802 * We need this only for ATAPI bmdma transactions,
1803 * as otherwise we experience spurious interrupts
1804 * after libata-sff handles the bmdma interrupts.
1806 static void mv_sff_irq_clear(struct ata_port *ap)
1808 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1812 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1813 * @qc: queued command to check for chipset/DMA compatibility.
1815 * The bmdma engines cannot handle speculative data sizes
1816 * (bytecount under/over flow). So only allow DMA for
1817 * data transfer commands with known data sizes.
1820 * Inherited from caller.
1822 static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1824 struct scsi_cmnd *scmd = qc->scsicmd;
1827 switch (scmd->cmnd[0]) {
1835 case GPCMD_SEND_DVD_STRUCTURE:
1836 case GPCMD_SEND_CUE_SHEET:
1837 return 0; /* DMA is safe */
1840 return -EOPNOTSUPP; /* use PIO instead */
1844 * mv_bmdma_setup - Set up BMDMA transaction
1845 * @qc: queued command to prepare DMA for.
1848 * Inherited from caller.
1850 static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1852 struct ata_port *ap = qc->ap;
1853 void __iomem *port_mmio = mv_ap_base(ap);
1854 struct mv_port_priv *pp = ap->private_data;
1858 /* clear all DMA cmd bits */
1859 writel(0, port_mmio + BMDMA_CMD);
1861 /* load PRD table addr. */
1862 writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
1863 port_mmio + BMDMA_PRD_HIGH);
1864 writelfl(pp->sg_tbl_dma[qc->hw_tag],
1865 port_mmio + BMDMA_PRD_LOW);
1867 /* issue r/w command */
1868 ap->ops->sff_exec_command(ap, &qc->tf);
1872 * mv_bmdma_start - Start a BMDMA transaction
1873 * @qc: queued command to start DMA on.
1876 * Inherited from caller.
1878 static void mv_bmdma_start(struct ata_queued_cmd *qc)
1880 struct ata_port *ap = qc->ap;
1881 void __iomem *port_mmio = mv_ap_base(ap);
1882 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1883 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1885 /* start host DMA transaction */
1886 writelfl(cmd, port_mmio + BMDMA_CMD);
1890 * mv_bmdma_stop - Stop BMDMA transfer
1891 * @qc: queued command to stop DMA on.
1893 * Clears the ATA_DMA_START flag in the bmdma control register
1896 * Inherited from caller.
1898 static void mv_bmdma_stop_ap(struct ata_port *ap)
1900 void __iomem *port_mmio = mv_ap_base(ap);
1903 /* clear start/stop bit */
1904 cmd = readl(port_mmio + BMDMA_CMD);
1905 if (cmd & ATA_DMA_START) {
1906 cmd &= ~ATA_DMA_START;
1907 writelfl(cmd, port_mmio + BMDMA_CMD);
1909 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1910 ata_sff_dma_pause(ap);
1914 static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1916 mv_bmdma_stop_ap(qc->ap);
1920 * mv_bmdma_status - Read BMDMA status
1921 * @ap: port for which to retrieve DMA status.
1923 * Read and return equivalent of the sff BMDMA status register.
1926 * Inherited from caller.
1928 static u8 mv_bmdma_status(struct ata_port *ap)
1930 void __iomem *port_mmio = mv_ap_base(ap);
1934 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1935 * and the ATA_DMA_INTR bit doesn't exist.
1937 reg = readl(port_mmio + BMDMA_STATUS);
1938 if (reg & ATA_DMA_ACTIVE)
1939 status = ATA_DMA_ACTIVE;
1940 else if (reg & ATA_DMA_ERR)
1941 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1944 * Just because DMA_ACTIVE is 0 (DMA completed),
1945 * this does _not_ mean the device is "done".
1946 * So we should not yet be signalling ATA_DMA_INTR
1947 * in some cases. Eg. DSM/TRIM, and perhaps others.
1949 mv_bmdma_stop_ap(ap);
1950 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1953 status = ATA_DMA_INTR;
1958 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1960 struct ata_taskfile *tf = &qc->tf;
1962 * Workaround for 88SX60x1 FEr SATA#24.
1964 * Chip may corrupt WRITEs if multi_count >= 4kB.
1965 * Note that READs are unaffected.
1967 * It's not clear if this errata really means "4K bytes",
1968 * or if it always happens for multi_count > 7
1969 * regardless of device sector_size.
1971 * So, for safety, any write with multi_count > 7
1972 * gets converted here into a regular PIO write instead:
1974 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1975 if (qc->dev->multi_count > 7) {
1976 switch (tf->command) {
1977 case ATA_CMD_WRITE_MULTI:
1978 tf->command = ATA_CMD_PIO_WRITE;
1980 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1981 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1983 case ATA_CMD_WRITE_MULTI_EXT:
1984 tf->command = ATA_CMD_PIO_WRITE_EXT;
1992 * mv_qc_prep - Host specific command preparation.
1993 * @qc: queued command to prepare
1995 * This routine simply redirects to the general purpose routine
1996 * if command is not DMA. Else, it handles prep of the CRQB
1997 * (command request block), does some sanity checking, and calls
1998 * the SG load routine.
2001 * Inherited from caller.
2003 static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
2005 struct ata_port *ap = qc->ap;
2006 struct mv_port_priv *pp = ap->private_data;
2008 struct ata_taskfile *tf = &qc->tf;
2012 switch (tf->protocol) {
2014 if (tf->command == ATA_CMD_DSM)
2018 break; /* continue below */
2020 mv_rw_multi_errata_sata24(qc);
2026 /* Fill in command request block
2028 if (!(tf->flags & ATA_TFLAG_WRITE))
2029 flags |= CRQB_FLAG_READ;
2030 WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2031 flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2032 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2034 /* get current queue index from software */
2035 in_index = pp->req_idx;
2037 pp->crqb[in_index].sg_addr =
2038 cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2039 pp->crqb[in_index].sg_addr_hi =
2040 cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2041 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2043 cw = &pp->crqb[in_index].ata_cmd[0];
2045 /* Sadly, the CRQB cannot accommodate all registers--there are
2046 * only 11 bytes...so we must pick and choose required
2047 * registers based on the command. So, we drop feature and
2048 * hob_feature for [RW] DMA commands, but they are needed for
2049 * NCQ. NCQ will drop hob_nsect, which is not needed there
2050 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2052 switch (tf->command) {
2054 case ATA_CMD_READ_EXT:
2056 case ATA_CMD_WRITE_EXT:
2057 case ATA_CMD_WRITE_FUA_EXT:
2058 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2060 case ATA_CMD_FPDMA_READ:
2061 case ATA_CMD_FPDMA_WRITE:
2062 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2063 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2066 /* The only other commands EDMA supports in non-queued and
2067 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2068 * of which are defined/used by Linux. If we get here, this
2069 * driver needs work.
2071 ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
2073 return AC_ERR_INVALID;
2075 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2076 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2077 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2078 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2079 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2080 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2081 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2082 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2083 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
	return AC_ERR_OK;
mv_fill_sg(qc);
return AC_ERR_OK;
2093 * mv_qc_prep_iie - Host specific command preparation.
2094 * @qc: queued command to prepare
2096 * This routine simply redirects to the general purpose routine
* if the command is not DMA. Otherwise, it handles prep of the CRQB
2098 * (command request block), does some sanity checking, and calls
2099 * the SG load routine.
2102 * Inherited from caller.
2104 static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
2106 struct ata_port *ap = qc->ap;
2107 struct mv_port_priv *pp = ap->private_data;
2108 struct mv_crqb_iie *crqb;
2109 struct ata_taskfile *tf = &qc->tf;
2113 if ((tf->protocol != ATA_PROT_DMA) &&
    (tf->protocol != ATA_PROT_NCQ))
	return AC_ERR_OK;
2116 if (tf->command == ATA_CMD_DSM)
2117 return AC_ERR_OK; /* use bmdma for this */
2119 /* Fill in Gen IIE command request block */
2120 if (!(tf->flags & ATA_TFLAG_WRITE))
2121 flags |= CRQB_FLAG_READ;
2123 WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2124 flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2125 flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
2126 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2128 /* get current queue index from software */
2129 in_index = pp->req_idx;
2131 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2132 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2133 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2134 crqb->flags = cpu_to_le32(flags);
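/*
 * Unlike the original CRQB, which stores (value, register) pairs,
 * the Gen-IIE CRQB carries the shadow-register taskfile packed
 * into four little-endian 32-bit words, filled in below.
 */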
2136 crqb->ata_cmd[0] = cpu_to_le32(
2137 (tf->command << 16) |
2140 crqb->ata_cmd[1] = cpu_to_le32(
2146 crqb->ata_cmd[2] = cpu_to_le32(
2147 (tf->hob_lbal << 0) |
2148 (tf->hob_lbam << 8) |
2149 (tf->hob_lbah << 16) |
2150 (tf->hob_feature << 24)
2152 crqb->ata_cmd[3] = cpu_to_le32(
2154 (tf->hob_nsect << 8)
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
	return AC_ERR_OK;
mv_fill_sg(qc);
return AC_ERR_OK;
2165 * mv_sff_check_status - fetch device status, if valid
2166 * @ap: ATA port to fetch status from
2168 * When using command issue via mv_qc_issue_fis(),
2169 * the initial ATA_BUSY state does not show up in the
2170 * ATA status (shadow) register. This can confuse libata!
2172 * So we have a hook here to fake ATA_BUSY for that situation,
2173 * until the first time a BUSY, DRQ, or ERR bit is seen.
2175 * The rest of the time, it simply returns the ATA status register.
2177 static u8 mv_sff_check_status(struct ata_port *ap)
2179 u8 stat = ioread8(ap->ioaddr.status_addr);
2180 struct mv_port_priv *pp = ap->private_data;
2182 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
	if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
		pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
	else
		stat = ATA_BUSY;
}
return stat;
2192 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2193 * @fis: fis to be sent
2194 * @nwords: number of 32-bit words in the fis
2196 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2198 void __iomem *port_mmio = mv_ap_base(ap);
2199 u32 ifctl, old_ifctl, ifstat;
2200 int i, timeout = 200, final_word = nwords - 1;
2202 /* Initiate FIS transmission mode */
2203 old_ifctl = readl(port_mmio + SATA_IFCTL);
2204 ifctl = 0x100 | (old_ifctl & 0xf);
2205 writelfl(ifctl, port_mmio + SATA_IFCTL);
2207 /* Send all words of the FIS except for the final word */
2208 for (i = 0; i < final_word; ++i)
2209 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2211 /* Flag end-of-transmission, and then send the final word */
2212 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2213 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2216 * Wait for FIS transmission to complete.
2217 * This typically takes just a single iteration.
do {
	ifstat = readl(port_mmio + SATA_IFSTAT);
} while (!(ifstat & 0x1000) && --timeout);
2223 /* Restore original port configuration */
2224 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
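/*
 * Judging from the 0x3000 test below, bit 12 of SATA_IFSTAT appears
 * to flag transmit-complete and bit 13 a transmit error; the
 * datasheet names for these bits are not reproduced here.
 */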
2226 /* See if it worked */
2227 if ((ifstat & 0x3000) != 0x1000) {
2228 ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
		      __func__, ifstat);
	return AC_ERR_OTHER;
}
return 0;
2236 * mv_qc_issue_fis - Issue a command directly as a FIS
2237 * @qc: queued command to start
2239 * Note that the ATA shadow registers are not updated
2240 * after command issue, so the device will appear "READY"
2241 * if polled, even while it is BUSY processing the command.
2243 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2245 * Note: we don't get updated shadow regs on *completion*
2246 * of non-data commands. So avoid sending them via this function,
2247 * as they will appear to have completed immediately.
2249 * GEN_IIE has special registers that we could get the result tf from,
2250 * but earlier chipsets do not. For now, we ignore those registers.
2252 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2254 struct ata_port *ap = qc->ap;
2255 struct mv_port_priv *pp = ap->private_data;
2256 struct ata_link *link = qc->dev->link;
u32 fis[5];	/* an H2D Register FIS occupies five 32-bit words */
int err;

ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
if (err)
	return err;
2265 switch (qc->tf.protocol) {
2266 case ATAPI_PROT_PIO:
	pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
	fallthrough;
2269 case ATAPI_PROT_NODATA:
	ap->hsm_task_state = HSM_ST_FIRST;
	break;
case ATA_PROT_PIO:
	pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2274 if (qc->tf.flags & ATA_TFLAG_WRITE)
2275 ap->hsm_task_state = HSM_ST_FIRST;
	else
		ap->hsm_task_state = HSM_ST;
	break;
default:
	ap->hsm_task_state = HSM_ST_LAST;
	break;
}
2284 if (qc->tf.flags & ATA_TFLAG_POLLING)
	ata_sff_queue_pio_task(link, 0);
return 0;
2290 * mv_qc_issue - Initiate a command to the host
2291 * @qc: queued command to start
2293 * This routine simply redirects to the general purpose routine
2294 * if command is not DMA. Else, it sanity checks our local
2295 * caches of the request producer/consumer indices then enables
2296 * DMA and bumps the request producer index.
2299 * Inherited from caller.
2301 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2303 static int limit_warnings = 10;
2304 struct ata_port *ap = qc->ap;
2305 void __iomem *port_mmio = mv_ap_base(ap);
2306 struct mv_port_priv *pp = ap->private_data;
2308 unsigned int port_irqs;
2310 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2312 switch (qc->tf.protocol) {
case ATA_PROT_DMA:
	if (qc->tf.command == ATA_CMD_DSM) {
2315 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2316 return AC_ERR_OTHER;
		break; /* use bmdma for this */
	}
	fallthrough;
case ATA_PROT_NCQ:
2321 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
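	/* The queue depth is a power of two, so masking wraps the sw producer index */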
2322 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2323 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2325 /* Write the request in pointer to kick the EDMA to life */
2326 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
return 0;
case ATA_PROT_PIO:
	/*
	 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2334 * Someday, we might implement special polling workarounds
2335 * for these, but it all seems rather unnecessary since we
2336 * normally use only DMA for commands which transfer more
2337 * than a single block of data.
2339 * Much of the time, this could just work regardless.
2340 * So for now, just log the incident, and allow the attempt.
2342 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
	--limit_warnings;
	ata_link_warn(qc->dev->link, DRV_NAME
2345 ": attempting PIO w/multiple DRQ: "
2346 "this may fail due to h/w errata\n");
2349 case ATA_PROT_NODATA:
2350 case ATAPI_PROT_PIO:
2351 case ATAPI_PROT_NODATA:
2352 if (ap->flags & ATA_FLAG_PIO_POLLING)
	qc->tf.flags |= ATA_TFLAG_POLLING;
break;
default:
	break;
}
2357 if (qc->tf.flags & ATA_TFLAG_POLLING)
2358 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
else
	port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
2363 * We're about to send a non-EDMA capable command to the
2364 * port. Turn off EDMA so there won't be problems accessing
 * the shadow block and other non-EDMA registers.
 */
mv_stop_edma(ap);
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2369 mv_pmp_select(ap, qc->dev->link->pmp);
2371 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2372 struct mv_host_priv *hpriv = ap->host->private_data;
2374 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2376 * After any NCQ error, the READ_LOG_EXT command
2377 * from libata-eh *must* use mv_qc_issue_fis().
2378 * Otherwise it might fail, due to chip errata.
2380 * Rather than special-case it, we'll just *always*
* use this method here for READ_LOG_EXT, making for
* simpler, more uniform handling.
2384 if (IS_GEN_II(hpriv))
2385 return mv_qc_issue_fis(qc);
2387 return ata_bmdma_qc_issue(qc);
2390 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2392 struct mv_port_priv *pp = ap->private_data;
2393 struct ata_queued_cmd *qc;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
	return NULL;	/* queued commands: no single "active" qc */
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
	return qc;
return NULL;
2403 static void mv_pmp_error_handler(struct ata_port *ap)
2405 unsigned int pmp, pmp_map;
2406 struct mv_port_priv *pp = ap->private_data;
2408 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2410 * Perform NCQ error analysis on failed PMPs
2411 * before we freeze the port entirely.
2413 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2415 pmp_map = pp->delayed_eh_pmp_map;
2416 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2417 for (pmp = 0; pmp_map != 0; pmp++) {
2418 unsigned int this_pmp = (1 << pmp);
2419 if (pmp_map & this_pmp) {
2420 struct ata_link *link = &ap->pmp_link[pmp];
2421 pmp_map &= ~this_pmp;
2422 ata_eh_analyze_ncq_error(link);
2425 ata_port_freeze(ap);
2427 sata_pmp_error_handler(ap);
2430 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2432 void __iomem *port_mmio = mv_ap_base(ap);
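/*
 * The upper 16 bits of SATA_TESTCTL apparently latch one device-error
 * flag per PMP link; mv_pmp_eh_prep() consumes this bitmap.
 */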
2434 return readl(port_mmio + SATA_TESTCTL) >> 16;
2437 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2442 * Initialize EH info for PMPs which saw device errors
2444 for (pmp = 0; pmp_map != 0; pmp++) {
2445 unsigned int this_pmp = (1 << pmp);
2446 if (pmp_map & this_pmp) {
2447 struct ata_link *link = &ap->pmp_link[pmp];
2448 struct ata_eh_info *ehi = &link->eh_info;
2450 pmp_map &= ~this_pmp;
2451 ata_ehi_clear_desc(ehi);
2452 ata_ehi_push_desc(ehi, "dev err");
2453 ehi->err_mask |= AC_ERR_DEV;
2454 ehi->action |= ATA_EH_RESET;
2455 ata_link_abort(link);
2460 static int mv_req_q_empty(struct ata_port *ap)
2462 void __iomem *port_mmio = mv_ap_base(ap);
2463 u32 in_ptr, out_ptr;
2465 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2466 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2467 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2468 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
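/*
 * Both indices are masked to the queue depth, so equality means the
 * hardware has consumed every request we have queued.
 */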
2469 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2472 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2474 struct mv_port_priv *pp = ap->private_data;
int failed_links;
unsigned int old_map, new_map;
2479 * Device error during FBS+NCQ operation:
2481 * Set a port flag to prevent further I/O being enqueued.
2482 * Leave the EDMA running to drain outstanding commands from this port.
2483 * Perform the post-mortem/EH only when all responses are complete.
2484 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2486 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2487 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2488 pp->delayed_eh_pmp_map = 0;
2490 old_map = pp->delayed_eh_pmp_map;
2491 new_map = old_map | mv_get_err_pmp_map(ap);
2493 if (old_map != new_map) {
2494 pp->delayed_eh_pmp_map = new_map;
2495 mv_pmp_eh_prep(ap, new_map & ~old_map);
2497 failed_links = hweight16(new_map);
2500 "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
2501 __func__, pp->delayed_eh_pmp_map,
2502 ap->qc_active, failed_links,
2503 ap->nr_active_links);
2505 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
	mv_process_crpb_entries(ap, pp);
	mv_stop_edma(ap);
	mv_eh_freeze(ap);
2509 ata_port_info(ap, "%s: done\n", __func__);
2510 return 1; /* handled */
2512 ata_port_info(ap, "%s: waiting\n", __func__);
2513 return 1; /* handled */
2516 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2519 * Possible future enhancement:
2521 * FBS+non-NCQ operation is not yet implemented.
2522 * See related notes in mv_edma_cfg().
2524 * Device error during FBS+non-NCQ operation:
2526 * We need to snapshot the shadow registers for each failed command.
2527 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2529 return 0; /* not handled */
2532 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2534 struct mv_port_priv *pp = ap->private_data;
2536 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2537 return 0; /* EDMA was not active: not handled */
2538 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2539 return 0; /* FBS was not active: not handled */
2541 if (!(edma_err_cause & EDMA_ERR_DEV))
2542 return 0; /* non DEV error: not handled */
2543 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2544 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2545 return 0; /* other problems: not handled */
2547 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2549 * EDMA should NOT have self-disabled for this case.
2550 * If it did, then something is wrong elsewhere,
2551 * and we cannot handle it here.
2553 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2554 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2555 __func__, edma_err_cause, pp->pp_flags);
2556 return 0; /* not handled */
2558 return mv_handle_fbs_ncq_dev_err(ap);
2561 * EDMA should have self-disabled for this case.
2562 * If it did not, then something is wrong elsewhere,
2563 * and we cannot handle it here.
2565 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2566 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2567 __func__, edma_err_cause, pp->pp_flags);
2568 return 0; /* not handled */
2570 return mv_handle_fbs_non_ncq_dev_err(ap);
2572 return 0; /* not handled */
2575 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2577 struct ata_eh_info *ehi = &ap->link.eh_info;
2578 char *when = "idle";
2580 ata_ehi_clear_desc(ehi);
2581 if (edma_was_enabled) {
2582 when = "EDMA enabled";
2584 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		when = "polling";
}
2588 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2589 ehi->err_mask |= AC_ERR_OTHER;
2590 ehi->action |= ATA_EH_RESET;
2591 ata_port_freeze(ap);
2595 * mv_err_intr - Handle error interrupts on the port
2596 * @ap: ATA channel to manipulate
2598 * Most cases require a full reset of the chip's state machine,
2599 * which also performs a COMRESET.
2600 * Also, if the port disabled DMA, update our cached copy to match.
2603 * Inherited from caller.
2605 static void mv_err_intr(struct ata_port *ap)
2607 void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, eh_freeze_mask, serr = 0;
u32 fis_cause = 0;
2610 struct mv_port_priv *pp = ap->private_data;
2611 struct mv_host_priv *hpriv = ap->host->private_data;
2612 unsigned int action = 0, err_mask = 0;
2613 struct ata_eh_info *ehi = &ap->link.eh_info;
2614 struct ata_queued_cmd *qc;
2618 * Read and clear the SError and err_cause bits.
2619 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2620 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2622 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2623 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2625 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2626 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2627 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2628 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2630 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2632 if (edma_err_cause & EDMA_ERR_DEV) {
2634 * Device errors during FIS-based switching operation
2635 * require special handling.
2637 if (mv_handle_dev_err(ap, edma_err_cause))
2641 qc = mv_get_active_qc(ap);
2642 ata_ehi_clear_desc(ehi);
2643 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2644 edma_err_cause, pp->pp_flags);
2646 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2647 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2648 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2649 u32 ec = edma_err_cause &
2650 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2651 sata_async_notification(ap);
		if (!ec)
			return; /* Just an AN; no need for the nukes */
2654 ata_ehi_push_desc(ehi, "SDB notify");
2658 * All generations share these EDMA error cause bits:
2660 if (edma_err_cause & EDMA_ERR_DEV) {
2661 err_mask |= AC_ERR_DEV;
2662 action |= ATA_EH_RESET;
2663 ata_ehi_push_desc(ehi, "dev error");
2665 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2666 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2667 EDMA_ERR_INTRL_PAR)) {
2668 err_mask |= AC_ERR_ATA_BUS;
2669 action |= ATA_EH_RESET;
2670 ata_ehi_push_desc(ehi, "parity error");
2672 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2673 ata_ehi_hotplugged(ehi);
2674 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2675 "dev disconnect" : "dev connect");
2676 action |= ATA_EH_RESET;
2680 * Gen-I has a different SELF_DIS bit,
2681 * different FREEZE bits, and no SERR bit:
2683 if (IS_GEN_I(hpriv)) {
2684 eh_freeze_mask = EDMA_EH_FREEZE_5;
2685 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2686 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2687 ata_ehi_push_desc(ehi, "EDMA self-disable");
2690 eh_freeze_mask = EDMA_EH_FREEZE;
2691 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2692 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2693 ata_ehi_push_desc(ehi, "EDMA self-disable");
2695 if (edma_err_cause & EDMA_ERR_SERR) {
2696 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2697 err_mask |= AC_ERR_ATA_BUS;
2698 action |= ATA_EH_RESET;
if (!err_mask) {
	err_mask = AC_ERR_OTHER;
	action |= ATA_EH_RESET;
}
2707 ehi->serror |= serr;
2708 ehi->action |= action;
if (qc)
	qc->err_mask |= err_mask;
else
	ehi->err_mask |= err_mask;
2715 if (err_mask == AC_ERR_DEV) {
2717 * Cannot do ata_port_freeze() here,
2718 * because it would kill PIO access,
2719 * which is needed for further diagnosis.
2723 } else if (edma_err_cause & eh_freeze_mask) {
2725 * Note to self: ata_port_freeze() calls ata_port_abort()
2727 ata_port_freeze(ap);
2734 ata_link_abort(qc->dev->link);
2740 static bool mv_process_crpb_response(struct ata_port *ap,
2741 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
u8 ata_status;
u16 edma_status = le16_to_cpu(response->flags);
2747 * edma_status from a response queue entry:
2748 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2749 * MSB is saved ATA status from command completion.
if (!ncq_enabled) {
	u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
	if (err_cause) {
2755 * Error will be seen/handled by
		 * mv_err_intr(). So do nothing at all here.
		 */
		return false;
	}
}
2761 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
if (!ac_err_mask(ata_status))
	return true;
/* else: leave it for mv_err_intr() */
return false;
2768 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2770 void __iomem *port_mmio = mv_ap_base(ap);
2771 struct mv_host_priv *hpriv = ap->host->private_data;
u32 in_index, done_mask = 0;
bool work_done = false;
2775 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2777 /* Get the hardware queue position index */
2778 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2779 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
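/*
 * The response queue is a producer/consumer ring: the EDMA engine
 * advances the IN pointer as it posts CRPBs, and we advance the OUT
 * pointer (written back below) once we have consumed them.
 */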
/* Process any new responses received since the last time we looked */
2782 while (in_index != pp->resp_idx) {
unsigned int tag;
struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2786 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2788 if (IS_GEN_I(hpriv)) {
2789 /* 50xx: no NCQ, only one command active at a time */
2790 tag = ap->link.active_tag;
2792 /* Gen II/IIE: get command tag from CRPB entry */
2793 tag = le16_to_cpu(response->id) & 0x1f;
2795 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
		done_mask |= 1 << tag;
	work_done = true;
}
if (work_done) {
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2803 /* Update the software queue position index in hardware */
2804 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2805 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}
2810 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2812 struct mv_port_priv *pp;
2813 int edma_was_enabled;
2816 * Grab a snapshot of the EDMA_EN flag setting,
2817 * so that we have a consistent view for this port,
* even if one of the routines we call changes it.
2820 pp = ap->private_data;
2821 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2823 * Process completed CRPB response(s) before other events.
2825 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2826 mv_process_crpb_entries(ap, pp);
2827 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2828 mv_handle_fbs_ncq_dev_err(ap);
2831 * Handle chip-reported errors, or continue on to handle PIO.
if (unlikely(port_cause & ERR_IRQ)) {
	mv_err_intr(ap);
2835 } else if (!edma_was_enabled) {
2836 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
	if (qc)
		ata_bmdma_port_intr(ap, qc);
	else
		mv_unexpected_intr(ap, edma_was_enabled);
}
2845 * mv_host_intr - Handle all interrupts on the given host controller
2846 * @host: host specific structure
2847 * @main_irq_cause: Main interrupt cause register for the chip.
2850 * Inherited from caller.
2852 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2854 struct mv_host_priv *hpriv = host->private_data;
2855 void __iomem *mmio = hpriv->base, *hc_mmio;
2856 unsigned int handled = 0, port;
2858 /* If asserted, clear the "all ports" IRQ coalescing bit */
2859 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2860 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2862 for (port = 0; port < hpriv->n_ports; port++) {
2863 struct ata_port *ap = host->ports[port];
2864 unsigned int p, shift, hardport, port_cause;
2866 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2868 * Each hc within the host has its own hc_irq_cause register,
* where the interrupting ports' bits get ack'd.
2871 if (hardport == 0) { /* first port on this hc ? */
2872 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2873 u32 port_mask, ack_irqs;
2875 * Skip this entire hc if nothing pending for any ports
if (!hc_cause) {
	port += MV_PORTS_PER_HC - 1;
	continue;
}
2882 * We don't need/want to read the hc_irq_cause register,
2883 * because doing so hurts performance, and
2884 * main_irq_cause already gives us everything we need.
2886 * But we do have to *write* to the hc_irq_cause to ack
2887 * the ports that we are handling this time through.
2889 * This requires that we create a bitmap for those
2890 * ports which interrupted us, and use that bitmap
2891 * to ack (only) those ports via hc_irq_cause.
if (hc_cause & PORTS_0_3_COAL_DONE)
	ack_irqs = HC_COAL_IRQ;
else
	ack_irqs = 0;
2896 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
if ((port + p) >= hpriv->n_ports)
	break;
2899 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2900 if (hc_cause & port_mask)
2901 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2903 hc_mmio = mv_hc_base_from_port(mmio, port);
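/*
 * The IRQ cause registers on these chips clear a bit when 0 is
 * written to it, so writing ~ack_irqs acks exactly the bits we are
 * handling while leaving any other pending bits untouched.
 */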
2904 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2908 * Handle interrupts signalled for this port:
2910 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
if (port_cause)
	mv_port_intr(ap, port_cause);
2917 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2919 struct mv_host_priv *hpriv = host->private_data;
2920 struct ata_port *ap;
2921 struct ata_queued_cmd *qc;
2922 struct ata_eh_info *ehi;
unsigned int i, err_mask, printed = 0;
u32 err_cause;
2926 err_cause = readl(mmio + hpriv->irq_cause_offset);
2928 dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2930 DPRINTK("All regs @ PCI error\n");
2931 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2933 writelfl(0, mmio + hpriv->irq_cause_offset);
2935 for (i = 0; i < host->n_ports; i++) {
2936 ap = host->ports[i];
2937 if (!ata_link_offline(&ap->link)) {
2938 ehi = &ap->link.eh_info;
2939 ata_ehi_clear_desc(ehi);
if (!printed++)
	ata_ehi_push_desc(ehi,
		"PCI err cause 0x%08x", err_cause);
2943 err_mask = AC_ERR_HOST_BUS;
2944 ehi->action = ATA_EH_RESET;
2945 qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
	qc->err_mask |= err_mask;
else
	ehi->err_mask |= err_mask;
2951 ata_port_freeze(ap);
2954 return 1; /* handled */
2958 * mv_interrupt - Main interrupt event handler
2960 * @dev_instance: private data; in this case the host structure
2962 * Read the read only register to determine if any host
2963 * controllers have pending interrupts. If so, call lower level
* routine to handle. Also check for PCI errors, which are only
* reported here.
*
* This routine holds the host lock while processing pending
* interrupts.
2971 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2973 struct ata_host *host = dev_instance;
2974 struct mv_host_priv *hpriv = host->private_data;
2975 unsigned int handled = 0;
2976 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2977 u32 main_irq_cause, pending_irqs;
2979 spin_lock(&host->lock);
2981 /* for MSI: block new interrupts while in here */
if (using_msi)
	mv_write_main_irq_mask(0, hpriv);
2985 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2986 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2988 * Deal with cases where we either have nothing pending, or have read
2989 * a bogus register value which can indicate HW removal or PCI fault.
2991 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2992 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2993 handled = mv_pci_error(host, hpriv->base);
2995 handled = mv_host_intr(host, pending_irqs);
2998 /* for MSI: unmask; interrupt cause bits will retrigger now */
if (using_msi)
	mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3002 spin_unlock(&host->lock);
3004 return IRQ_RETVAL(handled);
3007 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
unsigned int ofs;

switch (sc_reg_in) {
case SCR_STATUS:
case SCR_ERROR:
case SCR_CONTROL:
	ofs = sc_reg_in * sizeof(u32);
	break;
default:
	ofs = 0xffffffffU;	/* flag unsupported registers */
	break;
}
return ofs;
3024 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3026 struct mv_host_priv *hpriv = link->ap->host->private_data;
3027 void __iomem *mmio = hpriv->base;
3028 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3029 unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
	*val = readl(addr + ofs);
	return 0;
}
return -EINVAL;
3038 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3040 struct mv_host_priv *hpriv = link->ap->host->private_data;
3041 void __iomem *mmio = hpriv->base;
3042 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3043 unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
	writelfl(val, addr + ofs);
	return 0;
}
return -EINVAL;
3052 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3054 struct pci_dev *pdev = to_pci_dev(host->dev);
3057 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3060 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3062 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3065 mv_reset_pci_bus(host, mmio);
3068 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3070 writel(0x0fcfffff, mmio + FLASH_CTL);
3073 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3076 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3079 tmp = readl(phy_mmio + MV5_PHY_MODE);
3081 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3082 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
3085 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3089 writel(0, mmio + GPIO_PORT_CTL);
3091 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3093 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3095 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3098 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3101 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3102 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3104 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3107 tmp = readl(phy_mmio + MV5_LTMODE);
3109 writel(tmp, phy_mmio + MV5_LTMODE);
3111 tmp = readl(phy_mmio + MV5_PHY_CTL);
3114 writel(tmp, phy_mmio + MV5_PHY_CTL);
3117 tmp = readl(phy_mmio + MV5_PHY_MODE);
tmp &= ~mask;
tmp |= hpriv->signal[port].pre;
3120 tmp |= hpriv->signal[port].amps;
3121 writel(tmp, phy_mmio + MV5_PHY_MODE);
3126 #define ZERO(reg) writel(0, port_mmio + (reg))
3127 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3130 void __iomem *port_mmio = mv_port_base(mmio, port);
3132 mv_reset_channel(hpriv, mmio, port);
3134 ZERO(0x028); /* command */
3135 writel(0x11f, port_mmio + EDMA_CFG);
3136 ZERO(0x004); /* timer */
3137 ZERO(0x008); /* irq err cause */
3138 ZERO(0x00c); /* irq err mask */
3139 ZERO(0x010); /* rq bah */
3140 ZERO(0x014); /* rq inp */
3141 ZERO(0x018); /* rq outp */
3142 ZERO(0x01c); /* respq bah */
3143 ZERO(0x024); /* respq outp */
3144 ZERO(0x020); /* respq inp */
3145 ZERO(0x02c); /* test control */
3146 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
3151 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3154 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3162 tmp = readl(hc_mmio + 0x20);
3165 writel(tmp, hc_mmio + 0x20);
3169 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3172 unsigned int hc, port;
3174 for (hc = 0; hc < n_hc; hc++) {
3175 for (port = 0; port < MV_PORTS_PER_HC; port++)
3176 mv5_reset_hc_port(hpriv, mmio,
3177 (hc * MV_PORTS_PER_HC) + port);
	mv5_reset_one_hc(hpriv, mmio, hc);
}
return 0;
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
3187 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3189 struct mv_host_priv *hpriv = host->private_data;
3192 tmp = readl(mmio + MV_PCI_MODE);
3194 writel(tmp, mmio + MV_PCI_MODE);
3196 ZERO(MV_PCI_DISC_TIMER);
3197 ZERO(MV_PCI_MSI_TRIGGER);
3198 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3199 ZERO(MV_PCI_SERR_MASK);
3200 ZERO(hpriv->irq_cause_offset);
3201 ZERO(hpriv->irq_mask_offset);
3202 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3203 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3204 ZERO(MV_PCI_ERR_ATTRIBUTE);
3205 ZERO(MV_PCI_ERR_COMMAND);
3209 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3213 mv5_reset_flash(hpriv, mmio);
3215 tmp = readl(mmio + GPIO_PORT_CTL);
3217 tmp |= (1 << 5) | (1 << 6);
3218 writel(tmp, mmio + GPIO_PORT_CTL);
3222 * mv6_reset_hc - Perform the 6xxx global soft reset
3223 * @mmio: base address of the HBA
3225 * This routine only applies to 6xxx parts.
3228 * Inherited from caller.
3230 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3233 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3237 /* Following procedure defined in PCI "main command and status
 * register" table.
 */
t = readl(reg);
writel(t | STOP_PCI_MASTER, reg);
for (i = 0; i < 1000; i++) {
	udelay(1);
	t = readl(reg);
	if (PCI_MASTER_EMPTY & t)
		break;
}
3249 if (!(PCI_MASTER_EMPTY & t)) {
3250 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3258 writel(t | GLOB_SFT_RST, reg);
3261 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3263 if (!(GLOB_SFT_RST & t)) {
3264 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3269 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3272 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3275 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3277 if (GLOB_SFT_RST & t) {
3278 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3285 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3288 void __iomem *port_mmio;
3291 tmp = readl(mmio + RESET_CFG);
3292 if ((tmp & (1 << 0)) == 0) {
3293 hpriv->signal[idx].amps = 0x7 << 8;
	hpriv->signal[idx].pre = 0x1 << 5;
	return;
}
3298 port_mmio = mv_port_base(mmio, idx);
3299 tmp = readl(port_mmio + PHY_MODE2);
3301 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3302 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3305 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3307 writel(0x00000060, mmio + GPIO_PORT_CTL);
3310 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3313 void __iomem *port_mmio = mv_port_base(mmio, port);
3315 u32 hp_flags = hpriv->hp_flags;
int fix_phy_mode2 =
	hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
int fix_phy_mode4 =
	hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
u32 m2, m3;
3322 if (fix_phy_mode2) {
3323 m2 = readl(port_mmio + PHY_MODE2);
	m2 &= ~(1 << 16);
	m2 |= (1 << 31);
	writel(m2, port_mmio + PHY_MODE2);
	udelay(200);
3330 m2 = readl(port_mmio + PHY_MODE2);
3331 m2 &= ~((1 << 16) | (1 << 31));
3332 writel(m2, port_mmio + PHY_MODE2);
3338 * Gen-II/IIe PHY_MODE3 errata RM#2:
3339 * Achieves better receiver noise performance than the h/w default:
3341 m3 = readl(port_mmio + PHY_MODE3);
3342 m3 = (m3 & 0x1f) | (0x5555601 << 5);
3344 /* Guideline 88F5182 (GL# SATA-S11) */
3348 if (fix_phy_mode4) {
3349 u32 m4 = readl(port_mmio + PHY_MODE4);
3351 * Enforce reserved-bit restrictions on GenIIe devices only.
3352 * For earlier chipsets, force only the internal config field
3353 * (workaround for errata FEr SATA#10 part 1).
3355 if (IS_GEN_IIE(hpriv))
3356 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
else
	m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3359 writel(m4, port_mmio + PHY_MODE4);
3362 * Workaround for 60x1-B2 errata SATA#13:
3363 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3364 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3365 * Or ensure we use writelfl() when writing PHY_MODE4.
3367 writel(m3, port_mmio + PHY_MODE3);
3369 /* Revert values of pre-emphasis and signal amps to the saved ones */
3370 m2 = readl(port_mmio + PHY_MODE2);
3372 m2 &= ~MV_M2_PREAMP_MASK;
3373 m2 |= hpriv->signal[port].amps;
3374 m2 |= hpriv->signal[port].pre;
3377 /* according to mvSata 3.6.1, some IIE values are fixed */
3378 if (IS_GEN_IIE(hpriv)) {
3383 writel(m2, port_mmio + PHY_MODE2);
/* TODO: use the generic LED interface to configure the SATA Presence
 * and Activity LEDs on the board.
 */
3388 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3394 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3397 void __iomem *port_mmio;
3400 port_mmio = mv_port_base(mmio, idx);
3401 tmp = readl(port_mmio + PHY_MODE2);
3403 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3404 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
3409 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3410 void __iomem *mmio, unsigned int port)
3412 void __iomem *port_mmio = mv_port_base(mmio, port);
3414 mv_reset_channel(hpriv, mmio, port);
3416 ZERO(0x028); /* command */
3417 writel(0x101f, port_mmio + EDMA_CFG);
3418 ZERO(0x004); /* timer */
3419 ZERO(0x008); /* irq err cause */
3420 ZERO(0x00c); /* irq err mask */
3421 ZERO(0x010); /* rq bah */
3422 ZERO(0x014); /* rq inp */
3423 ZERO(0x018); /* rq outp */
3424 ZERO(0x01c); /* respq bah */
3425 ZERO(0x024); /* respq outp */
3426 ZERO(0x020); /* respq inp */
3427 ZERO(0x02c); /* test control */
3428 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
3434 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3437 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3447 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3448 void __iomem *mmio, unsigned int n_hc)
3452 for (port = 0; port < hpriv->n_ports; port++)
3453 mv_soc_reset_hc_port(hpriv, mmio, port);
mv_soc_reset_one_hc(hpriv, mmio);
return 0;
3460 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3466 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3471 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3472 void __iomem *mmio, unsigned int port)
3474 void __iomem *port_mmio = mv_port_base(mmio, port);
3477 reg = readl(port_mmio + PHY_MODE3);
reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
reg |= (0x1 << 27);
reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
reg |= (0x1 << 29);
3482 writel(reg, port_mmio + PHY_MODE3);
3484 reg = readl(port_mmio + PHY_MODE4);
reg &= ~0x1;		/* SATU_OD8 (bit 0) to 0 */
reg |= (0x1 << 16);	/* reserved bit 16 must be set */
3487 writel(reg, port_mmio + PHY_MODE4);
3489 reg = readl(port_mmio + PHY_MODE9_GEN2);
reg &= ~0xf;		/* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
3492 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3493 writel(reg, port_mmio + PHY_MODE9_GEN2);
3495 reg = readl(port_mmio + PHY_MODE9_GEN1);
reg &= ~0xf;		/* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
3498 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3499 writel(reg, port_mmio + PHY_MODE9_GEN1);
* soc_is_65n - check whether the SoC is a 65 nm device
*
* Detect the SoC type by reading the PHYCFG_OFS register. That register
* exists only on the 65 nm devices, where it holds a non-zero value;
* reading it on older devices returns 0.
3509 static bool soc_is_65n(struct mv_host_priv *hpriv)
3511 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
if (readl(port0_mmio + PHYCFG_OFS))
	return true;
return false;
3518 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3520 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3522 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
if (want_gen2i)
	ifcfg |= (1 << 7);	/* enable gen2i speed */
3525 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3528 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3529 unsigned int port_no)
3531 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3534 * The datasheet warns against setting EDMA_RESET when EDMA is active
3535 * (but doesn't say what the problem might be). So we first try
3536 * to disable the EDMA engine before doing the EDMA_RESET operation.
3538 mv_stop_edma_engine(port_mmio);
3539 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3541 if (!IS_GEN_I(hpriv)) {
3542 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3543 mv_setup_ifcfg(port_mmio, 1);
3546 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3547 * link, and physical layers. It resets all SATA interface registers
3548 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3550 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3551 udelay(25); /* allow reset propagation */
3552 writelfl(0, port_mmio + EDMA_CMD);
3554 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3556 if (IS_GEN_I(hpriv))
3557 usleep_range(500, 1000);
3560 static void mv_pmp_select(struct ata_port *ap, int pmp)
3562 if (sata_pmp_supported(ap)) {
3563 void __iomem *port_mmio = mv_ap_base(ap);
3564 u32 reg = readl(port_mmio + SATA_IFCTL);
3565 int old = reg & 0xf;
		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
3574 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3575 unsigned long deadline)
3577 mv_pmp_select(link->ap, sata_srst_pmp(link));
3578 return sata_std_hardreset(link, class, deadline);
3581 static int mv_softreset(struct ata_link *link, unsigned int *class,
3582 unsigned long deadline)
3584 mv_pmp_select(link->ap, sata_srst_pmp(link));
3585 return ata_sff_softreset(link, class, deadline);
3588 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3589 unsigned long deadline)
3591 struct ata_port *ap = link->ap;
3592 struct mv_host_priv *hpriv = ap->host->private_data;
3593 struct mv_port_priv *pp = ap->private_data;
3594 void __iomem *mmio = hpriv->base;
int rc, attempts = 0, extra = 0;
u32 sstatus;
bool online;
3599 mv_reset_channel(hpriv, mmio, ap->port_no);
3600 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3602 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3604 /* Workaround for errata FEr SATA#10 (part 2) */
do {
	const unsigned long *timing =
3607 sata_ehc_deb_timing(&link->eh_context);
	rc = sata_link_hardreset(link, timing, deadline + extra,
				 &online, NULL);
	rc = online ? -EAGAIN : rc;
	if (rc)
		return rc;
3614 sata_scr_read(link, SCR_STATUS, &sstatus);
3615 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3616 /* Force 1.5gb/s link speed and try again */
3617 mv_setup_ifcfg(mv_ap_base(ap), 0);
3618 if (time_after(jiffies + HZ, deadline))
3619 extra = HZ; /* only extend it once, max */
3621 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
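/*
 * SStatus decode: DET is bits 3:0, SPD bits 7:4, IPM bits 11:8.
 * 0x0 means no device; 0x113/0x123 mean a device is up at Gen1/Gen2
 * speed. 0x121 appears to be a stuck partial-detect state, which the
 * force-1.5Gb/s retry above is aimed at.
 */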
3622 mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);

return rc;
3628 static void mv_eh_freeze(struct ata_port *ap)
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
3634 static void mv_eh_thaw(struct ata_port *ap)
3636 struct mv_host_priv *hpriv = ap->host->private_data;
3637 unsigned int port = ap->port_no;
3638 unsigned int hardport = mv_hardport_from_port(port);
3639 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3640 void __iomem *port_mmio = mv_ap_base(ap);
3643 /* clear EDMA errors on this port */
3644 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3646 /* clear pending irq events */
3647 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3648 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3650 mv_enable_port_irqs(ap, ERR_IRQ);
3654 * mv_port_init - Perform some early initialization on a single port.
3655 * @port: libata data structure storing shadow register addresses
3656 * @port_mmio: base address of the port
3658 * Initialize shadow register mmio addresses, clear outstanding
3659 * interrupts on the port, and unmask interrupts for the future
3660 * start of the port.
3663 * Inherited from caller.
3665 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3667 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3669 /* PIO related setup
3671 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3673 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3674 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3675 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3676 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3677 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3678 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3680 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3681 /* special case: control/altstatus doesn't have ATA_REG_ address */
3682 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3684 /* Clear any currently outstanding port interrupt conditions */
3685 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3686 writelfl(readl(serr), serr);
3687 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3689 /* unmask all non-transient EDMA error interrupts */
3690 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3692 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3693 readl(port_mmio + EDMA_CFG),
3694 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3695 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3698 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3700 struct mv_host_priv *hpriv = host->private_data;
3701 void __iomem *mmio = hpriv->base;
/* SoCs have no PCI interface, and PCIe chips cannot be in PCI-X mode */
if (IS_SOC(hpriv) || IS_PCIE(hpriv))
3705 return 0; /* not PCI-X capable */
3706 reg = readl(mmio + MV_PCI_MODE);
3707 if ((reg & MV_PCI_MODE_MASK) == 0)
3708 return 0; /* conventional PCI mode */
3709 return 1; /* chip is in PCI-X mode */
3712 static int mv_pci_cut_through_okay(struct ata_host *host)
3714 struct mv_host_priv *hpriv = host->private_data;
3715 void __iomem *mmio = hpriv->base;
3718 if (!mv_in_pcix_mode(host)) {
3719 reg = readl(mmio + MV_PCI_COMMAND);
3720 if (reg & MV_PCI_COMMAND_MRDTRIG)
3721 return 0; /* not okay */
3723 return 1; /* okay */
3726 static void mv_60x1b2_errata_pci7(struct ata_host *host)
3728 struct mv_host_priv *hpriv = host->private_data;
3729 void __iomem *mmio = hpriv->base;
3731 /* workaround for 60x1-B2 errata PCI#7 */
3732 if (mv_in_pcix_mode(host)) {
3733 u32 reg = readl(mmio + MV_PCI_COMMAND);
3734 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3738 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3740 struct pci_dev *pdev = to_pci_dev(host->dev);
3741 struct mv_host_priv *hpriv = host->private_data;
3742 u32 hp_flags = hpriv->hp_flags;
3744 switch (board_idx) {
case chip_5080:
	hpriv->ops = &mv5xxx_ops;
3747 hp_flags |= MV_HP_GEN_I;
3749 switch (pdev->revision) {
3751 hp_flags |= MV_HP_ERRATA_50XXB0;
3754 hp_flags |= MV_HP_ERRATA_50XXB2;
3757 dev_warn(&pdev->dev,
3758 "Applying 50XXB2 workarounds to unknown rev\n");
3759 hp_flags |= MV_HP_ERRATA_50XXB2;
case chip_504x:
case chip_508x:
	hpriv->ops = &mv5xxx_ops;
3767 hp_flags |= MV_HP_GEN_I;
3769 switch (pdev->revision) {
3771 hp_flags |= MV_HP_ERRATA_50XXB0;
3774 hp_flags |= MV_HP_ERRATA_50XXB2;
3777 dev_warn(&pdev->dev,
3778 "Applying B2 workarounds to unknown rev\n");
3779 hp_flags |= MV_HP_ERRATA_50XXB2;
case chip_604x:
case chip_608x:
	hpriv->ops = &mv6xxx_ops;
3787 hp_flags |= MV_HP_GEN_II;
3789 switch (pdev->revision) {
3791 mv_60x1b2_errata_pci7(host);
3792 hp_flags |= MV_HP_ERRATA_60X1B2;
3795 hp_flags |= MV_HP_ERRATA_60X1C0;
3798 dev_warn(&pdev->dev,
3799 "Applying B2 workarounds to unknown rev\n");
3800 hp_flags |= MV_HP_ERRATA_60X1B2;
case chip_7042:
	hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3807 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3808 (pdev->device == 0x2300 || pdev->device == 0x2310))
3811 * Highpoint RocketRAID PCIe 23xx series cards:
3813 * Unconfigured drives are treated as "Legacy"
3814 * by the BIOS, and it overwrites sector 8 with
3815 * a "Lgcy" metadata block prior to Linux boot.
3817 * Configured drives (RAID or JBOD) leave sector 8
3818 * alone, but instead overwrite a high numbered
3819 * sector for the RAID metadata. This sector can
3820 * be determined exactly, by truncating the physical
3821 * drive capacity to a nice even GB value.
3823 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3825 * Warn the user, lest they think we're just buggy.
3827 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3828 " BIOS CORRUPTS DATA on all attached drives,"
3829 " regardless of if/how they are configured."
3831 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3832 " use sectors 8-9 on \"Legacy\" drives,"
3833 " and avoid the final two gigabytes on"
3834 " all RocketRAID BIOS initialized drives.\n");
	fallthrough;
case chip_6042:
	hpriv->ops = &mv6xxx_ops;
3839 hp_flags |= MV_HP_GEN_IIE;
3840 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3841 hp_flags |= MV_HP_CUT_THROUGH;
3843 switch (pdev->revision) {
3844 case 0x2: /* Rev.B0: the first/only public release */
	hp_flags |= MV_HP_ERRATA_60X1C0;
	break;
3848 dev_warn(&pdev->dev,
3849 "Applying 60X1C0 workarounds to unknown rev\n");
3850 hp_flags |= MV_HP_ERRATA_60X1C0;
case chip_soc:
	if (soc_is_65n(hpriv))
3856 hpriv->ops = &mv_soc_65n_ops;
3858 hpriv->ops = &mv_soc_ops;
3859 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3860 MV_HP_ERRATA_60X1C0;
3864 dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
3868 hpriv->hp_flags = hp_flags;
3869 if (hp_flags & MV_HP_PCIE) {
3870 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3871 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3872 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
} else {
	hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3875 hpriv->irq_mask_offset = PCI_IRQ_MASK;
	hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
}

return 0;
3883 * mv_init_host - Perform some early initialization of the host.
3884 * @host: ATA host to initialize
3886 * If possible, do an early global reset of the host. Then do
3887 * our port init and clear/unmask all/relevant host interrupts.
3890 * Inherited from caller.
3892 static int mv_init_host(struct ata_host *host)
3894 int rc = 0, n_hc, port, hc;
3895 struct mv_host_priv *hpriv = host->private_data;
3896 void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, hpriv->board_idx);
if (rc)
	goto done;
3902 if (IS_SOC(hpriv)) {
3903 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3904 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3906 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3907 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3910 /* initialize shadow irq mask with register's value */
3911 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3913 /* global interrupt mask: 0 == mask everything */
3914 mv_set_main_irq_mask(host, ~0, 0);
3916 n_hc = mv_get_hc_count(host->ports[0]->flags);
3918 for (port = 0; port < host->n_ports; port++)
3919 if (hpriv->ops->read_preamp)
3920 hpriv->ops->read_preamp(hpriv, port, mmio);
rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
if (rc)
	goto done;
3926 hpriv->ops->reset_flash(hpriv, mmio);
3927 hpriv->ops->reset_bus(host, mmio);
3928 hpriv->ops->enable_leds(hpriv, mmio);
3930 for (port = 0; port < host->n_ports; port++) {
3931 struct ata_port *ap = host->ports[port];
3932 void __iomem *port_mmio = mv_port_base(mmio, port);
3934 mv_port_init(&ap->ioaddr, port_mmio);
3937 for (hc = 0; hc < n_hc; hc++) {
3938 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3940 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3941 "(before clear)=0x%08x\n", hc,
3942 readl(hc_mmio + HC_CFG),
3943 readl(hc_mmio + HC_IRQ_CAUSE));
3945 /* Clear any currently outstanding hc interrupt conditions */
3946 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3949 if (!IS_SOC(hpriv)) {
3950 /* Clear any currently outstanding host interrupt conditions */
3951 writelfl(0, mmio + hpriv->irq_cause_offset);
3953 /* and unmask interrupt generation for host regs */
3954 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3958 * enable only global host interrupts for now.
3959 * The per-port interrupts get done later as ports are set up.
3961 mv_set_main_irq_mask(host, 0, PCI_ERR);
3962 mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
done:
	return rc;
3968 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3970 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
if (!hpriv->crqb_pool)
	return -ENOMEM;
3975 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
if (!hpriv->crpb_pool)
	return -ENOMEM;
3980 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
if (!hpriv->sg_tbl_pool)
	return -ENOMEM;

return 0;
3988 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3989 const struct mbus_dram_target_info *dram)
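	/*
	 * Each window maps one DRAM chip-select for the SATA DMA engines:
	 * bits 31:16 hold the size mask, bits 15:8 the CS attribute,
	 * bits 7:4 the MBUS target id, and bit 0 enables the window.
	 */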
3993 for (i = 0; i < 4; i++) {
3994 writel(0, hpriv->base + WINDOW_CTRL(i));
3995 writel(0, hpriv->base + WINDOW_BASE(i));
3998 for (i = 0; i < dram->num_cs; i++) {
3999 const struct mbus_dram_window *cs = dram->cs + i;
4001 writel(((cs->size - 1) & 0xffff0000) |
4002 (cs->mbus_attr << 8) |
4003 (dram->mbus_dram_target_id << 4) | 1,
4004 hpriv->base + WINDOW_CTRL(i));
4005 writel(cs->base, hpriv->base + WINDOW_BASE(i));
* mv_platform_probe - handle a positive probe of an SoC Marvell SATA device
4012 * @pdev: platform device found
4015 * Inherited from caller.
4017 static int mv_platform_probe(struct platform_device *pdev)
4019 const struct mv_sata_platform_data *mv_platform_data;
4020 const struct mbus_dram_target_info *dram;
4021 const struct ata_port_info *ppi[] =
4022 { &mv_port_info[chip_soc], NULL };
4023 struct ata_host *host;
4024 struct mv_host_priv *hpriv;
4025 struct resource *res;
4026 int n_ports = 0, irq = 0;
4030 ata_print_version_once(&pdev->dev, DRV_VERSION);
* Simple resource validation.
4035 if (unlikely(pdev->num_resources != 2)) {
4036 dev_err(&pdev->dev, "invalid number of resources\n");
4041 * Get the register base first
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
	return -EINVAL;
4048 if (pdev->dev.of_node) {
4049 rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
4053 "error parsing nr-ports property: %d\n", rc);
4058 dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
4063 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4065 mv_platform_data = dev_get_platdata(&pdev->dev);
4066 n_ports = mv_platform_data->n_ports;
4067 irq = platform_get_irq(pdev, 0);
4074 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4075 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
	return -ENOMEM;
4079 hpriv->port_clks = devm_kcalloc(&pdev->dev,
4080 n_ports, sizeof(struct clk *),
if (!hpriv->port_clks)
	return -ENOMEM;
4084 hpriv->port_phys = devm_kcalloc(&pdev->dev,
4085 n_ports, sizeof(struct phy *),
if (!hpriv->port_phys)
	return -ENOMEM;
4089 host->private_data = hpriv;
4090 hpriv->board_idx = chip_soc;
4093 hpriv->base = devm_ioremap(&pdev->dev, res->start,
			   resource_size(res));
if (!hpriv->base)
	return -ENOMEM;

hpriv->base -= SATAHC0_REG_BASE;
4100 hpriv->clk = clk_get(&pdev->dev, NULL);
4101 if (IS_ERR(hpriv->clk))
4102 dev_notice(&pdev->dev, "cannot get optional clkdev\n");
else
	clk_prepare_enable(hpriv->clk);
4106 for (port = 0; port < n_ports; port++) {
4107 char port_number[16];
4108 sprintf(port_number, "%d", port);
4109 hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4110 if (!IS_ERR(hpriv->port_clks[port]))
4111 clk_prepare_enable(hpriv->port_clks[port]);
4113 sprintf(port_number, "port%d", port);
4114 hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4116 if (IS_ERR(hpriv->port_phys[port])) {
4117 rc = PTR_ERR(hpriv->port_phys[port]);
4118 hpriv->port_phys[port] = NULL;
4119 if (rc != -EPROBE_DEFER)
4120 dev_warn(&pdev->dev, "error getting phy %d", rc);
4122 /* Cleanup only the initialized ports */
	hpriv->n_ports = port;
	goto err;
} else
	phy_power_on(hpriv->port_phys[port]);
4129 /* All the ports have been initialized */
4130 hpriv->n_ports = n_ports;
4133 * (Re-)program MBUS remapping windows if we are asked to.
4135 dram = mv_mbus_dram_info();
if (dram)
	mv_conf_mbus_windows(hpriv, dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
	goto err;
4144 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
4145 * updated in the LP_PHY_CTL register.
4147 if (pdev->dev.of_node &&
4148 of_device_is_compatible(pdev->dev.of_node,
4149 "marvell,armada-370-sata"))
4150 hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4152 /* initialize adapter */
rc = mv_init_host(host);
if (rc)
	goto err;
4157 dev_info(&pdev->dev, "slots %u ports %d\n",
4158 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
if (!rc)
	return 0;

err:
4165 if (!IS_ERR(hpriv->clk)) {
4166 clk_disable_unprepare(hpriv->clk);
4167 clk_put(hpriv->clk);
4169 for (port = 0; port < hpriv->n_ports; port++) {
4170 if (!IS_ERR(hpriv->port_clks[port])) {
4171 clk_disable_unprepare(hpriv->port_clks[port]);
4172 clk_put(hpriv->port_clks[port]);
	phy_power_off(hpriv->port_phys[port]);
}

return rc;
4182 * mv_platform_remove - unplug a platform interface
4183 * @pdev: platform device
4185 * A platform bus SATA device has been unplugged. Perform the needed
4186 * cleanup. Also called on module unload for any active devices.
4188 static int mv_platform_remove(struct platform_device *pdev)
4190 struct ata_host *host = platform_get_drvdata(pdev);
struct mv_host_priv *hpriv = host->private_data;
int port;
4193 ata_host_detach(host);
4195 if (!IS_ERR(hpriv->clk)) {
4196 clk_disable_unprepare(hpriv->clk);
4197 clk_put(hpriv->clk);
4199 for (port = 0; port < host->n_ports; port++) {
4200 if (!IS_ERR(hpriv->port_clks[port])) {
4201 clk_disable_unprepare(hpriv->port_clks[port]);
4202 clk_put(hpriv->port_clks[port]);
	phy_power_off(hpriv->port_phys[port]);
}
return 0;
4209 #ifdef CONFIG_PM_SLEEP
4210 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4212 struct ata_host *host = platform_get_drvdata(pdev);
4214 return ata_host_suspend(host, state);
4219 static int mv_platform_resume(struct platform_device *pdev)
4221 struct ata_host *host = platform_get_drvdata(pdev);
4222 const struct mbus_dram_target_info *dram;
if (host) {
	struct mv_host_priv *hpriv = host->private_data;
4229 * (Re-)program MBUS remapping windows if we are asked to.
4231 dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(hpriv, dram);
4235 /* initialize adapter */
	ret = mv_init_host(host);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": Error during HW init\n");
		return ret;
	}
	ata_host_resume(host);
}

return 0;
4247 #define mv_platform_suspend NULL
4248 #define mv_platform_resume NULL
4252 static const struct of_device_id mv_sata_dt_ids[] = {
4253 { .compatible = "marvell,armada-370-sata", },
4254 { .compatible = "marvell,orion-sata", },
4257 MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4260 static struct platform_driver mv_platform_driver = {
4261 .probe = mv_platform_probe,
4262 .remove = mv_platform_remove,
4263 .suspend = mv_platform_suspend,
4264 .resume = mv_platform_resume,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(mv_sata_dt_ids),
	},
};
4273 static int mv_pci_init_one(struct pci_dev *pdev,
4274 const struct pci_device_id *ent);
4275 #ifdef CONFIG_PM_SLEEP
4276 static int mv_pci_device_resume(struct pci_dev *pdev);
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }	/* terminate list */
};
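/*
 * For reference: PCI_VDEVICE() is shorthand for a full struct
 * pci_device_id initializer. The first entry above expands to roughly:
 *
 *	{ PCI_VENDOR_ID_MARVELL, 0x5040, PCI_ANY_ID, PCI_ANY_ID,
 *	  0, 0, chip_504x },
 *
 * i.e. match any subvendor/subdevice, apply no class filter, and carry
 * the chip_* board index in driver_data for the probe routine.
 */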
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif
};
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI class code: these chips report themselves as
	 * either SCSI or RAID class devices, depending on configuration.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
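/*
 * With illustrative values, the dev_info() above prints something like:
 *
 *	sata_mv 0000:02:00.0: Gen-IIE 32 slots 4 ports SCSI mode IRQ via MSI
 */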
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
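/*
 * A note on the DMA mask setup in mv_pci_init_one(): this driver simply
 * fails the probe if a 64-bit mask cannot be set. Drivers that must also
 * run behind 32-bit-only DMA paths commonly retry with a narrower mask,
 * e.g. (sketch only, not this driver's behaviour):
 */
#if 0
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
#endif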
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif
static int __init mv_init(void)
{
	int rc = -ENODEV;

#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif

	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);