1 // SPDX-License-Identifier: GPL-2.0+
3 * BRIEF MODULE DESCRIPTION
4 * PCI init for Ralink RT2880 solution
6 * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw)
11 * May 2009 Bruce Chang
12 * support RT2880/RT3883 PCIe
14 * May 2011 Bruce Chang
15 * support RT6855/MT7620 PCIe
18 #include <linux/bitops.h>
19 #include <linux/delay.h>
20 #include <linux/gpio/consumer.h>
21 #include <linux/iopoll.h>
22 #include <linux/module.h>
24 #include <linux/of_address.h>
25 #include <linux/of_irq.h>
26 #include <linux/of_pci.h>
27 #include <linux/of_platform.h>
28 #include <linux/pci.h>
29 #include <linux/phy/phy.h>
30 #include <linux/platform_device.h>
31 #include <linux/reset.h>
32 #include <linux/sys_soc.h>
34 #include <ralink_regs.h>
36 #include "../../pci/pci.h"
/* sysctl GPIO mode register: selects PERST pin function */
#define MT7621_GPIO_MODE		0x60

/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM			0x70c
#define PCIE_FTS_NUM_MASK		GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x)		(((x) & 0xff) << 8)

/* rt_sysc_membase relative registers */
#define RALINK_CLKCFG1			0x30

/* Host-PCI bridge registers */
#define RALINK_PCI_PCICFG_ADDR		0x0000
#define RALINK_PCI_PCIMSK_ADDR		0x000C
#define RALINK_PCI_CONFIG_ADDR		0x0020
#define RALINK_PCI_CONFIG_DATA		0x0024
#define RALINK_PCI_MEMBASE		0x0028
#define RALINK_PCI_IOBASE		0x002C

/* PCICFG virtual bridges */
#define PCIE_P2P_CNT			3
#define PCIE_P2P_BR_DEVNUM_SHIFT(p)	(16 + (p) * 4)
#define PCIE_P2P_BR_DEVNUM0_SHIFT	PCIE_P2P_BR_DEVNUM_SHIFT(0)
#define PCIE_P2P_BR_DEVNUM1_SHIFT	PCIE_P2P_BR_DEVNUM_SHIFT(1)
#define PCIE_P2P_BR_DEVNUM2_SHIFT	PCIE_P2P_BR_DEVNUM_SHIFT(2)
#define PCIE_P2P_BR_DEVNUM_MASK		0xf
#define PCIE_P2P_BR_DEVNUM_MASK_FULL	(0xfff << PCIE_P2P_BR_DEVNUM0_SHIFT)

/* PCIe RC control registers */
#define MT7621_PCIE_OFFSET		0x2000
#define MT7621_NEXT_PORT		0x1000

#define RALINK_PCI_BAR0SETUP_ADDR	0x0010
#define RALINK_PCI_IMBASEBAR0_ADDR	0x0018
#define RALINK_PCI_ID			0x0030
#define RALINK_PCI_CLASS		0x0034
#define RALINK_PCI_SUBID		0x0038
#define RALINK_PCI_STATUS		0x0050

/* Some definition values */
#define PCIE_REVISION_ID		BIT(0)
#define PCIE_CLASS_CODE			(0x60400 << 8)
#define PCIE_BAR_MAP_MAX		GENMASK(30, 16)
#define PCIE_BAR_ENABLE			BIT(0)
#define PCIE_PORT_INT_EN(x)		BIT(20 + (x))
#define PCIE_PORT_CLK_EN(x)		BIT(24 + (x))
#define PCIE_PORT_LINKUP		BIT(0)

#define MEMORY_BASE			0x0
#define PERST_MODE_MASK			GENMASK(11, 10)
#define PERST_MODE_GPIO			BIT(10)
#define PERST_DELAY_MS			100
92 * struct mt7621_pcie_port - PCIe port information
93 * @base: I/O mapped register base
95 * @pcie: pointer to PCIe host info
96 * @phy: pointer to PHY control block
97 * @pcie_rst: pointer to port reset control
98 * @gpio_rst: gpio reset
101 * @enabled: indicates if port is enabled
103 struct mt7621_pcie_port {
105 struct list_head list;
106 struct mt7621_pcie *pcie;
108 struct reset_control *pcie_rst;
109 struct gpio_desc *gpio_rst;
116 * struct mt7621_pcie - PCIe host information
117 * @base: IO Mapped Register Base
119 * @mem: non-prefetchable memory resource
121 * @offset: IO / Memory offset
122 * @dev: Pointer to PCIe device
123 * @io_map_base: virtual memory base address for io
124 * @ports: pointer to PCIe port information
125 * @irq_map: irq mapping info according pcie link status
126 * @resets_inverted: depends on chip revision
127 * reset lines are inverted.
134 struct resource busn;
139 unsigned long io_map_base;
140 struct list_head ports;
141 int irq_map[PCIE_P2P_CNT];
142 bool resets_inverted;
145 static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
147 return readl(pcie->base + reg);
150 static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
152 writel(val, pcie->base + reg);
155 static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
157 u32 val = readl(pcie->base + reg);
161 writel(val, pcie->base + reg);
164 static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
166 return readl(port->base + reg);
169 static inline void pcie_port_write(struct mt7621_pcie_port *port,
172 writel(val, port->base + reg);
175 static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
176 unsigned int func, unsigned int where)
178 return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) |
179 (func << 8) | (where & 0xfc) | 0x80000000;
182 static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
183 unsigned int devfn, int where)
185 struct mt7621_pcie *pcie = bus->sysdata;
186 u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
187 PCI_FUNC(devfn), where);
189 writel(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
191 return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
194 struct pci_ops mt7621_pci_ops = {
195 .map_bus = mt7621_pcie_map_bus,
196 .read = pci_generic_config_read,
197 .write = pci_generic_config_write,
200 static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
202 u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
204 pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
205 return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
208 static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
211 u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
213 pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
214 pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
217 static inline void mt7621_rst_gpio_pcie_assert(struct mt7621_pcie_port *port)
220 gpiod_set_value(port->gpio_rst, 1);
223 static inline void mt7621_rst_gpio_pcie_deassert(struct mt7621_pcie_port *port)
226 gpiod_set_value(port->gpio_rst, 0);
229 static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
231 return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0;
234 static inline void mt7621_pcie_port_clk_enable(struct mt7621_pcie_port *port)
236 rt_sysc_m32(0, PCIE_PORT_CLK_EN(port->slot), RALINK_CLKCFG1);
239 static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
241 rt_sysc_m32(PCIE_PORT_CLK_EN(port->slot), 0, RALINK_CLKCFG1);
244 static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
246 struct mt7621_pcie *pcie = port->pcie;
248 if (pcie->resets_inverted)
249 reset_control_assert(port->pcie_rst);
251 reset_control_deassert(port->pcie_rst);
254 static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
256 struct mt7621_pcie *pcie = port->pcie;
258 if (pcie->resets_inverted)
259 reset_control_deassert(port->pcie_rst);
261 reset_control_assert(port->pcie_rst);
264 static void setup_cm_memory_region(struct mt7621_pcie *pcie)
266 struct resource *mem_resource = &pcie->mem;
267 struct device *dev = pcie->dev;
268 resource_size_t mask;
270 if (mips_cps_numiocu(0)) {
272 * FIXME: hardware doesn't accept mask values with 1s after
273 * 0s (e.g. 0xffef), so it would be great to warn if that's
276 mask = ~(mem_resource->end - mem_resource->start);
278 write_gcr_reg1_base(mem_resource->start);
279 write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
280 dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
281 (unsigned long long)read_gcr_reg1_base(),
282 (unsigned long long)read_gcr_reg1_mask());
286 static int mt7621_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
288 struct mt7621_pcie *pcie = pdev->bus->sysdata;
289 struct device *dev = pcie->dev;
290 int irq = pcie->irq_map[slot];
292 dev_info(dev, "bus=%d slot=%d irq=%d\n", pdev->bus->number, slot, irq);
296 static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
298 struct device *dev = pcie->dev;
299 struct device_node *node = dev->of_node;
300 struct of_pci_range_parser parser;
301 struct of_pci_range range;
304 if (of_pci_range_parser_init(&parser, node)) {
305 dev_err(dev, "missing \"ranges\" property\n");
309 for_each_of_pci_range(&parser, &range) {
310 switch (range.flags & IORESOURCE_TYPE_BITS) {
313 (unsigned long)ioremap(range.cpu_addr,
315 of_pci_range_to_resource(&range, node, &pcie->io);
316 pcie->io.start = range.cpu_addr;
317 pcie->io.end = range.cpu_addr + range.size - 1;
318 pcie->offset.io = 0x00000000UL;
321 of_pci_range_to_resource(&range, node, &pcie->mem);
322 pcie->offset.mem = 0x00000000UL;
327 err = of_pci_parse_bus_range(node, &pcie->busn);
329 dev_err(dev, "failed to parse bus ranges property: %d\n", err);
330 pcie->busn.name = node->name;
331 pcie->busn.start = 0;
332 pcie->busn.end = 0xff;
333 pcie->busn.flags = IORESOURCE_BUS;
336 set_io_port_base(pcie->io_map_base);
341 static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
342 struct device_node *node,
345 struct mt7621_pcie_port *port;
346 struct device *dev = pcie->dev;
347 struct platform_device *pdev = to_platform_device(dev);
348 struct device_node *pnode = dev->of_node;
349 struct resource regs;
353 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
357 err = of_address_to_resource(pnode, slot + 1, ®s);
359 dev_err(dev, "missing \"reg\" property\n");
363 port->base = devm_ioremap_resource(dev, ®s);
364 if (IS_ERR(port->base))
365 return PTR_ERR(port->base);
367 snprintf(name, sizeof(name), "pcie%d", slot);
368 port->pcie_rst = devm_reset_control_get_exclusive(dev, name);
369 if (PTR_ERR(port->pcie_rst) == -EPROBE_DEFER) {
370 dev_err(dev, "failed to get pcie%d reset control\n", slot);
371 return PTR_ERR(port->pcie_rst);
374 snprintf(name, sizeof(name), "pcie-phy%d", slot);
375 port->phy = devm_phy_get(dev, name);
376 if (IS_ERR(port->phy) && slot != 1)
377 return PTR_ERR(port->phy);
379 port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot,
381 if (IS_ERR(port->gpio_rst)) {
382 dev_err(dev, "Failed to get GPIO for PCIe%d\n", slot);
383 return PTR_ERR(port->gpio_rst);
389 port->irq = platform_get_irq(pdev, slot);
391 dev_err(dev, "Failed to get IRQ for PCIe%d\n", slot);
395 INIT_LIST_HEAD(&port->list);
396 list_add_tail(&port->list, &pcie->ports);
401 static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
403 struct device *dev = pcie->dev;
404 struct device_node *node = dev->of_node, *child;
405 struct resource regs;
408 err = of_address_to_resource(node, 0, ®s);
410 dev_err(dev, "missing \"reg\" property\n");
414 pcie->base = devm_ioremap_resource(dev, ®s);
415 if (IS_ERR(pcie->base))
416 return PTR_ERR(pcie->base);
418 for_each_available_child_of_node(node, child) {
421 err = of_pci_get_devfn(child);
424 dev_err(dev, "failed to parse devfn: %d\n", err);
428 slot = PCI_SLOT(err);
430 err = mt7621_pcie_parse_port(pcie, child, slot);
440 static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
442 struct mt7621_pcie *pcie = port->pcie;
443 struct device *dev = pcie->dev;
444 u32 slot = port->slot;
447 err = phy_init(port->phy);
449 dev_err(dev, "failed to initialize port%d phy\n", slot);
453 err = phy_power_on(port->phy);
455 dev_err(dev, "failed to power on port%d phy\n", slot);
460 port->enabled = true;
465 static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie)
467 struct mt7621_pcie_port *port;
469 list_for_each_entry(port, &pcie->ports, list) {
470 /* PCIe RC reset assert */
471 mt7621_control_assert(port);
473 /* PCIe EP reset assert */
474 mt7621_rst_gpio_pcie_assert(port);
477 mdelay(PERST_DELAY_MS);
480 static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie)
482 struct mt7621_pcie_port *port;
484 list_for_each_entry(port, &pcie->ports, list)
485 mt7621_control_deassert(port);
488 static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie)
490 struct mt7621_pcie_port *port;
492 list_for_each_entry(port, &pcie->ports, list)
493 mt7621_rst_gpio_pcie_deassert(port);
495 mdelay(PERST_DELAY_MS);
498 static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
500 struct device *dev = pcie->dev;
501 struct mt7621_pcie_port *port, *tmp;
504 rt_sysc_m32(PERST_MODE_MASK, PERST_MODE_GPIO, MT7621_GPIO_MODE);
506 mt7621_pcie_reset_assert(pcie);
507 mt7621_pcie_reset_rc_deassert(pcie);
509 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
510 u32 slot = port->slot;
513 port->enabled = true;
517 err = mt7621_pcie_init_port(port);
519 dev_err(dev, "Initiating port %d failed\n", slot);
520 list_del(&port->list);
524 mt7621_pcie_reset_ep_deassert(pcie);
527 list_for_each_entry(port, &pcie->ports, list) {
528 u32 slot = port->slot;
530 if (!mt7621_pcie_port_is_linkup(port)) {
531 dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
533 mt7621_control_assert(port);
534 mt7621_pcie_port_clk_disable(port);
535 port->enabled = false;
542 if (slot == 1 && tmp && !tmp->enabled)
543 phy_power_off(tmp->phy);
549 static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
551 struct mt7621_pcie *pcie = port->pcie;
552 u32 slot = port->slot;
553 u32 offset = MT7621_PCIE_OFFSET + (slot * MT7621_NEXT_PORT);
556 /* enable pcie interrupt */
557 val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
558 val |= PCIE_PORT_INT_EN(slot);
559 pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
561 /* map 2G DDR region */
562 pcie_write(pcie, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
563 offset + RALINK_PCI_BAR0SETUP_ADDR);
564 pcie_write(pcie, MEMORY_BASE,
565 offset + RALINK_PCI_IMBASEBAR0_ADDR);
567 /* configure class code and revision ID */
568 pcie_write(pcie, PCIE_CLASS_CODE | PCIE_REVISION_ID,
569 offset + RALINK_PCI_CLASS);
572 static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
574 struct device *dev = pcie->dev;
575 struct mt7621_pcie_port *port;
576 u8 num_slots_enabled = 0;
580 /* Setup MEMWIN and IOWIN */
581 pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
582 pcie_write(pcie, pcie->io.start, RALINK_PCI_IOBASE);
584 list_for_each_entry(port, &pcie->ports, list) {
586 mt7621_pcie_port_clk_enable(port);
587 mt7621_pcie_enable_port(port);
588 dev_info(dev, "PCIE%d enabled\n", port->slot);
593 for (slot = 0; slot < num_slots_enabled; slot++) {
594 val = read_config(pcie, slot, PCI_COMMAND);
595 val |= PCI_COMMAND_MASTER;
596 write_config(pcie, slot, PCI_COMMAND, val);
597 /* configure RC FTS number to 250 when it leaves L0s */
598 val = read_config(pcie, slot, PCIE_FTS_NUM);
599 val &= ~PCIE_FTS_NUM_MASK;
600 val |= PCIE_FTS_NUM_L0(0x50);
601 write_config(pcie, slot, PCIE_FTS_NUM, val);
605 static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
607 u32 pcie_link_status = 0;
610 u32 p2p_br_devnum[PCIE_P2P_CNT];
611 int irqs[PCIE_P2P_CNT];
612 struct mt7621_pcie_port *port;
614 list_for_each_entry(port, &pcie->ports, list) {
615 u32 slot = port->slot;
617 irqs[i++] = port->irq;
619 pcie_link_status |= BIT(slot);
622 if (pcie_link_status == 0)
626 * Assign device numbers from zero to the enabled ports,
627 * then assigning remaining device numbers to any disabled
630 for (i = 0; i < PCIE_P2P_CNT; i++)
631 if (pcie_link_status & BIT(i))
632 p2p_br_devnum[i] = n++;
634 for (i = 0; i < PCIE_P2P_CNT; i++)
635 if ((pcie_link_status & BIT(i)) == 0)
636 p2p_br_devnum[i] = n++;
638 pcie_rmw(pcie, RALINK_PCI_PCICFG_ADDR,
639 PCIE_P2P_BR_DEVNUM_MASK_FULL,
640 (p2p_br_devnum[0] << PCIE_P2P_BR_DEVNUM0_SHIFT) |
641 (p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) |
642 (p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT));
646 for (i = 0; i < PCIE_P2P_CNT; i++)
647 if (pcie_link_status & BIT(i))
648 pcie->irq_map[n++] = irqs[i];
650 for (i = n; i < PCIE_P2P_CNT; i++)
651 pcie->irq_map[i] = -1;
656 static void mt7621_pcie_add_resources(struct mt7621_pcie *pcie,
657 struct list_head *res)
659 pci_add_resource_offset(res, &pcie->io, pcie->offset.io);
660 pci_add_resource_offset(res, &pcie->mem, pcie->offset.mem);
663 static int mt7621_pcie_register_host(struct pci_host_bridge *host,
664 struct list_head *res)
666 struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
668 list_splice_init(res, &host->windows);
669 host->busnr = pcie->busn.start;
670 host->dev.parent = pcie->dev;
671 host->ops = &mt7621_pci_ops;
672 host->map_irq = mt7621_map_irq;
673 host->swizzle_irq = pci_common_swizzle;
674 host->sysdata = pcie;
676 return pci_host_probe(host);
679 static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
680 { .soc_id = "mt7621", .revision = "E2" }
683 static int mt7621_pci_probe(struct platform_device *pdev)
685 struct device *dev = &pdev->dev;
686 const struct soc_device_attribute *attr;
687 struct mt7621_pcie *pcie;
688 struct pci_host_bridge *bridge;
695 bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
699 pcie = pci_host_bridge_priv(bridge);
701 platform_set_drvdata(pdev, pcie);
702 INIT_LIST_HEAD(&pcie->ports);
704 attr = soc_device_match(mt7621_pci_quirks_match);
706 pcie->resets_inverted = true;
708 err = mt7621_pcie_parse_dt(pcie);
710 dev_err(dev, "Parsing DT failed\n");
714 err = mt7621_pci_parse_request_of_pci_ranges(pcie);
716 dev_err(dev, "Error requesting pci resources from ranges");
720 /* set resources limits */
721 ioport_resource.start = pcie->io.start;
722 ioport_resource.end = pcie->io.end;
724 mt7621_pcie_init_ports(pcie);
726 err = mt7621_pcie_init_virtual_bridges(pcie);
728 dev_err(dev, "Nothing is connected in virtual bridges. Exiting...");
732 mt7621_pcie_enable_ports(pcie);
734 setup_cm_memory_region(pcie);
736 mt7621_pcie_add_resources(pcie, &res);
738 err = mt7621_pcie_register_host(bridge, &res);
740 dev_err(dev, "Error registering host\n");
747 static const struct of_device_id mt7621_pci_ids[] = {
748 { .compatible = "mediatek,mt7621-pci" },
751 MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
753 static struct platform_driver mt7621_pci_driver = {
754 .probe = mt7621_pci_probe,
756 .name = "mt7621-pci",
757 .of_match_table = of_match_ptr(mt7621_pci_ids),
761 builtin_platform_driver(mt7621_pci_driver);