// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

#include "../pci.h"

/* PCIe core registers */
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
#define PIO_STAT (PIO_BASE_ADDR + 0x4)
#define PIO_COMPLETION_STATUS_SHIFT 7
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
#define PIO_START (PIO_BASE_ADDR + 0x1c)
#define PIO_ISR (PIO_BASE_ADDR + 0x20)
#define PIO_ISRM (PIO_BASE_ADDR + 0x24)

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR 0x4800
#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
#define PCIE_GEN_SEL_MSK 0x3
#define PCIE_GEN_SEL_SHIFT 0x0
#define SPEED_GEN_1 0
#define SPEED_GEN_2 1
#define SPEED_GEN_3 2
#define IS_RC_MSK 1
#define IS_RC_SHIFT 2
#define LANE_CNT_MSK 0x18
#define LANE_CNT_SHIFT 0x3
#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
#define LINK_TRAINING_EN BIT(6)
#define LEGACY_INTA BIT(28)
#define LEGACY_INTB BIT(29)
#define LEGACY_INTC BIT(30)
#define LEGACY_INTD BIT(31)
#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
#define HOT_RESET_GEN BIT(0)
#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
#define PCIE_CORE_CTRL2_RESERVED 0x7
#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)

/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
#define OB_WIN_COUNT 8
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
				      OB_WIN_BLOCK_SIZE * (win) + \
				      (offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
#define OB_WIN_ENABLE BIT(0)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
#define OB_WIN_FUNC_NUM_SHIFT 24
#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
#define OB_WIN_BUS_NUM_BITS_SHIFT 20
#define OB_WIN_MSG_CODE_ENABLE BIT(22)
#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
#define OB_WIN_MSG_CODE_SHIFT 14
#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
#define OB_WIN_ATTR_ENABLE BIT(11)
#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
#define OB_WIN_ATTR_TC_SHIFT 8
#define OB_WIN_ATTR_RELAXED BIT(7)
#define OB_WIN_ATTR_NOSNOOP BIT(6)
#define OB_WIN_ATTR_POISON BIT(5)
#define OB_WIN_ATTR_IDO BIT(4)
#define OB_WIN_TYPE_MASK GENMASK(3, 0)
#define OB_WIN_TYPE_SHIFT 0
#define OB_WIN_TYPE_MEM 0x0
#define OB_WIN_TYPE_IO 0x4
#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
#define OB_WIN_TYPE_MSG 0xc

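/*
 * Illustrative example (values not taken from any particular board): a
 * 16 MiB MEM region at 0xe8000000 that needs a window would be programmed
 * via advk_pcie_set_ob_win() with match = remap = 0xe8000000 (assuming a
 * 1:1 mapping), mask = ~(SZ_16M - 1) and actions = OB_WIN_TYPE_MEM; an
 * address A is then routed through the window when it matches 'match'
 * under the given 'mask'.
 */
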
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
#define RC_BAR_CONFIG 0x300

/* LTSSM values in CFG_REG */
enum {
	LTSSM_DETECT_QUIET = 0x0,
	LTSSM_DETECT_ACTIVE = 0x1,
	LTSSM_POLLING_ACTIVE = 0x2,
	LTSSM_POLLING_COMPLIANCE = 0x3,
	LTSSM_POLLING_CONFIGURATION = 0x4,
	LTSSM_CONFIG_LINKWIDTH_START = 0x5,
	LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
	LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
	LTSSM_CONFIG_LANENUM_WAIT = 0x8,
	LTSSM_CONFIG_COMPLETE = 0x9,
	LTSSM_CONFIG_IDLE = 0xa,
	LTSSM_RECOVERY_RCVR_LOCK = 0xb,
	LTSSM_RECOVERY_SPEED = 0xc,
	LTSSM_RECOVERY_RCVR_CFG = 0xd,
	LTSSM_RECOVERY_IDLE = 0xe,
	LTSSM_L0 = 0x10,
	LTSSM_RX_L0S_ENTRY = 0x11,
	LTSSM_RX_L0S_IDLE = 0x12,
	LTSSM_RX_L0S_FTS = 0x13,
	LTSSM_TX_L0S_ENTRY = 0x14,
	LTSSM_TX_L0S_IDLE = 0x15,
	LTSSM_TX_L0S_FTS = 0x16,
	LTSSM_L1_ENTRY = 0x17,
	LTSSM_L1_IDLE = 0x18,
	LTSSM_L2_IDLE = 0x19,
	LTSSM_L2_TRANSMIT_WAKE = 0x1a,
	LTSSM_DISABLED = 0x20,
	LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
	LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
	LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
	LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
	LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
	LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
	LTSSM_HOT_RESET = 0x27,
	LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
	LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
	LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
	LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
};

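/*
 * advk_pcie_link_up() below treats any state from LTSSM_L0 (0x10) up to,
 * but not including, LTSSM_DISABLED (0x20) as an operational link.
 */
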
/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
#define CTRL_MODE_SHIFT 0x0
#define CTRL_MODE_MASK 0x1
#define PCIE_CORE_MODE_DIRECT 0x0
#define PCIE_CORE_MODE_COMMAND 0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR 0x1b000
#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
#define PCIE_IRQ_CMDQ_INT BIT(0)
#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
#define PCIE_IRQ_DMA_INT BIT(4)
#define PCIE_IRQ_IB_DXFERDONE BIT(5)
#define PCIE_IRQ_OB_DXFERDONE BIT(6)
#define PCIE_IRQ_OB_RXFERDONE BIT(7)
#define PCIE_IRQ_COMPQ_INT BIT(12)
#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
#define PCIE_IRQ_CORE_INT BIT(16)
#define PCIE_IRQ_CORE_INT_PIO BIT(17)
#define PCIE_IRQ_DPMU_INT BIT(18)
#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
#define PCIE_IRQ_MSI_INT1_DET BIT(20)
#define PCIE_IRQ_MSI_INT2_DET BIT(21)
#define PCIE_IRQ_RC_DBELL_DET BIT(22)
#define PCIE_IRQ_EP_STATUS BIT(23)
#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT

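/*
 * Interrupt fan-out as implemented below: the single GIC SPI line is
 * handled by advk_pcie_irq_handler(), which only reacts to
 * PCIE_IRQ_CORE_INT in HOST_CTRL_INT_STATUS_REG; advk_pcie_handle_int()
 * then demultiplexes the ISR0/ISR1 registers into MSI handling
 * (PCIE_ISR0_MSI_INT_PENDING) and the four legacy INTx interrupts
 * (PCIE_ISR1_INTX_ASSERT(0..3)).
 */
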
/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0 0x8
#define PCIE_CONFIG_RD_TYPE1 0x9
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb

#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
#define PCIE_CONF_REG(reg) ((reg) & 0xffc)
#define PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))

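/*
 * Worked example: a config access to register 0x10 (BAR0) of bus 1,
 * device 0, function 0 encodes as PCIE_CONF_ADDR(1, PCI_DEVFN(0, 0), 0x10)
 * = (1 << 20) | 0x10 = 0x100010, which the PIO code writes to PIO_ADDR_LS.
 */
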
#define PIO_RETRY_CNT 750000 /* 1.5 s */
#define PIO_RETRY_DELAY 2 /* 2 us */

#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

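/*
 * advk_pcie_wait_for_link() below polls up to LINK_WAIT_MAX_RETRIES times
 * with a 90-100 ms sleep between attempts, so a link that never trains is
 * given at least 10 * 90 ms = 900 ms before the wait is given up.
 */
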
#define MSI_IRQ_NUM 32

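/* One MSI vector per bit of the 32-bit PCIE_MSI_STATUS_REG/PCIE_MSI_MASK_REG */
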
struct advk_pcie {
	struct platform_device *pdev;
	void __iomem *base;
	struct list_head resources;
	struct {
		phys_addr_t match;
		phys_addr_t remap;
		phys_addr_t mask;
		u32 actions;
	} wins[OB_WIN_COUNT];
	u8 wins_count;
	struct irq_domain *irq_domain;
	struct irq_chip irq_chip;
	raw_spinlock_t irq_lock;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_inner_domain;
	struct irq_chip msi_bottom_irq_chip;
	struct irq_chip msi_irq_chip;
	struct msi_domain_info msi_domain_info;
	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
	struct mutex msi_used_lock;
	u16 msi_msg;
	int root_bus_nr;
	int link_gen;
	struct gpio_desc *reset_gpio;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
	writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
	return readl(pcie->base + reg);
}

static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
	u32 val;

	val = advk_readl(pcie, CFG_REG);
	return (val >> LTSSM_SHIFT) & LTSSM_MASK;
}

static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
	/* check if LTSSM is in normal operation - some L* state */
	u8 ltssm_state = advk_pcie_ltssm_state(pcie);

	return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
	/*
	 * According to PCIe Base specification 3.0, Table 4-14: Link
	 * Status Mapped to the LTSSM is Link Training mapped to LTSSM
	 * Configuration and Recovery states.
	 */
	u8 ltssm_state = advk_pcie_ltssm_state(pcie);

	return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
		 ltssm_state < LTSSM_L0) ||
		(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}

static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (advk_pcie_link_up(pcie))
			return 0;

		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
	if (!pcie->reset_gpio)
		return;

	/* 10ms delay is needed for some cards */
	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	int ret;

	/*
	 * Setup PCIe rev / gen compliance based on device tree property
	 * 'max-link-speed' which also forces maximal link speed.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~PCIE_GEN_SEL_MSK;
	if (pcie->link_gen == 3)
		reg |= SPEED_GEN_3;
	else if (pcie->link_gen == 2)
		reg |= SPEED_GEN_2;
	else
		reg |= SPEED_GEN_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Set maximal link speed value also into PCIe Link Control 2 register.
	 * Armada 3700 Functional Specification says that default value is based
	 * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
	 */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
	reg &= ~PCI_EXP_LNKCTL2_TLS;
	if (pcie->link_gen == 3)
		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
	else if (pcie->link_gen == 2)
		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
	else
		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

	/* Enable link training after selecting PCIe generation */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= LINK_TRAINING_EN;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Reset PCIe card via PERST# signal. Some cards are not detected
	 * during link training when they are in some non-initial state.
	 */
	advk_pcie_issue_perst(pcie);

	/*
	 * PERST# signal could have been asserted by pinctrl subsystem before
	 * probe() callback has been called or issued explicitly by reset gpio
	 * function advk_pcie_issue_perst(), putting the endpoint into
	 * fundamental reset. As required by PCI Express spec (PCI Express
	 * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
	 * Conventional Reset) a delay of at least 100ms after such a reset is
	 * needed before sending a Configuration Request to the device.
	 * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
	 * waits for link at least 900ms.
	 */
	ret = advk_pcie_wait_for_link(pcie);
	if (ret < 0)
		dev_err(dev, "link never came up\n");
	else
		dev_info(dev, "link up\n");
}

/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
				 phys_addr_t match, phys_addr_t remap,
				 phys_addr_t mask, u32 actions)
{
	advk_writel(pcie, OB_WIN_ENABLE |
			  lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
	advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
	u32 reg;
	int i;

	/* Set to Direct mode */
	reg = advk_readl(pcie, CTRL_CONFIG_REG);
	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
	reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
	advk_writel(pcie, reg, CTRL_CONFIG_REG);

	/* Set PCI global control register to RC mode */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= (IS_RC_MSK << IS_RC_SHIFT);
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* Set Advanced Error Capabilities and Control PF0 register */
	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
	advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

	/* Set PCIe Device Control register */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
	reg &= ~PCI_EXP_DEVCTL_READRQ;
	reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
	reg |= PCI_EXP_DEVCTL_READRQ_512B;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

	/* Program PCIe Control 2 to disable strict ordering */
	reg = PCIE_CORE_CTRL2_RESERVED |
		PCIE_CORE_CTRL2_TD_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Set lane X1 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LANE_CNT_MSK;
	reg |= LANE_COUNT_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* Enable MSI */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Disable All ISR0/1 Sources */
	reg = PCIE_ISR0_ALL_MASK;
	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);

	/* Unmask all MSIs */
	advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

	/* Enable summary interrupt for GIC SPI source */
	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
	advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

	/*
	 * Enable AXI address window location generation:
	 * When it is enabled, the default outbound window
	 * configurations (Default User Field: 0xD0074CFC)
	 * are used for transparent address translation of
	 * the outbound transactions. Thus, PCIe address
	 * windows are not required for transparent memory
	 * access when default outbound window configuration
	 * is set for memory access.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/*
	 * Set memory access in Default User Field so it
	 * is not required to configure PCIe address for
	 * transparent memory access.
	 */
	advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

	/*
	 * Bypass the address window mapping for PIO:
	 * Since PIO access already contains all required
	 * info over AXI interface by PIO registers, the
	 * address window is not required.
	 */
	reg = advk_readl(pcie, PIO_CTRL);
	reg |= PIO_CTRL_ADDR_WIN_DISABLE;
	advk_writel(pcie, reg, PIO_CTRL);

	/*
	 * Configure PCIe address windows for non-memory or
	 * non-transparent access as by default PCIe uses
	 * transparent memory access.
	 */
	for (i = 0; i < pcie->wins_count; i++)
		advk_pcie_set_ob_win(pcie, i,
				     pcie->wins[i].match, pcie->wins[i].remap,
				     pcie->wins[i].mask, pcie->wins[i].actions);

	/* Disable remaining PCIe outbound windows */
	for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	advk_pcie_train_link(pcie);

	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
		PCIE_CORE_CMD_IO_ACCESS_EN |
		PCIE_CORE_CMD_MEM_IO_REQ_EN;
	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}

static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	unsigned int status;
	char *strcomp_status, *str_posted;

	reg = advk_readl(pcie, PIO_STAT);
	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
		PIO_COMPLETION_STATUS_SHIFT;

	/*
	 * According to HW spec, the PIO status check sequence is as below:
	 * 1) even if COMPLETION_STATUS(bit9:7) indicates successful,
	 *    it still needs to check Error Status(bit11); only when this bit
	 *    indicates that no error happened is the operation successful.
	 * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
	 *    means a PIO write error, and for PIO read it is successful with
	 *    a read value of 0xFFFFFFFF.
	 * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
	 *    only means a PIO write error, and for PIO read it is successful
	 *    with a read value of 0xFFFF0001.
	 * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
	 *    error for both PIO read and PIO write operation.
	 * 5) other errors are indicated as 'unknown'.
	 */
	switch (status) {
	case PIO_COMPLETION_STATUS_OK:
		if (reg & PIO_ERR_STATUS) {
			strcomp_status = "COMP_ERR";
			break;
		}
		/* Get the read result */
		if (val)
			*val = advk_readl(pcie, PIO_RD_DATA);
		/* No error */
		strcomp_status = NULL;
		break;
	case PIO_COMPLETION_STATUS_UR:
		strcomp_status = "UR";
		break;
	case PIO_COMPLETION_STATUS_CRS:
		/*
		 * PCIe r4.0, sec 2.3.2, says:
		 * If CRS Software Visibility is not enabled, the Root Complex
		 * must re-issue the Configuration Request as a new Request.
		 * A Root Complex implementation may choose to limit the number
		 * of Configuration Request/CRS Completion Status loops before
		 * determining that something is wrong with the target of the
		 * Request and taking appropriate action, e.g., complete the
		 * Request to the host as a failed transaction.
		 *
		 * To simplify implementation do not re-issue the Configuration
		 * Request and complete the Request as a failed transaction.
		 */
		strcomp_status = "CRS";
		break;
	case PIO_COMPLETION_STATUS_CA:
		strcomp_status = "CA";
		break;
	default:
		strcomp_status = "Unknown";
		break;
	}

	if (!strcomp_status)
		return 0;

	if (reg & PIO_NON_POSTED_REQ)
		str_posted = "Non-posted";
	else
		str_posted = "Posted";

	dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

	return -EFAULT;
}

static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	int i;

	for (i = 0; i < PIO_RETRY_CNT; i++) {
		u32 start, isr;

		start = advk_readl(pcie, PIO_START);
		isr = advk_readl(pcie, PIO_ISR);
		if (!start && isr)
			return 0;
		udelay(PIO_RETRY_DELAY);
	}

	dev_err(dev, "PIO read/write transfer time out\n");
	return -ETIMEDOUT;
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
				   int devfn)
{
	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
		return false;

	/*
	 * If the link goes down after we check for link-up, nothing bad
	 * happens but the config access times out.
	 */
	if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
		return false;

	return true;
}

static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	/*
	 * Trying to start a new PIO transfer when the previous one has not
	 * completed causes an External Abort on the CPU which results in a
	 * kernel panic:
	 *
	 *     SError Interrupt on CPU0, code 0xbf000002 -- SError
	 *     Kernel panic - not syncing: Asynchronous SError Interrupt
	 *
	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
	 * concurrent calls at the same time. But because a PIO transfer may
	 * take about 1.5s when the link is down or the card is disconnected,
	 * advk_pcie_wait_pio() does not always wait for completion.
	 *
	 * Some versions of ARM Trusted Firmware handle this External Abort at
	 * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
	 */
	if (advk_readl(pcie, PIO_START)) {
		dev_err(dev, "Previous PIO read/write transfer is still running\n");
		return true;
	}

	return false;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 *val)
{
	struct advk_pcie *pcie = bus->sysdata;
	int ret;
	u32 reg;

	if (!advk_pcie_valid_device(pcie, bus, devfn)) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (advk_pcie_pio_is_running(pcie)) {
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus->number == pcie->root_bus_nr)
		reg |= PCIE_CONFIG_RD_TYPE0;
	else
		reg |= PCIE_CONFIG_RD_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Program the data strobe */
	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

	/* Clear PIO DONE ISR and start the transfer */
	advk_writel(pcie, 1, PIO_ISR);
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0) {
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	/* Check PIO status and get the read result */
	ret = advk_pcie_check_pio_status(pcie, val);
	if (ret < 0) {
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	u32 data_strobe = 0x0;
	int offset;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (where % size)
		return PCIBIOS_SET_FAILED;

	if (advk_pcie_pio_is_running(pcie))
		return PCIBIOS_SET_FAILED;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus->number == pcie->root_bus_nr)
		reg |= PCIE_CONFIG_WR_TYPE0;
	else
		reg |= PCIE_CONFIG_WR_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Calculate the write strobe */
	offset = where & 0x3;
	reg = val << (8 * offset);
	data_strobe = GENMASK(size - 1, 0) << offset;
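
	/*
	 * Example: a 2-byte write at where = 0x06 gives offset = 2, so
	 * reg = val << 16 places the value in the upper half-word and
	 * data_strobe = GENMASK(1, 0) << 2 = 0xc enables byte lanes 2 and 3.
	 */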

	/* Program the data register */
	advk_writel(pcie, reg, PIO_WR_DATA);

	/* Program the data strobe */
	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

	/* Clear PIO DONE ISR and start the transfer */
	advk_writel(pcie, 1, PIO_ISR);
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0)
		return PCIBIOS_SET_FAILED;

	ret = advk_pcie_check_pio_status(pcie, NULL);
	if (ret < 0)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops advk_pcie_ops = {
	.read = advk_pcie_rd_conf,
	.write = advk_pcie_wr_conf,
};

static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
					 struct msi_msg *msg)
{
	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);

	msg->address_lo = lower_32_bits(msi_msg);
	msg->address_hi = upper_32_bits(msi_msg);
	msg->data = data->hwirq;
}

static int advk_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct advk_pcie *pcie = domain->host_data;
	int hwirq, i;

	mutex_lock(&pcie->msi_used_lock);
	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
					order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &pcie->msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);

	return hwirq;
}

static void advk_msi_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct advk_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->msi_used_lock);
	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
}

static const struct irq_domain_ops advk_msi_domain_ops = {
	.alloc = advk_msi_irq_domain_alloc,
	.free = advk_msi_irq_domain_free,
};

static void advk_pcie_irq_mask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void advk_pcie_irq_unmask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	advk_pcie_irq_mask(irq_get_irq_data(virq));
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct irq_chip *bottom_ic, *msi_ic;
	struct msi_domain_info *msi_di;
	phys_addr_t msi_msg_phys;

	mutex_init(&pcie->msi_used_lock);

	bottom_ic = &pcie->msi_bottom_irq_chip;

	bottom_ic->name = "MSI";
	bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
	bottom_ic->irq_set_affinity = advk_msi_set_affinity;

	msi_ic = &pcie->msi_irq_chip;
	msi_ic->name = "advk-MSI";

	msi_di = &pcie->msi_domain_info;
	msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		MSI_FLAG_MULTI_PCI_MSI;
	msi_di->chip = msi_ic;

	msi_msg_phys = virt_to_phys(&pcie->msi_msg);

	advk_writel(pcie, lower_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, upper_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_HIGH_REG);

	pcie->msi_inner_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_msi_domain_ops, pcie);
	if (!pcie->msi_inner_domain)
		return -ENOMEM;

	pcie->msi_domain =
		pci_msi_create_irq_domain(of_node_to_fwnode(node),
					  msi_di, pcie->msi_inner_domain);
	if (!pcie->msi_domain) {
		irq_domain_remove(pcie->msi_inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->msi_domain);
	irq_domain_remove(pcie->msi_inner_domain);
}

static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;
	int ret = 0;

	raw_spin_lock_init(&pcie->irq_lock);

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;

	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_mask_ack = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		ret = -ENOMEM;
		goto out_put_node;
	}

out_put_node:
	of_node_put(pcie_intc_node);
	return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}

static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;
	int virq;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx);
		generic_handle_irq(virq);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}

static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i, virq;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		virq = irq_find_mapping(pcie->irq_domain, i);
		generic_handle_irq(virq);
	}
}

static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}

static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
	int err, res_valid = 0;
	struct device *dev = &pcie->pdev->dev;
	struct resource_entry *win, *tmp;
	resource_size_t iobase;

	INIT_LIST_HEAD(&pcie->resources);

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    &pcie->resources, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &pcie->resources);
	if (err)
		goto out_release_res;

	resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			pcie->root_bus_nr = res->start;
			break;
		}
	}

	if (!res_valid) {
		dev_err(dev, "non-prefetchable memory resource required\n");
		err = -EINVAL;
		goto out_release_res;
	}

	return 0;

out_release_res:
	pci_free_resource_list(&pcie->resources);
	return err;
}

static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct resource *res;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pcie->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	ret = advk_pcie_parse_request_of_pci_ranges(pcie);
	if (ret) {
		dev_err(dev, "Failed to parse resources\n");
		return ret;
	}

	resource_list_for_each_entry(entry, &pcie->resources) {
		resource_size_t start = entry->res->start;
		resource_size_t size = resource_size(entry->res);
		unsigned long type = resource_type(entry->res);
		u64 win_size;

		/*
		 * Aardvark hardware allows to configure also PCIe window
		 * for config type 0 and type 1 mapping, but driver uses
		 * only PIO for issuing configuration transfers which does
		 * not use PCIe window configuration.
		 */
		if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
		    type != IORESOURCE_IO)
			continue;

		/*
		 * Skip transparent memory resources. Default outbound access
		 * configuration is set to transparent memory access so it
		 * does not need window configuration.
		 */
		if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
		    entry->offset == 0)
			continue;

		/*
		 * The n-th PCIe window is configured by tuple (match, remap, mask)
		 * and an access to address A uses this window if A matches the
		 * match with given mask.
		 * So every PCIe window size must be a power of two and every start
		 * address must be aligned to window size. Minimal size is 64 KiB
		 * because lower 16 bits of mask must be zero. Remapped address
		 * may have set only bits from the mask.
		 */
		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
			/* Calculate the largest aligned window size */
			win_size = (1ULL << (fls64(size)-1)) |
				   (start ? (1ULL << __ffs64(start)) : 0);
			win_size = 1ULL << __ffs64(win_size);
			if (win_size < 0x10000)
				break;

			dev_dbg(dev,
				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
				pcie->wins_count, (unsigned long long)start,
				(unsigned long long)start + win_size, type);

			if (type == IORESOURCE_IO) {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
			} else {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
				pcie->wins[pcie->wins_count].match = start;
			}
			pcie->wins[pcie->wins_count].remap = start - entry->offset;
			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
				break;

			start += win_size;
			size -= win_size;
			pcie->wins_count++;
		}

		if (size > 0) {
			dev_err(&pcie->pdev->dev,
				"Invalid PCIe region [0x%llx-0x%llx]\n",
				(unsigned long long)entry->res->start,
				(unsigned long long)entry->res->end + 1);
			return -EINVAL;
		}
	}

	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
						       "reset-gpios", 0,
						       GPIOD_OUT_LOW,
						       "pcie1-reset");
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret == -ENOENT) {
			pcie->reset_gpio = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get reset-gpio: %i\n",
					ret);
			return ret;
		}
	}

	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	list_splice_init(&pcie->resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = 0;
	bridge->ops = &advk_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}

static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
		/* Driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
	},
	.probe = advk_pcie_probe,
};
builtin_platform_driver(advk_pcie_driver);