/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <linux/if_vlan.h>
#include <uapi/linux/ppp_defs.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TXQ_THRESH_OFFSET 16
#define MVPP2_TXQ_THRESH_MASK 0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS 0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12

#define MVPP22_AXI_CODE_CACHE_OFFS 0
#define MVPP22_AXI_CODE_DOMAIN_OFFS 4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8

#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_STATUS0 0x10
#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT 0x20
#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
#define MVPP22_GMAC_INT_MASK 0x24
#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_CTRL_4_REG 0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
#define MVPP22_XLG_CTRL1_REG 0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
#define MVPP22_XLG_STATUS 0x10c
#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
#define MVPP22_XLG_INT_STAT 0x114
#define MVPP22_XLG_INT_STAT_LINK BIT(1)
#define MVPP22_XLG_INT_MASK 0x118
#define MVPP22_XLG_INT_MASK_LINK BIT(1)
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
#define MVPP22_XLG_EXT_INT_MASK 0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)

#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
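/* Editor's illustration (not part of the original driver): stepping
 * through a ring with MVPP2_QUEUE_NEXT_DESC() wraps the index back to
 * zero after last_desc, so the walk below visits 1, 2, 3, 0, 1 for a
 * 4-entry ring.
 */
#if 0
static void mvpp2_ring_walk_example(struct mvpp2_tx_queue *txq)
{
	int idx = 0, n;

	for (n = 0; n < 5; n++)
		idx = MVPP2_QUEUE_NEXT_DESC(txq, idx);
}
#endif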
/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
#define MVPP22_MPCS_CLK_RESET 0x14c
#define MAC_CLK_RESET_SD_TX BIT(0)
#define MAC_CLK_RESET_SD_RX BIT(1)
#define MAC_CLK_RESET_MAC BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)

/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1 0x1108
#define GENCONF_SOFT_RESET1_GOP BIT(6)
#define GENCONF_PORT_CTRL0 0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
#define GENCONF_PORT_CTRL1 0x1114
#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
#define GENCONF_CTRL0 0x1120
#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
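/* Editor's note (worked example): with the Marvell header in place, the
 * IP header starts at MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16 bytes from
 * the start of the buffer, i.e. on a 4-byte boundary, which is the
 * alignment property the comment above relies on.
 */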
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
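/* Editor's sketch (hypothetical helper, assuming a 64-byte cache line):
 * for an MTU of 1500, MVPP2_RX_PKT_SIZE() aligns
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (FCS) = 1524 up to 1536,
 * then headroom and skb_shared_info are added on top of that.
 */
#if 0
static void mvpp2_rx_size_example(void)
{
	int pkt_size = MVPP2_RX_PKT_SIZE(1500);		/* 1536 */
	int buf_size = MVPP2_RX_BUF_SIZE(pkt_size);	/* + NET_SKB_PAD */
	int total_size = MVPP2_RX_TOTAL_SIZE(buf_size);	/* + shinfo */

	(void)total_size;
}
#endif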
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
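/* Editor's note (worked example): data and enable bytes are interleaved
 * in pairs, so for offs = 0, 1, 2, 3 the macros above yield data bytes
 * 0, 1, 4, 5 and enable bytes 2, 3, 6, 7 - two data bytes always
 * followed by their two enable bytes.
 */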
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
/* BM short pool packet size
 * This value ensures that, for SWF, the total number
 * of bytes allocated for each buffer will be 512.
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
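/* Editor's note (worked example): MVPP2_RX_MAX_PKT_SIZE(512) expands to
 * 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE, i.e. it works the RX buffer
 * sizing backwards so that packet data, headroom and skb_shared_info
 * together fit in one 512-byte short-pool buffer.
 */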
#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K

#define MVPP2_MAX_THREADS 8
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located in the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	unsigned char udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;

	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
#define IS_TSO_HEADER(txq_pcpu, addr) \
	((addr) >= (txq_pcpu)->tso_headers_dma && \
	 (addr) < (txq_pcpu)->tso_headers_dma + \
	 (txq_pcpu)->size * TSO_HEADER_SIZE)
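/* Editor's sketch (hypothetical helper): a typical use of
 * IS_TSO_HEADER() on the TX completion path is to skip DMA unmapping
 * for buffers carved out of the long-lived per-CPU TSO header area.
 */
#if 0
static void mvpp2_tx_buf_unmap_example(struct device *dev,
				       struct mvpp2_txq_pcpu *txq_pcpu,
				       struct mvpp2_txq_pcpu_buf *tx_buf)
{
	if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
		dma_unmap_single(dev, tx_buf->dma, tx_buf->size,
				 DMA_TO_DEVICE);
}
#endif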
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}
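/* Editor's sketch (hypothetical helper): registers are typically
 * updated with a read-modify-write through the two helpers above, for
 * instance to set the START bit of a buffer manager pool.
 */
#if 0
static void mvpp2_bm_start_example(struct mvpp2 *priv, int pool)
{
	u32 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool));

	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool), val);
}
#endif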
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}
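/* Editor's sketch (hypothetical helper): indirect registers are paired
 * with their index register from the same CPU window, e.g. setting the
 * RX occupied-descriptor threshold (MVPP2_RXQ_THRESH_REG is related to
 * MVPP2_RXQ_NUM_REG, see the list above).
 */
#if 0
static void mvpp2_rx_thresh_example(struct mvpp2 *priv, int rxq_id,
				    u32 thresh)
{
	int cpu = get_cpu();

	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq_id);
	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_THRESH_REG, thresh);
	put_cpu();
}
#endif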
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
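/* Editor's note (worked example): with MVPP2_MAX_TCONT = 16 and
 * MVPP2_MAX_TXQ = 8, port 1 is egress port 16 + 1 = 17, and its txq 2
 * maps to physical TXQ (16 + 1) * 8 + 2 = 138.
 */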
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
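/* Editor's note (worked example): the port map is stored inverted in
 * the enable byte, so mvpp2_prs_tcam_port_map_set(pe, 0) ("mask all
 * ports") writes 0xff and mvpp2_prs_tcam_port_map_get() returns 0,
 * while a map of BIT(0) is stored as 0xfe and read back as 0x01.
 */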
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
1537 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1540 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1543 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1544 if (tcam_data != data)
1549 /* Update ai bits in tcam sw entry */
1550 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1551 unsigned int bits, unsigned int enable)
1553 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1555 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1557 if (!(enable & BIT(i)))
1561 pe->tcam.byte[ai_idx] |= 1 << i;
1563 pe->tcam.byte[ai_idx] &= ~(1 << i);
1566 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1569 /* Get ai bits from tcam sw entry */
1570 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1572 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* The SRAM stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
1876 /* Accept multicast */
1877 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1880 struct mvpp2_prs_entry pe;
1881 unsigned char da_mc;
1883 /* The first byte of an Ethernet multicast address is
1884 * 0x01 for IPv4 and 0x33 for IPv6
1886 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1888 if (priv->prs_shadow[index].valid) {
1889 /* Entry exists - update port only */
1891 mvpp2_prs_hw_read(priv, &pe);
1893 /* Entry doesn't exist - create new */
1894 memset(&pe, 0, sizeof(pe));
1895 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1898 /* Continue - set next lookup */
1899 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1901 /* Set result info bits */
1902 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1903 MVPP2_PRS_RI_L2_CAST_MASK);
1905 /* Update tcam entry data first byte */
1906 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1908 /* Shift to ethertype */
1909 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1910 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1912 /* Mask all ports */
1913 mvpp2_prs_tcam_port_map_set(&pe, 0);
1915 /* Update shadow table */
1916 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1919 /* Update port mask */
1920 mvpp2_prs_tcam_port_set(&pe, port, add);
1922 mvpp2_prs_hw_write(priv, &pe);
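/* Matching the single first byte is sufficient here because IPv4
 * multicast MACs always start 01:00:5e and IPv6 multicast MACs
 * always start 33:33, so 0x01 vs 0x33 cleanly separates the two
 * groups from each other and from unicast traffic.
 */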
1925 /* Set entry for dsa packets */
1926 static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1927 bool tagged, bool extend)
1929 struct mvpp2_prs_entry pe;
1933 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1936 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1940 if (priv->prs_shadow[tid].valid) {
1941 /* Entry exists - update port only */
1943 mvpp2_prs_hw_read(priv, &pe);
1945 /* Entry doesn't exist - create new */
1946 memset(&pe, 0, sizeof(pe));
1947 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1950 /* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */
1951 mvpp2_prs_sram_shift_set(&pe, shift,
1952 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1954 /* Update shadow table */
1955 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1958 /* Set tagged bit in DSA tag */
1959 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1960 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1961 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1962 /* Clear all ai bits for next iteration */
1963 mvpp2_prs_sram_ai_update(&pe, 0,
1964 MVPP2_PRS_SRAM_AI_MASK);
1965 /* If packet is tagged continue check vlans */
1966 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1968 /* Set result info bits to 'no vlans' */
1969 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1970 MVPP2_PRS_RI_VLAN_MASK);
1971 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1974 /* Mask all ports */
1975 mvpp2_prs_tcam_port_map_set(&pe, 0);
1978 /* Update port mask */
1979 mvpp2_prs_tcam_port_set(&pe, port, add);
1981 mvpp2_prs_hw_write(priv, &pe);
1984 /* Set entry for dsa ethertype */
1985 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1986 bool add, bool tagged, bool extend)
1988 struct mvpp2_prs_entry pe;
1989 int tid, shift, port_mask;
1992 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1993 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1997 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1998 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1999 port_mask = MVPP2_PRS_PORT_MASK;
2003 if (priv->prs_shadow[tid].valid) {
2004 /* Entry exists - update port only */
2006 mvpp2_prs_hw_read(priv, &pe);
2008 /* Entry doesn't exist - create new */
2009 memset(&pe, 0, sizeof(pe));
2010 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2014 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
2015 mvpp2_prs_match_etype(&pe, 2, 0);
2017 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
2018 MVPP2_PRS_RI_DSA_MASK);
2019 /* Shift ethertype + 2 reserved bytes + tag */
2020 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
2021 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2023 /* Update shadow table */
2024 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2027 /* Set tagged bit in DSA tag */
2028 mvpp2_prs_tcam_data_byte_set(&pe,
2029 MVPP2_ETH_TYPE_LEN + 2 + 3,
2030 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2031 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2032 /* Clear all ai bits for next iteration */
2033 mvpp2_prs_sram_ai_update(&pe, 0,
2034 MVPP2_PRS_SRAM_AI_MASK);
2035 /* If packet is tagged continue check vlans */
2036 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2038 /* Set result info bits to 'no vlans' */
2039 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2040 MVPP2_PRS_RI_VLAN_MASK);
2041 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2043 /* Mask/unmask all ports, depending on dsa type */
2044 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2047 /* Update port mask */
2048 mvpp2_prs_tcam_port_set(&pe, port, add);
2050 mvpp2_prs_hw_write(priv, &pe);
2053 /* Search for existing single/triple vlan entry */
2054 static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
2055 unsigned short tpid, int ai)
2057 struct mvpp2_prs_entry *pe;
2060 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2063 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2065 /* Go through all the entries with MVPP2_PRS_LU_VLAN */
2066 for (tid = MVPP2_PE_FIRST_FREE_TID;
2067 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2068 unsigned int ri_bits, ai_bits;
2071 if (!priv->prs_shadow[tid].valid ||
2072 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2077 mvpp2_prs_hw_read(priv, pe);
2078 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
2083 ri_bits = mvpp2_prs_sram_ri_get(pe);
2084 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2086 /* Get current ai value from tcam */
2087 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2088 /* Clear double vlan bit */
2089 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2094 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2095 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2103 /* Add/update single/triple vlan entry */
2104 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2105 unsigned int port_map)
2107 struct mvpp2_prs_entry *pe;
2111 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2114 /* Create new tcam entry */
2115 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2116 MVPP2_PE_FIRST_FREE_TID);
2120 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2124 /* Get last double vlan tid */
2125 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2126 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2127 unsigned int ri_bits;
2129 if (!priv->prs_shadow[tid_aux].valid ||
2130 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2133 pe->index = tid_aux;
2134 mvpp2_prs_hw_read(priv, pe);
2135 ri_bits = mvpp2_prs_sram_ri_get(pe);
2136 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2137 MVPP2_PRS_RI_VLAN_DOUBLE)
2141 if (tid <= tid_aux) {
2146 memset(pe, 0, sizeof(*pe));
2147 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2150 mvpp2_prs_match_etype(pe, 0, tpid);
2152 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2153 /* Shift 4 bytes - skip 1 vlan tag */
2154 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2155 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2156 /* Clear all ai bits for next iteration */
2157 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2159 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2160 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2161 MVPP2_PRS_RI_VLAN_MASK);
2163 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2164 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2165 MVPP2_PRS_RI_VLAN_MASK);
2167 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2169 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2171 /* Update ports' mask */
2172 mvpp2_prs_tcam_port_map_set(pe, port_map);
2174 mvpp2_prs_hw_write(priv, pe);
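/* TCAM lookups here resolve to the lowest matching index, so every
 * double VLAN entry must sit at a lower tid than any single/triple
 * entry for a double-tagged packet to hit the double entry first.
 * If the first free tid found above does not lie beyond tid_aux (the
 * last double VLAN entry), the insertion cannot preserve that
 * ordering, hence the tid <= tid_aux rejection.
 */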
2181 /* Get first free double vlan ai number */
2182 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2186 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2187 if (!priv->prs_double_vlans[i])
2194 /* Search for existing double vlan entry */
2195 static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2196 unsigned short tpid1,
2197 unsigned short tpid2)
2199 struct mvpp2_prs_entry *pe;
2202 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2205 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2207 /* Go through all the entries with MVPP2_PRS_LU_VLAN */
2208 for (tid = MVPP2_PE_FIRST_FREE_TID;
2209 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2210 unsigned int ri_mask;
2213 if (!priv->prs_shadow[tid].valid ||
2214 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2218 mvpp2_prs_hw_read(priv, pe);
2220 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
2221 mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2226 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2227 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2235 /* Add or update double vlan entry */
2236 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2237 unsigned short tpid2,
2238 unsigned int port_map)
2240 struct mvpp2_prs_entry *pe;
2241 int tid_aux, tid, ai, ret = 0;
2243 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2246 /* Create new tcam entry */
2247 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2248 MVPP2_PE_LAST_FREE_TID);
2252 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2256 /* Set ai value for new double vlan entry */
2257 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2263 /* Get first single/triple vlan tid */
2264 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2265 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2266 unsigned int ri_bits;
2268 if (!priv->prs_shadow[tid_aux].valid ||
2269 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2272 pe->index = tid_aux;
2273 mvpp2_prs_hw_read(priv, pe);
2274 ri_bits = mvpp2_prs_sram_ri_get(pe);
2275 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2276 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2277 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2281 if (tid >= tid_aux) {
2286 memset(pe, 0, sizeof(*pe));
2287 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2290 priv->prs_double_vlans[ai] = true;
2292 mvpp2_prs_match_etype(pe, 0, tpid1);
2293 mvpp2_prs_match_etype(pe, 4, tpid2);
2295 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2296 /* Shift 8 bytes - skip 2 vlan tags */
2297 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2298 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2299 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2300 MVPP2_PRS_RI_VLAN_MASK);
2301 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2302 MVPP2_PRS_SRAM_AI_MASK);
2304 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2307 /* Update ports' mask */
2308 mvpp2_prs_tcam_port_map_set(pe, port_map);
2309 mvpp2_prs_hw_write(priv, pe);
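/* Complementary ordering check to mvpp2_prs_vlan_add(): the new
 * double VLAN entry must land below the first single/triple entry
 * (tid_aux), otherwise a double-tagged packet would match the single
 * VLAN entry first; tid >= tid_aux is therefore rejected. The ai
 * value claimed from mvpp2_prs_double_vlan_ai_free_get() is recorded
 * in prs_double_vlans[] and written together with
 * MVPP2_PRS_DBL_VLAN_AI_BIT so the inner tag can be matched on the
 * next parser pass.
 */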
2315 /* IPv4 header parsing for fragmentation and L4 offset */
2316 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2317 unsigned int ri, unsigned int ri_mask)
2319 struct mvpp2_prs_entry pe;
2322 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2323 (proto != IPPROTO_IGMP))
2326 /* Not fragmented packet */
2327 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2328 MVPP2_PE_LAST_FREE_TID);
2332 memset(&pe, 0, sizeof(pe));
2333 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2336 /* Set next lu to IPv4 */
2337 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2338 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2340 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2341 sizeof(struct iphdr) - 4,
2342 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2343 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2344 MVPP2_PRS_IPV4_DIP_AI_BIT);
2345 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2347 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2348 MVPP2_PRS_TCAM_PROTO_MASK_L);
2349 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2350 MVPP2_PRS_TCAM_PROTO_MASK);
2352 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2353 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2354 /* Unmask all ports */
2355 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2357 /* Update shadow table and hw entry */
2358 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2359 mvpp2_prs_hw_write(priv, &pe);
2361 /* Fragmented packet */
2362 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2363 MVPP2_PE_LAST_FREE_TID);
2368 /* Clear ri before updating */
2369 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2370 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2371 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2373 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2374 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2376 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2377 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
2379 /* Update shadow table and hw entry */
2380 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2381 mvpp2_prs_hw_write(priv, &pe);
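/* Two entries are written per protocol. At this lookup the parser
 * has already advanced 4 bytes into the IPv4 header (see the
 * "eth_type + 4" shift in the ethertype entries), so tcam data bytes
 * 2 and 3 are the flags/fragment-offset field and byte 5 is the
 * protocol field. The first entry requires bytes 2 and 3 to be zero
 * (not fragmented); the second masks them out entirely and adds
 * MVPP2_PRS_RI_IP_FRAG_TRUE to the result info instead.
 */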
2386 /* IPv4 L3 multicast or broadcast */
2387 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2389 struct mvpp2_prs_entry pe;
2392 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2393 MVPP2_PE_LAST_FREE_TID);
2397 memset(&pe, 0, sizeof(pe));
2398 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2402 case MVPP2_PRS_L3_MULTI_CAST:
2403 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2404 MVPP2_PRS_IPV4_MC_MASK);
2405 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2406 MVPP2_PRS_RI_L3_ADDR_MASK);
2408 case MVPP2_PRS_L3_BROAD_CAST:
2409 mask = MVPP2_PRS_IPV4_BC_MASK;
2410 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2411 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2412 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2413 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2414 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2415 MVPP2_PRS_RI_L3_ADDR_MASK);
2421 /* Finished: go to flowid generation */
2422 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2423 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2425 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2426 MVPP2_PRS_IPV4_DIP_AI_BIT);
2427 /* Unmask all ports */
2428 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2430 /* Update shadow table and hw entry */
2431 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2432 mvpp2_prs_hw_write(priv, &pe);
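/* A sketch of what the two cases match, assuming MVPP2_PRS_IPV4_MC
 * encodes 0xe0 under mask 0xf0: multicast is any DIP in 224.0.0.0/4,
 * i.e. a first byte whose top nibble is 0xe, while broadcast needs
 * all four DIP bytes equal to 0xff (255.255.255.255), hence the four
 * byte_set calls above.
 */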
2437 /* Set entries for protocols over IPv6 */
2438 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2439 unsigned int ri, unsigned int ri_mask)
2441 struct mvpp2_prs_entry pe;
2444 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2445 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2448 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2449 MVPP2_PE_LAST_FREE_TID);
2453 memset(&pe, 0, sizeof(pe));
2454 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2457 /* Finished: go to flowid generation */
2458 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2459 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2460 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2461 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2462 sizeof(struct ipv6hdr) - 6,
2463 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2465 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2466 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2467 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2468 /* Unmask all ports */
2469 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2472 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2473 mvpp2_prs_hw_write(priv, &pe);
2478 /* IPv6 L3 multicast entry */
2479 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2481 struct mvpp2_prs_entry pe;
2484 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2487 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2488 MVPP2_PE_LAST_FREE_TID);
2492 memset(&pe, 0, sizeof(pe));
2493 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2496 /* Finished: go to IPv6 again */
2497 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2498 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2499 MVPP2_PRS_RI_L3_ADDR_MASK);
2500 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2501 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2502 /* Shift back to IPv6 NH */
2503 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2505 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2506 MVPP2_PRS_IPV6_MC_MASK);
2507 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2508 /* Unmask all ports */
2509 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2511 /* Update shadow table and hw entry */
2512 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2513 mvpp2_prs_hw_write(priv, &pe);
2518 /* Parser per-port initialization */
2519 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2520 int lu_max, int offset)
2525 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2526 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2527 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2528 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2530 /* Set maximum number of loops for packet received from port */
2531 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2532 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2533 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2534 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2536 /* Set initial offset for packet header extraction for the first
2539 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2540 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2541 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2542 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
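/* At probe time every port is initialized identically (see
 * mvpp2_prs_default_init() below): first lookup unit MVPP2_PRS_LU_MH,
 * loop limit MVPP2_PRS_PORT_LU_MAX and initial offset 0, i.e.
 * parsing starts at the Marvell header at the very beginning of the
 * packet.
 */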
2545 /* Default flow entries initialization for all ports */
2546 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2548 struct mvpp2_prs_entry pe;
2551 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2552 memset(&pe, 0, sizeof(pe));
2553 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2554 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2556 /* Mask all ports */
2557 mvpp2_prs_tcam_port_map_set(&pe, 0);
2560 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2561 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2563 /* Update shadow table and hw entry */
2564 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2565 mvpp2_prs_hw_write(priv, &pe);
2569 /* Set default entry for Marvell Header field */
2570 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2572 struct mvpp2_prs_entry pe;
2574 memset(&pe, 0, sizeof(pe));
2576 pe.index = MVPP2_PE_MH_DEFAULT;
2577 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2578 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2579 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2580 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2582 /* Unmask all ports */
2583 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2585 /* Update shadow table and hw entry */
2586 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2587 mvpp2_prs_hw_write(priv, &pe);
2590 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
2591 * multicast MAC addresses
2593 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2595 struct mvpp2_prs_entry pe;
2597 memset(&pe, 0, sizeof(pe));
2599 /* Non-promiscuous mode for all ports - DROP unknown packets */
2600 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2601 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2603 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2604 MVPP2_PRS_RI_DROP_MASK);
2605 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2606 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2608 /* Unmask all ports */
2609 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2611 /* Update shadow table and hw entry */
2612 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2613 mvpp2_prs_hw_write(priv, &pe);
2615 /* Placeholders only - no ports */
2616 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2617 mvpp2_prs_mac_promisc_set(priv, 0, false);
2618 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
2619 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
2622 /* Set default entries for various types of dsa packets */
2623 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2625 struct mvpp2_prs_entry pe;
2627 /* Untagged EDSA entry - placeholder */
2628 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2631 /* Tagged EDSA entry - placeholder */
2632 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2634 /* Untagged DSA entry - placeholder */
2635 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2638 /* Tagged DSA entry - placeholder */
2639 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2641 /* Untagged EDSA ethertype entry - placeholder */
2642 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2643 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2645 /* Tagged EDSA ethertype entry - placeholder */
2646 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2647 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2649 /* Untagged DSA ethertype entry */
2650 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2651 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2653 /* Tagged DSA ethertype entry */
2654 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2655 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2657 /* Set default entry, in case the DSA or EDSA tag is not found */
2658 memset(&pe, 0, sizeof(pe));
2659 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2660 pe.index = MVPP2_PE_DSA_DEFAULT;
2661 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2664 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2665 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2667 /* Clear all sram ai bits for next iteration */
2668 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2670 /* Unmask all ports */
2671 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2673 mvpp2_prs_hw_write(priv, &pe);
2676 /* Match basic ethertypes */
2677 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2679 struct mvpp2_prs_entry pe;
2682 /* Ethertype: PPPoE */
2683 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2684 MVPP2_PE_LAST_FREE_TID);
2688 memset(&pe, 0, sizeof(pe));
2689 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2692 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2694 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2695 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2696 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2697 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2698 MVPP2_PRS_RI_PPPOE_MASK);
2700 /* Update shadow table and hw entry */
2701 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2702 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2703 priv->prs_shadow[pe.index].finish = false;
2704 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2705 MVPP2_PRS_RI_PPPOE_MASK);
2706 mvpp2_prs_hw_write(priv, &pe);
2708 /* Ethertype: ARP */
2709 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2710 MVPP2_PE_LAST_FREE_TID);
2714 memset(&pe, 0, sizeof(pe));
2715 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2718 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2720 /* Generate flow in the next iteration */
2721 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2722 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2723 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2724 MVPP2_PRS_RI_L3_PROTO_MASK);
2726 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2728 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2730 /* Update shadow table and hw entry */
2731 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2732 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2733 priv->prs_shadow[pe.index].finish = true;
2734 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2735 MVPP2_PRS_RI_L3_PROTO_MASK);
2736 mvpp2_prs_hw_write(priv, &pe);
2738 /* Ethertype: LBTD */
2739 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2740 MVPP2_PE_LAST_FREE_TID);
2744 memset(&pe, 0, sizeof(pe));
2745 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2748 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2750 /* Generate flow in the next iteration */
2751 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2752 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2753 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2754 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2755 MVPP2_PRS_RI_CPU_CODE_MASK |
2756 MVPP2_PRS_RI_UDF3_MASK);
2758 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2760 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2762 /* Update shadow table and hw entry */
2763 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2764 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2765 priv->prs_shadow[pe.index].finish = true;
2766 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2767 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2768 MVPP2_PRS_RI_CPU_CODE_MASK |
2769 MVPP2_PRS_RI_UDF3_MASK);
2770 mvpp2_prs_hw_write(priv, &pe);
2772 /* Ethertype: IPv4 without options */
2773 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2774 MVPP2_PE_LAST_FREE_TID);
2778 memset(&pe, 0, sizeof(pe));
2779 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2782 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2783 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2784 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2785 MVPP2_PRS_IPV4_HEAD_MASK |
2786 MVPP2_PRS_IPV4_IHL_MASK);
2788 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2789 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2790 MVPP2_PRS_RI_L3_PROTO_MASK);
2791 /* Skip eth_type + 4 bytes of IP header */
2792 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2793 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2795 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2797 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2799 /* Update shadow table and hw entry */
2800 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2801 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2802 priv->prs_shadow[pe.index].finish = false;
2803 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2804 MVPP2_PRS_RI_L3_PROTO_MASK);
2805 mvpp2_prs_hw_write(priv, &pe);
2807 /* Ethertype: IPv4 with options */
2808 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2809 MVPP2_PE_LAST_FREE_TID);
2815 /* Clear tcam data before updating */
2816 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2817 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2819 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2820 MVPP2_PRS_IPV4_HEAD,
2821 MVPP2_PRS_IPV4_HEAD_MASK);
2823 /* Clear ri before updating */
2824 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2825 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2826 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2827 MVPP2_PRS_RI_L3_PROTO_MASK);
2829 /* Update shadow table and hw entry */
2830 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2831 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2832 priv->prs_shadow[pe.index].finish = false;
2833 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2834 MVPP2_PRS_RI_L3_PROTO_MASK);
2835 mvpp2_prs_hw_write(priv, &pe);
2837 /* Ethertype: IPv6 without options */
2838 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2839 MVPP2_PE_LAST_FREE_TID);
2843 memset(&pe, 0, sizeof(pe));
2844 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2847 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2849 /* Skip DIP of IPV6 header */
2850 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2851 MVPP2_MAX_L3_ADDR_SIZE,
2852 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2853 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2854 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2855 MVPP2_PRS_RI_L3_PROTO_MASK);
2857 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2859 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2861 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2862 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2863 priv->prs_shadow[pe.index].finish = false;
2864 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2865 MVPP2_PRS_RI_L3_PROTO_MASK);
2866 mvpp2_prs_hw_write(priv, &pe);
2868 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2869 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2870 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2871 pe.index = MVPP2_PE_ETH_TYPE_UN;
2873 /* Unmask all ports */
2874 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2876 /* Generate flow in the next iteration */
2877 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2878 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2879 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2880 MVPP2_PRS_RI_L3_PROTO_MASK);
2881 /* Set L3 offset even if the L3 protocol is unknown */
2882 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2884 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2886 /* Update shadow table and hw entry */
2887 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2888 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2889 priv->prs_shadow[pe.index].finish = true;
2890 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2891 MVPP2_PRS_RI_L3_PROTO_MASK);
2892 mvpp2_prs_hw_write(priv, &pe);
2897 /* Configure vlan entries and detect up to 2 successive VLAN tags.
2904 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2906 struct mvpp2_prs_entry pe;
2909 priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2910 MVPP2_PRS_DBL_VLANS_MAX, sizeof(bool),
2912 if (!priv->prs_double_vlans)
2915 /* Double VLAN: 0x8100, 0x88A8 */
2916 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2917 MVPP2_PRS_PORT_MASK);
2921 /* Double VLAN: 0x8100, 0x8100 */
2922 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2923 MVPP2_PRS_PORT_MASK);
2927 /* Single VLAN: 0x88a8 */
2928 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2929 MVPP2_PRS_PORT_MASK);
2933 /* Single VLAN: 0x8100 */
2934 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2935 MVPP2_PRS_PORT_MASK);
2939 /* Set default double vlan entry */
2940 memset(&pe, 0, sizeof(pe));
2941 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2942 pe.index = MVPP2_PE_VLAN_DBL;
2944 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2945 /* Clear ai for next iterations */
2946 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2947 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2948 MVPP2_PRS_RI_VLAN_MASK);
2950 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2951 MVPP2_PRS_DBL_VLAN_AI_BIT);
2952 /* Unmask all ports */
2953 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2955 /* Update shadow table and hw entry */
2956 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2957 mvpp2_prs_hw_write(priv, &pe);
2959 /* Set default vlan none entry */
2960 memset(&pe, 0, sizeof(pe));
2961 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2962 pe.index = MVPP2_PE_VLAN_NONE;
2964 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2965 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2966 MVPP2_PRS_RI_VLAN_MASK);
2968 /* Unmask all ports */
2969 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2971 /* Update shadow table and hw entry */
2972 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2973 mvpp2_prs_hw_write(priv, &pe);
2978 /* Set entries for PPPoE ethertype */
2979 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2981 struct mvpp2_prs_entry pe;
2984 /* IPv4 over PPPoE with options */
2985 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2986 MVPP2_PE_LAST_FREE_TID);
2990 memset(&pe, 0, sizeof(pe));
2991 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2994 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2996 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2997 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2998 MVPP2_PRS_RI_L3_PROTO_MASK);
2999 /* Skip eth_type + 4 bytes of IP header */
3000 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3001 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3003 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3005 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3007 /* Update shadow table and hw entry */
3008 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3009 mvpp2_prs_hw_write(priv, &pe);
3011 /* IPv4 over PPPoE without options */
3012 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3013 MVPP2_PE_LAST_FREE_TID);
3019 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
3020 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
3021 MVPP2_PRS_IPV4_HEAD_MASK |
3022 MVPP2_PRS_IPV4_IHL_MASK);
3024 /* Clear ri before updating */
3025 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3026 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3027 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
3028 MVPP2_PRS_RI_L3_PROTO_MASK);
3030 /* Update shadow table and hw entry */
3031 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3032 mvpp2_prs_hw_write(priv, &pe);
3034 /* IPv6 over PPPoE */
3035 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3036 MVPP2_PE_LAST_FREE_TID);
3040 memset(&pe, 0, sizeof(pe));
3041 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3044 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
3046 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3047 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3048 MVPP2_PRS_RI_L3_PROTO_MASK);
3049 /* Skip eth_type + 4 bytes of IPv6 header */
3050 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3051 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3053 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3055 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3057 /* Update shadow table and hw entry */
3058 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3059 mvpp2_prs_hw_write(priv, &pe);
3061 /* Non-IP over PPPoE */
3062 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3063 MVPP2_PE_LAST_FREE_TID);
3067 memset(&pe, 0, sizeof(pe));
3068 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3071 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3072 MVPP2_PRS_RI_L3_PROTO_MASK);
3074 /* Finished: go to flowid generation */
3075 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3076 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3077 /* Set L3 offset even if the L3 protocol is unknown */
3078 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3080 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3082 /* Update shadow table and hw entry */
3083 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3084 mvpp2_prs_hw_write(priv, &pe);
3089 /* Initialize entries for IPv4 */
3090 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3092 struct mvpp2_prs_entry pe;
3095 /* Set entries for TCP, UDP and IGMP over IPv4 */
3096 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3097 MVPP2_PRS_RI_L4_PROTO_MASK);
3101 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3102 MVPP2_PRS_RI_L4_PROTO_MASK);
3106 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3107 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3108 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3109 MVPP2_PRS_RI_CPU_CODE_MASK |
3110 MVPP2_PRS_RI_UDF3_MASK);
3114 /* IPv4 Broadcast */
3115 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3119 /* IPv4 Multicast */
3120 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3124 /* Default IPv4 entry for unknown protocols */
3125 memset(&pe, 0, sizeof(pe));
3126 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3127 pe.index = MVPP2_PE_IP4_PROTO_UN;
3129 /* Set next lu to IPv4 */
3130 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3131 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3133 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3134 sizeof(struct iphdr) - 4,
3135 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3136 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3137 MVPP2_PRS_IPV4_DIP_AI_BIT);
3138 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3139 MVPP2_PRS_RI_L4_PROTO_MASK);
3141 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3142 /* Unmask all ports */
3143 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3145 /* Update shadow table and hw entry */
3146 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3147 mvpp2_prs_hw_write(priv, &pe);
3149 /* Default IPv4 entry for unicast address */
3150 memset(&pe, 0, sizeof(pe));
3151 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3152 pe.index = MVPP2_PE_IP4_ADDR_UN;
3154 /* Finished: go to flowid generation */
3155 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3156 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3157 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3158 MVPP2_PRS_RI_L3_ADDR_MASK);
3160 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3161 MVPP2_PRS_IPV4_DIP_AI_BIT);
3162 /* Unmask all ports */
3163 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3165 /* Update shadow table and hw entry */
3166 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3167 mvpp2_prs_hw_write(priv, &pe);
3172 /* Initialize entries for IPv6 */
3173 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3175 struct mvpp2_prs_entry pe;
3178 /* Set entries for TCP, UDP and ICMP over IPv6 */
3179 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3180 MVPP2_PRS_RI_L4_TCP,
3181 MVPP2_PRS_RI_L4_PROTO_MASK);
3185 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3186 MVPP2_PRS_RI_L4_UDP,
3187 MVPP2_PRS_RI_L4_PROTO_MASK);
3191 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3192 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3193 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3194 MVPP2_PRS_RI_CPU_CODE_MASK |
3195 MVPP2_PRS_RI_UDF3_MASK);
3199 /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
3200 /* Result Info: UDF7=1, DS lite */
3201 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3202 MVPP2_PRS_RI_UDF7_IP6_LITE,
3203 MVPP2_PRS_RI_UDF7_MASK);
3207 /* IPv6 multicast */
3208 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3212 /* Entry for checking hop limit */
3213 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3214 MVPP2_PE_LAST_FREE_TID);
3218 memset(&pe, 0, sizeof(pe));
3219 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3222 /* Finished: go to flowid generation */
3223 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3224 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3225 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3226 MVPP2_PRS_RI_DROP_MASK,
3227 MVPP2_PRS_RI_L3_PROTO_MASK |
3228 MVPP2_PRS_RI_DROP_MASK);
3230 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3231 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3232 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3234 /* Update shadow table and hw entry */
3235 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3236 mvpp2_prs_hw_write(priv, &pe);
3238 /* Default IPv6 entry for unknown protocols */
3239 memset(&pe, 0, sizeof(pe));
3240 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3241 pe.index = MVPP2_PE_IP6_PROTO_UN;
3243 /* Finished: go to flowid generation */
3244 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3245 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3246 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3247 MVPP2_PRS_RI_L4_PROTO_MASK);
3248 /* Set L4 offset relative to the current parse position */
3249 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3250 sizeof(struct ipv6hdr) - 4,
3251 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3253 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3254 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3255 /* Unmask all ports */
3256 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3258 /* Update shadow table and hw entry */
3259 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3260 mvpp2_prs_hw_write(priv, &pe);
3262 /* Default IPv6 entry for unknown ext protocols */
3263 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3264 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3265 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3267 /* Finished: go to flowid generation */
3268 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3269 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3270 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3271 MVPP2_PRS_RI_L4_PROTO_MASK);
3273 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3274 MVPP2_PRS_IPV6_EXT_AI_BIT);
3275 /* Unmask all ports */
3276 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3278 /* Update shadow table and hw entry */
3279 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3280 mvpp2_prs_hw_write(priv, &pe);
3282 /* Default IPv6 entry for unicast address */
3283 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3284 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3285 pe.index = MVPP2_PE_IP6_ADDR_UN;
3287 /* Finished: go to IPv6 again */
3288 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3289 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3290 MVPP2_PRS_RI_L3_ADDR_MASK);
3291 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3292 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3293 /* Shift back to IPV6 NH */
3294 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3296 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3297 /* Unmask all ports */
3298 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3300 /* Update shadow table and hw entry */
3301 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3302 mvpp2_prs_hw_write(priv, &pe);
3307 /* Parser default initialization */
3308 static int mvpp2_prs_default_init(struct platform_device *pdev,
3313 /* Enable tcam table */
3314 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3316 /* Clear all tcam and sram entries */
3317 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3318 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3319 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3320 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3322 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3323 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3324 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3327 /* Invalidate all tcam entries */
3328 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3329 mvpp2_prs_hw_inv(priv, index);
3331 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3332 sizeof(*priv->prs_shadow),
3334 if (!priv->prs_shadow)
3337 /* Always start from lookup = 0 */
3338 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3339 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3340 MVPP2_PRS_PORT_LU_MAX, 0);
3342 mvpp2_prs_def_flow_init(priv);
3344 mvpp2_prs_mh_init(priv);
3346 mvpp2_prs_mac_init(priv);
3348 mvpp2_prs_dsa_init(priv);
3350 err = mvpp2_prs_etype_init(priv);
3354 err = mvpp2_prs_vlan_init(pdev, priv);
3358 err = mvpp2_prs_pppoe_init(priv);
3362 err = mvpp2_prs_ip6_init(priv);
3366 err = mvpp2_prs_ip4_init(priv);
3373 /* Compare MAC DA with tcam entry data */
3374 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3375 const u8 *da, unsigned char *mask)
3377 unsigned char tcam_byte, tcam_mask;
3380 for (index = 0; index < ETH_ALEN; index++) {
3381 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3382 if (tcam_mask != mask[index])
3385 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3392 /* Find tcam entry with matched pair <MAC DA, port> */
3393 static struct mvpp2_prs_entry *
3394 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3395 unsigned char *mask, int udf_type)
3397 struct mvpp2_prs_entry *pe;
3400 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3403 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3405 /* Go through all the entries with MVPP2_PRS_LU_MAC */
3406 for (tid = MVPP2_PE_FIRST_FREE_TID;
3407 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3408 unsigned int entry_pmap;
3410 if (!priv->prs_shadow[tid].valid ||
3411 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3412 (priv->prs_shadow[tid].udf != udf_type))
3416 mvpp2_prs_hw_read(priv, pe);
3417 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3419 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3428 /* Update parser's mac da entry */
3429 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3430 const u8 *da, bool add)
3432 struct mvpp2_prs_entry *pe;
3433 unsigned int pmap, len, ri;
3434 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3437 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3438 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3439 MVPP2_PRS_UDF_MAC_DEF);
3446 /* Create new TCAM entry */
3447 /* Find the first range mac entry */
3448 for (tid = MVPP2_PE_FIRST_FREE_TID;
3449 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3450 if (priv->prs_shadow[tid].valid &&
3451 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3452 (priv->prs_shadow[tid].udf ==
3453 MVPP2_PRS_UDF_MAC_RANGE))
3456 /* Go through all the entries from first to last */
3457 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3462 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3465 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3468 /* Mask all ports */
3469 mvpp2_prs_tcam_port_map_set(pe, 0);
3472 /* Update port mask */
3473 mvpp2_prs_tcam_port_set(pe, port, add);
3475 /* Invalidate the entry if no ports are left enabled */
3476 pmap = mvpp2_prs_tcam_port_map_get(pe);
3482 mvpp2_prs_hw_inv(priv, pe->index);
3483 priv->prs_shadow[pe->index].valid = false;
3488 /* Continue - set next lookup */
3489 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3491 /* Set match on DA */
3494 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3496 /* Set result info bits */
3497 if (is_broadcast_ether_addr(da))
3498 ri = MVPP2_PRS_RI_L2_BCAST;
3499 else if (is_multicast_ether_addr(da))
3500 ri = MVPP2_PRS_RI_L2_MCAST;
3502 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3504 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3505 MVPP2_PRS_RI_MAC_ME_MASK);
3506 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3507 MVPP2_PRS_RI_MAC_ME_MASK);
3509 /* Shift to ethertype */
3510 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3511 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3513 /* Update shadow table and hw entry */
3514 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3515 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3516 mvpp2_prs_hw_write(priv, pe);
3523 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3525 struct mvpp2_port *port = netdev_priv(dev);
3528 /* Remove old parser entry */
3529 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3534 /* Add new parser entry */
3535 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3539 /* Set addr in the device */
3540 ether_addr_copy(dev->dev_addr, da);
3545 /* Delete all port's multicast simple (not range) entries */
3546 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3548 struct mvpp2_prs_entry pe;
3551 for (tid = MVPP2_PE_FIRST_FREE_TID;
3552 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3553 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3555 if (!priv->prs_shadow[tid].valid ||
3556 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3557 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3560 /* Only simple mac entries */
3562 mvpp2_prs_hw_read(priv, &pe);
3564 /* Read mac addr from entry */
3565 for (index = 0; index < ETH_ALEN; index++)
3566 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3569 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3570 /* Delete this entry */
3571 mvpp2_prs_mac_da_accept(priv, port, da, false);
3575 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3578 case MVPP2_TAG_TYPE_EDSA:
3579 /* Add port to EDSA entries */
3580 mvpp2_prs_dsa_tag_set(priv, port, true,
3581 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3582 mvpp2_prs_dsa_tag_set(priv, port, true,
3583 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3584 /* Remove port from DSA entries */
3585 mvpp2_prs_dsa_tag_set(priv, port, false,
3586 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3587 mvpp2_prs_dsa_tag_set(priv, port, false,
3588 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3591 case MVPP2_TAG_TYPE_DSA:
3592 /* Add port to DSA entries */
3593 mvpp2_prs_dsa_tag_set(priv, port, true,
3594 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3595 mvpp2_prs_dsa_tag_set(priv, port, true,
3596 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3597 /* Remove port from EDSA entries */
3598 mvpp2_prs_dsa_tag_set(priv, port, false,
3599 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3600 mvpp2_prs_dsa_tag_set(priv, port, false,
3601 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3604 case MVPP2_TAG_TYPE_MH:
3605 case MVPP2_TAG_TYPE_NONE:
3606 /* Remove port from EDSA and DSA entries */
3607 mvpp2_prs_dsa_tag_set(priv, port, false,
3608 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3609 mvpp2_prs_dsa_tag_set(priv, port, false,
3610 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3611 mvpp2_prs_dsa_tag_set(priv, port, false,
3612 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3613 mvpp2_prs_dsa_tag_set(priv, port, false,
3614 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3618 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3625 /* Set prs flow for the port */
3626 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3628 struct mvpp2_prs_entry *pe;
3631 pe = mvpp2_prs_flow_find(port->priv, port->id);
3633 /* No such entry exists */
3635 /* Go through all the entries from last to first */
3636 tid = mvpp2_prs_tcam_first_free(port->priv,
3637 MVPP2_PE_LAST_FREE_TID,
3638 MVPP2_PE_FIRST_FREE_TID);
3642 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3646 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3650 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3651 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3653 /* Update shadow table */
3654 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3657 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3658 mvpp2_prs_hw_write(port->priv, pe);
3664 /* Classifier configuration routines */
3666 /* Update classification flow table registers */
3667 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3668 struct mvpp2_cls_flow_entry *fe)
3670 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3671 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3672 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3673 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3676 /* Update classification lookup table register */
3677 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3678 struct mvpp2_cls_lookup_entry *le)
3682 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3683 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3684 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3687 /* Classifier default initialization */
3688 static void mvpp2_cls_init(struct mvpp2 *priv)
3690 struct mvpp2_cls_lookup_entry le;
3691 struct mvpp2_cls_flow_entry fe;
3694 /* Enable classifier */
3695 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3697 /* Clear classifier flow table */
3698 memset(&fe.data, 0, sizeof(fe.data));
3699 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3701 mvpp2_cls_flow_write(priv, &fe);
3704 /* Clear classifier lookup table */
3706 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3709 mvpp2_cls_lookup_write(priv, &le);
3712 mvpp2_cls_lookup_write(priv, &le);
3716 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3718 struct mvpp2_cls_lookup_entry le;
3721 /* Set way for the port */
3722 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3723 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3724 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3726 /* Pick the entry to be accessed in lookup ID decoding table
3727 * according to the way and lkpid.
3729 le.lkpid = port->id;
3733 /* Set initial CPU queue for receiving packets */
3734 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3735 le.data |= port->first_rxq;
3737 /* Disable classification engines */
3738 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3740 /* Update lookup ID table entry */
3741 mvpp2_cls_lookup_write(port->priv, &le);
3744 /* Set CPU queue number for oversize packets */
3745 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3749 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3750 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3752 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3753 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3755 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3756 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3757 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3760 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3762 if (likely(pool->frag_size <= PAGE_SIZE))
3763 return netdev_alloc_frag(pool->frag_size);
3765 return kmalloc(pool->frag_size, GFP_ATOMIC);
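/* Buffers that fit in a page come from the page-fragment allocator,
 * which is cheaper for the RX refill path; larger buffers fall back
 * to kmalloc(). mvpp2_frag_free() below must mirror this split when
 * releasing a buffer.
 */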
3768 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3770 if (likely(pool->frag_size <= PAGE_SIZE))
3771 skb_free_frag(data);
3776 /* Buffer Manager configuration routines */
3779 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3781 struct mvpp2_bm_pool *bm_pool, int size)
3785 /* Number of buffer pointers must be a multiple of 16, as per
3786 * hardware constraints
3788 if (!IS_ALIGNED(size, 16))
3791 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3792 * bytes per buffer pointer
3794 if (priv->hw_version == MVPP21)
3795 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3797 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3799 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
3802 if (!bm_pool->virt_addr)
3805 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3806 MVPP2_BM_POOL_PTR_ALIGN)) {
3807 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3808 bm_pool->virt_addr, bm_pool->dma_addr);
3809 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3810 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3814 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3815 lower_32_bits(bm_pool->dma_addr));
3816 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3818 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3819 val |= MVPP2_BM_START_MASK;
3820 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3822 bm_pool->type = MVPP2_BM_FREE;
3823 bm_pool->size = size;
3824 bm_pool->pkt_size = 0;
3825 bm_pool->buf_num = 0;
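/* Both constraints checked above come from the hardware: the pool
 * must hold a multiple of 16 buffer pointers, and the pointer ring
 * must be MVPP2_BM_POOL_PTR_ALIGN-byte aligned. dma_alloc_coherent()
 * normally returns suitably aligned memory, so the IS_ALIGNED()
 * check is a defensive error path rather than an expected failure.
 */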
3830 /* Set pool buffer size */
3831 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3832 struct mvpp2_bm_pool *bm_pool,
3837 bm_pool->buf_size = buf_size;
3839 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3840 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3843 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3844 struct mvpp2_bm_pool *bm_pool,
3845 dma_addr_t *dma_addr,
3846 phys_addr_t *phys_addr)
3848 int cpu = get_cpu();
3850 *dma_addr = mvpp2_percpu_read(priv, cpu,
3851 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3852 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
3854 if (priv->hw_version == MVPP22) {
3856 u32 dma_addr_highbits, phys_addr_highbits;
3858 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
3859 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3860 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3861 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3863 if (sizeof(dma_addr_t) == 8)
3864 *dma_addr |= (u64)dma_addr_highbits << 32;
3866 if (sizeof(phys_addr_t) == 8)
3867 *phys_addr |= (u64)phys_addr_highbits << 32;
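/* On PPv2.2 buffer addresses can exceed 32 bits, but the PHY/VIRT
 * alloc registers only return the low 32 bits of each address; the
 * high bits of both are packed into the single
 * MVPP22_BM_ADDR_HIGH_ALLOC register and are spliced back in here
 * whenever dma_addr_t/phys_addr_t are 64-bit.
 */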
3873 /* Free all buffers from the pool */
3874 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3875 struct mvpp2_bm_pool *bm_pool)
3879 for (i = 0; i < bm_pool->buf_num; i++) {
3880 dma_addr_t buf_dma_addr;
3881 phys_addr_t buf_phys_addr;
3884 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3885 &buf_dma_addr, &buf_phys_addr);
3887 dma_unmap_single(dev, buf_dma_addr,
3888 bm_pool->buf_size, DMA_FROM_DEVICE);
3890 data = (void *)phys_to_virt(buf_phys_addr);
3894 mvpp2_frag_free(bm_pool, data);
3897 /* Update BM driver with number of buffers removed from pool */
3898 bm_pool->buf_num -= i;
3902 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3904 struct mvpp2_bm_pool *bm_pool)
3908 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3909 if (bm_pool->buf_num) {
3910 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3914 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3915 val |= MVPP2_BM_STOP_MASK;
3916 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3918 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3924 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3928 struct mvpp2_bm_pool *bm_pool;
3930 /* Create all pools with maximum size */
3931 size = MVPP2_BM_POOL_SIZE_MAX;
3932 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3933 bm_pool = &priv->bm_pools[i];
3935 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3937 goto err_unroll_pools;
3938 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3943 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3944 for (i = i - 1; i >= 0; i--)
3945 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
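/* Mask and clear the BM interrupts, then allocate and create all pools */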
3949 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3953 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3954 /* Mask BM all interrupts */
3955 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3956 /* Clear BM cause register */
3957 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3960 /* Allocate and initialize BM pools */
3961 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3962 sizeof(*priv->bm_pools), GFP_KERNEL);
3963 if (!priv->bm_pools)
3966 err = mvpp2_bm_pools_init(pdev, priv);
3972 /* Attach long pool to rxq */
3973 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3974 int lrxq, int long_pool)
3979 /* Get queue physical ID */
3980 prxq = port->rxqs[lrxq]->id;
3982 if (port->priv->hw_version == MVPP21)
3983 mask = MVPP21_RXQ_POOL_LONG_MASK;
3985 mask = MVPP22_RXQ_POOL_LONG_MASK;
3987 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3989 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
3990 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3993 /* Attach short pool to rxq */
3994 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3995 int lrxq, int short_pool)
4000 /* Get queue physical ID */
4001 prxq = port->rxqs[lrxq]->id;
4003 if (port->priv->hw_version == MVPP21)
4004 mask = MVPP21_RXQ_POOL_SHORT_MASK;
4006 mask = MVPP22_RXQ_POOL_SHORT_MASK;
4008 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4010 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
4011 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
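/* Allocate a buffer fragment for the pool and DMA-map it for reception */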
4014 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4015 struct mvpp2_bm_pool *bm_pool,
4016 dma_addr_t *buf_dma_addr,
4017 phys_addr_t *buf_phys_addr,
4020 dma_addr_t dma_addr;
4023 data = mvpp2_frag_alloc(bm_pool);
4027 dma_addr = dma_map_single(port->dev->dev.parent, data,
4028 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4030 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
4031 mvpp2_frag_free(bm_pool, data);
4034 *buf_dma_addr = dma_addr;
4035 *buf_phys_addr = virt_to_phys(data);
4040 /* Release buffer to BM */
4041 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
4042 dma_addr_t buf_dma_addr,
4043 phys_addr_t buf_phys_addr)
4045 int cpu = get_cpu();
4047 if (port->priv->hw_version == MVPP22) {
4050 if (sizeof(dma_addr_t) == 8)
4051 val |= upper_32_bits(buf_dma_addr) &
4052 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4054 if (sizeof(phys_addr_t) == 8)
4055 val |= (upper_32_bits(buf_phys_addr)
4056 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4057 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4059 mvpp2_percpu_write(port->priv, cpu,
4060 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
4063 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4064 * returned in the "cookie" field of the RX
4065 * descriptor. Instead of storing the virtual address, we
4066 * store the physical address
4068 mvpp2_percpu_write(port->priv, cpu,
4069 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4070 mvpp2_percpu_write(port->priv, cpu,
4071 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
4076 /* Allocate buffers for the pool */
4077 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4078 struct mvpp2_bm_pool *bm_pool, int buf_num)
4080 int i, buf_size, total_size;
4081 dma_addr_t dma_addr;
4082 phys_addr_t phys_addr;
4085 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4086 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4089 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4090 netdev_err(port->dev,
4091 "cannot allocate %d buffers for pool %d\n",
4092 buf_num, bm_pool->id);
4096 for (i = 0; i < buf_num; i++) {
4097 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4098 &phys_addr, GFP_KERNEL);
4102 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
4106 /* Update BM driver with number of buffers added to pool */
4107 bm_pool->buf_num += i;
4109 netdev_dbg(port->dev,
4110 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4111 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4112 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4114 netdev_dbg(port->dev,
4115 "%s pool %d: %d of %d buffers added\n",
4116 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4117 bm_pool->id, i, buf_num);
4121 /* Notify the driver that BM pool is being used as a specific type and return the
4122 * pool pointer on success
4124 static struct mvpp2_bm_pool *
4125 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4128 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4131 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4132 netdev_err(port->dev, "mixing pool types is forbidden\n");
4136 if (new_pool->type == MVPP2_BM_FREE)
4137 new_pool->type = type;
4139 /* Allocate buffers in case BM pool is used as long pool, but packet
4140 * size doesn't match MTU or BM pool hasn't been used yet
4142 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4143 (new_pool->pkt_size == 0)) {
4146 /* Set default buffer number or free all the buffers in case
4147 * the pool is not empty
4149 pkts_num = new_pool->buf_num;
4151 pkts_num = type == MVPP2_BM_SWF_LONG ?
4152 MVPP2_BM_LONG_BUF_NUM :
4153 MVPP2_BM_SHORT_BUF_NUM;
4155 mvpp2_bm_bufs_free(port->dev->dev.parent,
4156 port->priv, new_pool);
4158 new_pool->pkt_size = pkt_size;
4159 new_pool->frag_size =
4160 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4161 MVPP2_SKB_SHINFO_SIZE;
4163 /* Allocate buffers for this pool */
4164 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4165 if (num != pkts_num) {
4166 WARN(1, "pool %d: %d of %d allocated\n",
4167 new_pool->id, num, pkts_num);
4172 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4173 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4178 /* Initialize pools for swf */
4179 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4183 if (!port->pool_long) {
4185 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4188 if (!port->pool_long)
4191 port->pool_long->port_map |= (1 << port->id);
4193 for (rxq = 0; rxq < port->nrxqs; rxq++)
4194 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4197 if (!port->pool_short) {
4199 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4201 MVPP2_BM_SHORT_PKT_SIZE);
4202 if (!port->pool_short)
4205 port->pool_short->port_map |= (1 << port->id);
4207 for (rxq = 0; rxq < port->nrxqs; rxq++)
4208 mvpp2_rxq_short_pool_set(port, rxq,
4209 port->pool_short->id);
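/* Re-size the long pool's buffers to match a new MTU: free the current
 * buffers, update the packet/fragment sizes and refill the pool.
 */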
4215 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4217 struct mvpp2_port *port = netdev_priv(dev);
4218 struct mvpp2_bm_pool *port_pool = port->pool_long;
4219 int num, pkts_num = port_pool->buf_num;
4220 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4222 /* Update BM pool with new buffer size */
4223 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
4224 if (port_pool->buf_num) {
4225 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4229 port_pool->pkt_size = pkt_size;
4230 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4231 MVPP2_SKB_SHINFO_SIZE;
4232 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4233 if (num != pkts_num) {
4234 WARN(1, "pool %d: %d of %d allocated\n",
4235 port_pool->id, num, pkts_num);
4239 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4240 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4242 netdev_update_features(dev);
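/* Enable the port's Rx/Tx interrupts for every software thread in use */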
4246 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4248 int i, sw_thread_mask = 0;
4250 for (i = 0; i < port->nqvecs; i++)
4251 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4253 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4254 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
4257 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4259 int i, sw_thread_mask = 0;
4261 for (i = 0; i < port->nqvecs; i++)
4262 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4264 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4265 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4268 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4270 struct mvpp2_port *port = qvec->port;
4272 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4273 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4276 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4278 struct mvpp2_port *port = qvec->port;
4280 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4281 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
4284 /* Mask the current CPU's Rx/Tx interrupts.
4285 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4286 * so using smp_processor_id() is OK.
4288 static void mvpp2_interrupts_mask(void *arg)
4290 struct mvpp2_port *port = arg;
4292 mvpp2_percpu_write(port->priv, smp_processor_id(),
4293 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4296 /* Unmask the current CPU's Rx/Tx interrupts.
4297 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4298 * so using smp_processor_id() is OK.
4300 static void mvpp2_interrupts_unmask(void *arg)
4302 struct mvpp2_port *port = arg;
4305 val = MVPP2_CAUSE_MISC_SUM_MASK |
4306 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4307 if (port->has_tx_irqs)
4308 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4310 mvpp2_percpu_write(port->priv, smp_processor_id(),
4311 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
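/* Mask or unmask the Rx interrupts of the port's shared (non per-CPU)
 * queue vectors. Only relevant on PPv2.2.
 */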
4315 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4320 if (port->priv->hw_version != MVPP22)
4326 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4328 for (i = 0; i < port->nqvecs; i++) {
4329 struct mvpp2_queue_vector *v = port->qvecs + i;
4331 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4334 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4335 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4339 /* Port configuration routines */
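/* Configure the GoP (group of ports) glue logic for an RGMII port */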
4341 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4343 struct mvpp2 *priv = port->priv;
4346 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4347 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4348 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4350 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4351 if (port->gop_id == 2)
4352 val |= GENCONF_CTRL0_PORT0_RGMII;
4353 else if (port->gop_id == 3)
4354 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4355 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4358 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4360 struct mvpp2 *priv = port->priv;
4363 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4364 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4365 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4366 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4368 if (port->gop_id > 1) {
4369 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4370 if (port->gop_id == 2)
4371 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4372 else if (port->gop_id == 3)
4373 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4374 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4378 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4380 struct mvpp2 *priv = port->priv;
4381 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4382 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4386 val = readl(xpcs + MVPP22_XPCS_CFG0);
4387 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4388 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4389 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4390 writel(val, xpcs + MVPP22_XPCS_CFG0);
4393 val = readl(mpcs + MVPP22_MPCS_CTRL);
4394 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4395 writel(val, mpcs + MVPP22_MPCS_CTRL);
4397 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4398 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4399 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4400 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4401 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4403 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4404 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4405 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4408 static int mvpp22_gop_init(struct mvpp2_port *port)
4410 struct mvpp2 *priv = port->priv;
4413 if (!priv->sysctrl_base)
4416 switch (port->phy_interface) {
4417 case PHY_INTERFACE_MODE_RGMII:
4418 case PHY_INTERFACE_MODE_RGMII_ID:
4419 case PHY_INTERFACE_MODE_RGMII_RXID:
4420 case PHY_INTERFACE_MODE_RGMII_TXID:
4421 if (port->gop_id == 0)
4423 mvpp22_gop_init_rgmii(port);
4425 case PHY_INTERFACE_MODE_SGMII:
4426 mvpp22_gop_init_sgmii(port);
4428 case PHY_INTERFACE_MODE_10GKR:
4429 if (port->gop_id != 0)
4431 mvpp22_gop_init_10gkr(port);
4434 goto unsupported_conf;
4437 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4438 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4439 GENCONF_PORT_CTRL1_EN(port->gop_id);
4440 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4442 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4443 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4444 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4446 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4447 val |= GENCONF_SOFT_RESET1_GOP;
4448 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4454 netdev_err(port->dev, "Invalid port configuration\n");
4458 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4462 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4463 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4464 /* Enable the GMAC link status irq for this port */
4465 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4466 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4467 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4470 if (port->gop_id == 0) {
4471 /* Enable the XLG/GIG irqs for this port */
4472 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4473 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4474 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4476 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4477 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4481 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4485 if (port->gop_id == 0) {
4486 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4487 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4488 MVPP22_XLG_EXT_INT_MASK_GIG);
4489 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4492 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4493 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4494 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4495 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4496 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4500 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4504 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4505 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4506 val = readl(port->base + MVPP22_GMAC_INT_MASK);
4507 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4508 writel(val, port->base + MVPP22_GMAC_INT_MASK);
4511 if (port->gop_id == 0) {
4512 val = readl(port->base + MVPP22_XLG_INT_MASK);
4513 val |= MVPP22_XLG_INT_MASK_LINK;
4514 writel(val, port->base + MVPP22_XLG_INT_MASK);
4517 mvpp22_gop_unmask_irq(port);
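/* Set the COMPHY lane mode according to the port's PHY interface type,
 * then power the serdes lane on.
 */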
4520 static int mvpp22_comphy_init(struct mvpp2_port *port)
4528 switch (port->phy_interface) {
4529 case PHY_INTERFACE_MODE_SGMII:
4530 mode = PHY_MODE_SGMII;
4532 case PHY_INTERFACE_MODE_10GKR:
4533 mode = PHY_MODE_10GKR;
4539 ret = phy_set_mode(port->comphy, mode);
4543 return phy_power_on(port->comphy);
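/* Configure the GMAC datapath for SGMII or RGMII operation; in-band
 * autonegotiation is only enabled for SGMII.
 */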
4546 static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4550 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4551 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4552 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4553 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4554 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4555 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4556 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
4557 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4558 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4559 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4560 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4561 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4562 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4565 /* The port is connected to a copper PHY */
4566 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4567 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4568 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4570 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4571 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4572 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4573 MVPP2_GMAC_AN_DUPLEX_EN;
4574 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4575 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
4576 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4579 static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
4583 /* Force link down */
4584 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4585 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4586 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
4587 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4589 /* Set the GMAC in a reset state */
4590 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4591 val |= MVPP2_GMAC_PORT_RESET_MASK;
4592 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4594 /* Configure the PCS and in-band AN */
4595 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4596 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4597 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
4598 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
4599 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4601 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4603 mvpp2_port_mii_gmac_configure_mode(port);
4605 /* Unset the GMAC reset state */
4606 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4607 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
4608 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4610 /* Stop forcing link down */
4611 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4612 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
4613 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4616 static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
4620 if (port->gop_id != 0)
4623 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4624 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4625 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4627 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
4628 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
4629 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
4630 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
4633 static void mvpp22_port_mii_set(struct mvpp2_port *port)
4637 /* Only GOP port 0 has an XLG MAC */
4638 if (port->gop_id == 0) {
4639 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4640 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4642 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4643 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4644 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4646 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4648 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4652 static void mvpp2_port_mii_set(struct mvpp2_port *port)
4654 if (port->priv->hw_version == MVPP22)
4655 mvpp22_port_mii_set(port);
4657 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4658 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4659 mvpp2_port_mii_gmac_configure(port);
4660 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4661 mvpp2_port_mii_xlg_configure(port);
4664 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4668 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4669 val |= MVPP2_GMAC_FC_ADV_EN;
4670 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4673 static void mvpp2_port_enable(struct mvpp2_port *port)
4677 /* Only GOP port 0 has an XLG MAC */
4678 if (port->gop_id == 0 &&
4679 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4680 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4681 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4682 val |= MVPP22_XLG_CTRL0_PORT_EN |
4683 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4684 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
4685 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4687 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4688 val |= MVPP2_GMAC_PORT_EN_MASK;
4689 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4690 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4694 static void mvpp2_port_disable(struct mvpp2_port *port)
4698 /* Only GOP port 0 has an XLG MAC */
4699 if (port->gop_id == 0 &&
4700 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4701 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4702 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4703 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4704 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4705 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4707 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4708 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4709 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4713 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4714 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4718 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4719 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4720 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4723 /* Configure loopback port */
4724 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4728 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4730 if (port->speed == 1000)
4731 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4733 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4735 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4736 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4738 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4740 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
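/* De-assert the GMAC port reset and busy-wait until the reset bit reads
 * back as zero.
 */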
4743 static void mvpp2_port_reset(struct mvpp2_port *port)
4747 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4748 ~MVPP2_GMAC_PORT_RESET_MASK;
4749 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4751 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4752 MVPP2_GMAC_PORT_RESET_MASK)
4756 /* Change maximum receive size of the port */
4757 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4761 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4762 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4763 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4764 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4765 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4768 /* Change maximum receive size of the port */
4769 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
4773 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
4774 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
4775 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4776 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
4777 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
4780 /* Set defaults to the MVPP2 port */
4781 static void mvpp2_defaults_set(struct mvpp2_port *port)
4783 int tx_port_num, val, queue, lrxq;
4785 if (port->priv->hw_version == MVPP21) {
4786 /* Configure port to loopback if needed */
4787 if (port->flags & MVPP2_F_LOOPBACK)
4788 mvpp2_port_loopback_set(port);
4790 /* Update TX FIFO MIN Threshold */
4791 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4792 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4793 /* Min. TX threshold must be less than minimal packet length */
4794 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4795 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4798 /* Disable Legacy WRR, Disable EJP, Release from reset */
4799 tx_port_num = mvpp2_egress_port(port);
4800 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4802 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4804 /* Close bandwidth for all queues */
4805 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
4806 mvpp2_write(port->priv,
4807 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
4809 /* Set refill period to 1 usec, refill tokens
4810 * and bucket size to maximum
4812 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4813 port->priv->tclk / USEC_PER_SEC);
4814 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4815 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4816 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4817 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4818 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4819 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4820 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4822 /* Set MaximumLowLatencyPacketSize value to 256 */
4823 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4824 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4825 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4827 /* Enable Rx cache snoop */
4828 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
4829 queue = port->rxqs[lrxq]->id;
4830 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4831 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4832 MVPP2_SNOOP_BUF_HDR_MASK;
4833 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4836 /* By default, mask all interrupts on all present CPUs */
4837 mvpp2_interrupts_disable(port);
4840 /* Enable/disable receiving packets */
4841 static void mvpp2_ingress_enable(struct mvpp2_port *port)
4846 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
4847 queue = port->rxqs[lrxq]->id;
4848 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4849 val &= ~MVPP2_RXQ_DISABLE_MASK;
4850 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4854 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4859 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
4860 queue = port->rxqs[lrxq]->id;
4861 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4862 val |= MVPP2_RXQ_DISABLE_MASK;
4863 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4867 /* Enable transmit via physical egress queue
4868 * - HW starts taking descriptors from DRAM
4870 static void mvpp2_egress_enable(struct mvpp2_port *port)
4874 int tx_port_num = mvpp2_egress_port(port);
4876 /* Enable all initialized TXs. */
4878 for (queue = 0; queue < port->ntxqs; queue++) {
4879 struct mvpp2_tx_queue *txq = port->txqs[queue];
4882 qmap |= (1 << queue);
4885 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4886 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4889 /* Disable transmit via physical egress queue
4890 * - HW doesn't take descriptors from DRAM
4892 static void mvpp2_egress_disable(struct mvpp2_port *port)
4896 int tx_port_num = mvpp2_egress_port(port);
4898 /* Issue stop command for active channels only */
4899 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4900 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4901 MVPP2_TXP_SCHED_ENQ_MASK;
4903 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4904 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4906 /* Wait for all Tx activity to terminate. */
4909 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4910 netdev_warn(port->dev,
4911 "Tx stop timed out, status=0x%08x\n",
4918 /* Check the port TX Command register to verify that all
4919 * Tx queues are stopped
4921 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4922 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4925 /* Rx descriptors helper methods */
4927 /* Get number of Rx descriptors occupied by received packets */
4929 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4931 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4933 return val & MVPP2_RXQ_OCCUPIED_MASK;
4936 /* Update Rx queue status with the number of occupied and available
4937 * Rx descriptor slots.
4940 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4941 int used_count, int free_count)
4943 /* Decrement the number of used descriptors and increment
4944 * the number of free descriptors.
4946 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4948 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4951 /* Get pointer to next RX descriptor to be processed by SW */
4952 static inline struct mvpp2_rx_desc *
4953 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4955 int rx_desc = rxq->next_desc_to_proc;
4957 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4958 prefetch(rxq->descs + rxq->next_desc_to_proc);
4959 return rxq->descs + rx_desc;
4962 /* Set rx queue offset */
4963 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4964 int prxq, int offset)
4968 /* Convert offset from bytes to units of 32 bytes */
4969 offset = offset >> 5;
4971 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4972 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4975 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4976 MVPP2_RXQ_PACKET_OFFSET_MASK);
4978 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4981 /* Tx descriptors helper methods */
4983 /* Get pointer to next Tx descriptor to be processed (send) by HW */
4984 static struct mvpp2_tx_desc *
4985 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4987 int tx_desc = txq->next_desc_to_proc;
4989 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4990 return txq->descs + tx_desc;
4993 /* Update HW with number of aggregated Tx descriptors to be sent
4995 * Called only from mvpp2_tx(), so migration is disabled, using
4996 * smp_processor_id() is OK.
4998 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5000 /* aggregated access - relevant TXQ number is written in TX desc */
5001 mvpp2_percpu_write(port->priv, smp_processor_id(),
5002 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
5006 /* Check if there are enough free descriptors in aggregated txq.
5007 * If not, update the number of occupied descriptors and repeat the check.
5009 * Called only from mvpp2_tx(), so migration is disabled, using
5010 * smp_processor_id() is OK.
5012 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5013 struct mvpp2_tx_queue *aggr_txq, int num)
5015 if ((aggr_txq->count + num) > aggr_txq->size) {
5016 /* Update number of occupied aggregated Tx descriptors */
5017 int cpu = smp_processor_id();
5018 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5020 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5023 if ((aggr_txq->count + num) > aggr_txq->size)
5029 /* Reserved Tx descriptors allocation request
5031 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5032 * only by mvpp2_tx(), so migration is disabled, using
5033 * smp_processor_id() is OK.
5035 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5036 struct mvpp2_tx_queue *txq, int num)
5039 int cpu = smp_processor_id();
5041 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
5042 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
5044 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
5046 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5049 /* Check if there are enough reserved descriptors for transmission.
5050 * If not, request chunk of reserved descriptors and check again.
5052 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5053 struct mvpp2_tx_queue *txq,
5054 struct mvpp2_txq_pcpu *txq_pcpu,
5057 int req, cpu, desc_count;
5059 if (txq_pcpu->reserved_num >= num)
5062 /* Not enough descriptors reserved! Update the reserved descriptor
5063 * count and check again.
5067 /* Compute total of used descriptors */
5068 for_each_present_cpu(cpu) {
5069 struct mvpp2_txq_pcpu *txq_pcpu_aux;
5071 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5072 desc_count += txq_pcpu_aux->count;
5073 desc_count += txq_pcpu_aux->reserved_num;
5076 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5080 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5083 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5085 /* OK, the descriptor count has been updated: check again. */
5086 if (txq_pcpu->reserved_num < num)
5091 /* Release the last allocated Tx descriptor. Useful to handle DMA
5092 * mapping failures in the Tx path.
5094 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5096 if (txq->next_desc_to_proc == 0)
5097 txq->next_desc_to_proc = txq->last_desc - 1;
5099 txq->next_desc_to_proc--;
5102 /* Set Tx descriptors fields relevant for CSUM calculation */
5103 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
5104 int ip_hdr_len, int l4_proto)
5108 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5109 * G_L4_chk, L4_type required only for checksum calculation
5111 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5112 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5113 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5115 if (l3_proto == swab16(ETH_P_IP)) {
5116 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5117 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5119 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5122 if (l4_proto == IPPROTO_TCP) {
5123 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5124 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5125 } else if (l4_proto == IPPROTO_UDP) {
5126 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5127 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5129 command |= MVPP2_TXD_L4_CSUM_NOT;
5135 /* Get number of sent descriptors and decrement counter.
5136 * The number of sent descriptors is returned.
5139 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5140 * (migration disabled) and from the TX completion tasklet (migration
5141 * disabled) so using smp_processor_id() is OK.
5143 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5144 struct mvpp2_tx_queue *txq)
5148 /* Reading status reg resets transmitted descriptor counter */
5149 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
5150 MVPP2_TXQ_SENT_REG(txq->id));
5152 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5153 MVPP2_TRANSMITTED_COUNT_OFFSET;
5156 /* Called through on_each_cpu(), so runs on all CPUs, with migration
5157 * disabled, therefore using smp_processor_id() is OK.
5159 static void mvpp2_txq_sent_counter_clear(void *arg)
5161 struct mvpp2_port *port = arg;
5164 for (queue = 0; queue < port->ntxqs; queue++) {
5165 int id = port->txqs[queue]->id;
5167 mvpp2_percpu_read(port->priv, smp_processor_id(),
5168 MVPP2_TXQ_SENT_REG(id));
5172 /* Set max sizes for Tx queues */
5173 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5176 int txq, tx_port_num;
5178 mtu = port->pkt_size * 8;
5179 if (mtu > MVPP2_TXP_MTU_MAX)
5180 mtu = MVPP2_TXP_MTU_MAX;
5182 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5185 /* Indirect access to registers */
5186 tx_port_num = mvpp2_egress_port(port);
5187 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5190 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5191 val &= ~MVPP2_TXP_MTU_MAX;
5193 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5195 /* TXP token size and all TXQs token size must be larger than MTU */
5196 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5197 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5200 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5202 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5205 for (txq = 0; txq < port->ntxqs; txq++) {
5206 val = mvpp2_read(port->priv,
5207 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5208 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5212 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5214 mvpp2_write(port->priv,
5215 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5221 /* Set the number of packets that will be received before an Rx
5222 * interrupt is generated by HW.
5224 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
5225 struct mvpp2_rx_queue *rxq)
5227 int cpu = get_cpu();
5229 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5230 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
5232 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5233 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5239 /* For some reason in the LSP this is done on each CPU. Why? */
5240 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5241 struct mvpp2_tx_queue *txq)
5243 int cpu = get_cpu();
5246 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5247 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5249 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5250 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5251 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
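/* Convert a coalescing delay between microseconds and TCLK cycles (and
 * back below), saturating at U32_MAX. For example, with a 250 MHz TCLK,
 * 100 usec corresponds to 25000 cycles.
 */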
5256 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5258 u64 tmp = (u64)clk_hz * usec;
5260 do_div(tmp, USEC_PER_SEC);
5262 return tmp > U32_MAX ? U32_MAX : tmp;
5265 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5267 u64 tmp = (u64)cycles * USEC_PER_SEC;
5269 do_div(tmp, clk_hz);
5271 return tmp > U32_MAX ? U32_MAX : tmp;
5274 /* Set the time delay in usec before Rx interrupt */
5275 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
5276 struct mvpp2_rx_queue *rxq)
5278 unsigned long freq = port->priv->tclk;
5279 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5281 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5283 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5285 /* re-evaluate to get actual register value */
5286 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5289 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
5292 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5294 unsigned long freq = port->priv->tclk;
5295 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5297 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5298 port->tx_time_coal =
5299 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5301 /* re-evaluate to get actual register value */
5302 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5305 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5308 /* Free Tx queue skbuffs */
5309 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5310 struct mvpp2_tx_queue *txq,
5311 struct mvpp2_txq_pcpu *txq_pcpu, int num)
5315 for (i = 0; i < num; i++) {
5316 struct mvpp2_txq_pcpu_buf *tx_buf =
5317 txq_pcpu->buffs + txq_pcpu->txq_get_index;
5319 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5320 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5321 tx_buf->size, DMA_TO_DEVICE);
5323 dev_kfree_skb_any(tx_buf->skb);
5325 mvpp2_txq_inc_get(txq_pcpu);
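/* Map the highest bit set in an interrupt cause to the corresponding
 * Rx (or, below, Tx) queue.
 */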
5329 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5332 int queue = fls(cause) - 1;
5334 return port->rxqs[queue];
5337 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5340 int queue = fls(cause) - 1;
5342 return port->txqs[queue];
5345 /* Handle end of transmission */
5346 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5347 struct mvpp2_txq_pcpu *txq_pcpu)
5349 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5352 if (txq_pcpu->cpu != smp_processor_id())
5353 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
5355 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5358 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5360 txq_pcpu->count -= tx_done;
5362 if (netif_tx_queue_stopped(nq))
5363 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
5364 netif_tx_wake_queue(nq);
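/* Service every Tx queue flagged in 'cause' on this CPU and return the
 * number of descriptors still waiting for completion.
 */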
5367 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5370 struct mvpp2_tx_queue *txq;
5371 struct mvpp2_txq_pcpu *txq_pcpu;
5372 unsigned int tx_todo = 0;
5375 txq = mvpp2_get_tx_queue(port, cause);
5379 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5381 if (txq_pcpu->count) {
5382 mvpp2_txq_done(port, txq, txq_pcpu);
5383 tx_todo += txq_pcpu->count;
5386 cause &= ~(1 << txq->log_id);
5391 /* Rx/Tx queue initialization/cleanup methods */
5393 /* Allocate and initialize descriptors for aggr TXQ */
5394 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5395 struct mvpp2_tx_queue *aggr_txq, int cpu,
5400 /* Allocate memory for TX descriptors */
5401 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
5402 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5403 &aggr_txq->descs_dma, GFP_KERNEL);
5404 if (!aggr_txq->descs)
5407 aggr_txq->last_desc = aggr_txq->size - 1;
5409 /* The aggregated TXQ is never reset (HW workaround): resume from the HW index */
5410 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5411 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5413 /* Set Tx descriptors queue starting address - indirect access */
5416 if (priv->hw_version == MVPP21)
5417 txq_dma = aggr_txq->descs_dma;
5419 txq_dma = aggr_txq->descs_dma >>
5420 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5422 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
5423 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
5424 MVPP2_AGGR_TXQ_SIZE);
5429 /* Create a specified Rx queue */
5430 static int mvpp2_rxq_init(struct mvpp2_port *port,
5431 struct mvpp2_rx_queue *rxq)
5437 rxq->size = port->rx_ring_size;
5439 /* Allocate memory for RX descriptors */
5440 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5441 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5442 &rxq->descs_dma, GFP_KERNEL);
5446 rxq->last_desc = rxq->size - 1;
5448 /* Zero occupied and non-occupied counters - direct access */
5449 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5451 /* Set Rx descriptors queue starting address - indirect access */
5453 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5454 if (port->priv->hw_version == MVPP21)
5455 rxq_dma = rxq->descs_dma;
5457 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
5458 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
5459 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
5460 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
5464 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
5466 /* Set coalescing pkts and time */
5467 mvpp2_rx_pkts_coal_set(port, rxq);
5468 mvpp2_rx_time_coal_set(port, rxq);
5470 /* Add number of descriptors ready for receiving packets */
5471 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
5476 /* Push packets received by the RXQ to BM pool */
5477 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
5478 struct mvpp2_rx_queue *rxq)
5482 rx_received = mvpp2_rxq_received(port, rxq->id);
5486 for (i = 0; i < rx_received; i++) {
5487 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5488 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5491 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5492 MVPP2_RXD_BM_POOL_ID_OFFS;
5494 mvpp2_bm_pool_put(port, pool,
5495 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5496 mvpp2_rxdesc_cookie_get(port, rx_desc));
5498 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5501 /* Cleanup Rx queue */
5502 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5503 struct mvpp2_rx_queue *rxq)
5507 mvpp2_rxq_drop_pkts(port, rxq);
5510 dma_free_coherent(port->dev->dev.parent,
5511 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5517 rxq->next_desc_to_proc = 0;
5520 /* Clear the Rx descriptors queue starting address and size,
5521 * and the free descriptor count
5523 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5525 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5526 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5527 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
5531 /* Create and initialize a Tx queue */
5532 static int mvpp2_txq_init(struct mvpp2_port *port,
5533 struct mvpp2_tx_queue *txq)
5536 int cpu, desc, desc_per_txq, tx_port_num;
5537 struct mvpp2_txq_pcpu *txq_pcpu;
5539 txq->size = port->tx_ring_size;
5541 /* Allocate memory for Tx descriptors */
5542 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5543 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5544 &txq->descs_dma, GFP_KERNEL);
5548 txq->last_desc = txq->size - 1;
5550 /* Set Tx descriptors queue starting address - indirect access */
5552 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5553 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5555 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5556 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5557 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5558 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5559 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5560 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
5561 val &= ~MVPP2_TXQ_PENDING_MASK;
5562 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
5564 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5565 * for each existing TXQ.
5566 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
5567 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
5570 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5571 (txq->log_id * desc_per_txq);
5573 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5574 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5575 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
5578 /* WRR / EJP configuration - indirect access */
5579 tx_port_num = mvpp2_egress_port(port);
5580 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5582 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5583 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5584 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5585 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5586 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5588 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5589 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5592 for_each_present_cpu(cpu) {
5593 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5594 txq_pcpu->size = txq->size;
5595 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5596 sizeof(*txq_pcpu->buffs),
5598 if (!txq_pcpu->buffs)
5601 txq_pcpu->count = 0;
5602 txq_pcpu->reserved_num = 0;
5603 txq_pcpu->txq_put_index = 0;
5604 txq_pcpu->txq_get_index = 0;
5606 txq_pcpu->tso_headers =
5607 dma_alloc_coherent(port->dev->dev.parent,
5608 txq_pcpu->size * TSO_HEADER_SIZE,
5609 &txq_pcpu->tso_headers_dma,
5611 if (!txq_pcpu->tso_headers)
5618 /* Free allocated TXQ resources */
5619 static void mvpp2_txq_deinit(struct mvpp2_port *port,
5620 struct mvpp2_tx_queue *txq)
5622 struct mvpp2_txq_pcpu *txq_pcpu;
5625 for_each_present_cpu(cpu) {
5626 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5627 kfree(txq_pcpu->buffs);
5629 dma_free_coherent(port->dev->dev.parent,
5630 txq_pcpu->size * TSO_HEADER_SIZE,
5631 txq_pcpu->tso_headers,
5632 txq_pcpu->tso_headers_dma);
5636 dma_free_coherent(port->dev->dev.parent,
5637 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5638 txq->descs, txq->descs_dma);
5642 txq->next_desc_to_proc = 0;
5645 /* Set minimum bandwidth for disabled TXQs */
5646 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
5648 /* Set Tx descriptors queue starting address and size */
5650 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5651 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5652 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5656 /* Drain and clean up a Tx queue */
5657 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5659 struct mvpp2_txq_pcpu *txq_pcpu;
5660 int delay, pending, cpu;
5664 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5665 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5666 val |= MVPP2_TXQ_DRAIN_EN_MASK;
5667 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5669 /* The napi queue has been stopped so wait for all packets
5670 * to be transmitted.
5674 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5675 netdev_warn(port->dev,
5676 "port %d: cleaning queue %d timed out\n",
5677 port->id, txq->log_id);
5683 pending = mvpp2_percpu_read(port->priv, cpu,
5684 MVPP2_TXQ_PENDING_REG);
5685 pending &= MVPP2_TXQ_PENDING_MASK;
5688 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5689 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5692 for_each_present_cpu(cpu) {
5693 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5695 /* Release all packets */
5696 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5699 txq_pcpu->count = 0;
5700 txq_pcpu->txq_put_index = 0;
5701 txq_pcpu->txq_get_index = 0;
5705 /* Cleanup all Tx queues */
5706 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5708 struct mvpp2_tx_queue *txq;
5712 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5714 /* Reset Tx ports and delete Tx queues */
5715 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5716 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5718 for (queue = 0; queue < port->ntxqs; queue++) {
5719 txq = port->txqs[queue];
5720 mvpp2_txq_clean(port, txq);
5721 mvpp2_txq_deinit(port, txq);
5724 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5726 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5727 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5730 /* Cleanup all Rx queues */
5731 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5735 for (queue = 0; queue < port->nrxqs; queue++)
5736 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5739 /* Init all Rx queues for port */
5740 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5744 for (queue = 0; queue < port->nrxqs; queue++) {
5745 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5752 mvpp2_cleanup_rxqs(port);
5756 /* Init all Tx queues for port */
5757 static int mvpp2_setup_txqs(struct mvpp2_port *port)
5759 struct mvpp2_tx_queue *txq;
5762 for (queue = 0; queue < port->ntxqs; queue++) {
5763 txq = port->txqs[queue];
5764 err = mvpp2_txq_init(port, txq);
5769 if (port->has_tx_irqs) {
5770 mvpp2_tx_time_coal_set(port);
5771 for (queue = 0; queue < port->ntxqs; queue++) {
5772 txq = port->txqs[queue];
5773 mvpp2_tx_pkts_coal_set(port, txq);
5777 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5781 mvpp2_cleanup_txqs(port);
5785 /* The callback for per-port interrupt */
5786 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5788 struct mvpp2_queue_vector *qv = dev_id;
5790 mvpp2_qvec_interrupt_disable(qv);
5792 napi_schedule(&qv->napi);
5797 /* Per-port interrupt for link status changes */
5798 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
5800 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
5801 struct net_device *dev = port->dev;
5802 bool event = false, link = false;
5805 mvpp22_gop_mask_irq(port);
5807 if (port->gop_id == 0 &&
5808 port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
5809 val = readl(port->base + MVPP22_XLG_INT_STAT);
5810 if (val & MVPP22_XLG_INT_STAT_LINK) {
5812 val = readl(port->base + MVPP22_XLG_STATUS);
5813 if (val & MVPP22_XLG_STATUS_LINK_UP)
5816 } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
5817 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5818 val = readl(port->base + MVPP22_GMAC_INT_STAT);
5819 if (val & MVPP22_GMAC_INT_STAT_LINK) {
5821 val = readl(port->base + MVPP2_GMAC_STATUS0);
5822 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
5827 if (!netif_running(dev) || !event)
5831 mvpp2_interrupts_enable(port);
5833 mvpp2_egress_enable(port);
5834 mvpp2_ingress_enable(port);
5835 netif_carrier_on(dev);
5836 netif_tx_wake_all_queues(dev);
5838 netif_tx_stop_all_queues(dev);
5839 netif_carrier_off(dev);
5840 mvpp2_ingress_disable(port);
5841 mvpp2_egress_disable(port);
5843 mvpp2_interrupts_disable(port);
5847 mvpp22_gop_unmask_irq(port);
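/* Propagate the PHY-negotiated speed and duplex to the GMAC; only
 * meaningful for RGMII and SGMII ports.
 */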
5851 static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
5852 struct phy_device *phydev)
5856 if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
5857 port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
5858 port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5859 port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5860 port->phy_interface != PHY_INTERFACE_MODE_SGMII)
5863 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5864 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5865 MVPP2_GMAC_CONFIG_GMII_SPEED |
5866 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5867 MVPP2_GMAC_AN_SPEED_EN |
5868 MVPP2_GMAC_AN_DUPLEX_EN);
5871 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5873 if (phydev->speed == SPEED_1000)
5874 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5875 else if (phydev->speed == SPEED_100)
5876 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5878 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
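/* Link event callback, invoked by the PHY framework when the link state
 * or negotiated parameters change: reconfigure the comphy/GoP if the
 * interface mode changed, then start or stop the datapath to follow the
 * link state.
 */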
5882 static void mvpp2_link_event(struct net_device *dev)
5884 struct mvpp2_port *port = netdev_priv(dev);
5885 struct phy_device *phydev = dev->phydev;
5886 bool link_reconfigured = false;
5890 if (port->phy_interface != phydev->interface && port->comphy) {
5891 /* disable current port for reconfiguration */
5892 mvpp2_interrupts_disable(port);
5893 netif_carrier_off(port->dev);
5894 mvpp2_port_disable(port);
5895 phy_power_off(port->comphy);
5897 /* comphy reconfiguration */
5898 port->phy_interface = phydev->interface;
5899 mvpp22_comphy_init(port);
5901 /* gop/mac reconfiguration */
5902 mvpp22_gop_init(port);
5903 mvpp2_port_mii_set(port);
5905 link_reconfigured = true;
5908 if ((port->speed != phydev->speed) ||
5909 (port->duplex != phydev->duplex)) {
5910 mvpp2_gmac_set_autoneg(port, phydev);
5912 port->duplex = phydev->duplex;
5913 port->speed = phydev->speed;
5917 if (phydev->link != port->link || link_reconfigured) {
5918 port->link = phydev->link;
5921 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
5922 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
5923 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
5924 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
5925 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5926 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5927 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5928 MVPP2_GMAC_FORCE_LINK_DOWN);
5929 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5932 mvpp2_interrupts_enable(port);
5933 mvpp2_port_enable(port);
5935 mvpp2_egress_enable(port);
5936 mvpp2_ingress_enable(port);
5937 netif_carrier_on(dev);
5938 netif_tx_wake_all_queues(dev);
5943 netif_tx_stop_all_queues(dev);
5944 netif_carrier_off(dev);
5945 mvpp2_ingress_disable(port);
5946 mvpp2_egress_disable(port);
5948 mvpp2_port_disable(port);
5949 mvpp2_interrupts_disable(port);
5952 phy_print_status(phydev);
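/* Arm the per-CPU Tx-done hrtimer, unless it is already scheduled */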
5956 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5960 if (!port_pcpu->timer_scheduled) {
5961 port_pcpu->timer_scheduled = true;
5962 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
5963 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5964 HRTIMER_MODE_REL_PINNED);
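/* Tx-done tasklet callback: reap completed Tx descriptors on this CPU
 * and re-arm the timer if some work is still pending.
 */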
5968 static void mvpp2_tx_proc_cb(unsigned long data)
5970 struct net_device *dev = (struct net_device *)data;
5971 struct mvpp2_port *port = netdev_priv(dev);
5972 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5973 unsigned int tx_todo, cause;
5975 if (!netif_running(dev))
5977 port_pcpu->timer_scheduled = false;
5979 /* Process all the Tx queues */
5980 cause = (1 << port->ntxqs) - 1;
5981 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
5983 /* Set the timer in case not all the packets were processed */
5985 mvpp2_timer_set(port_pcpu);
5988 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5990 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5991 struct mvpp2_port_pcpu,
5994 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5996 return HRTIMER_NORESTART;
5999 /* Main RX/TX processing routines */
6001 /* Display more error info */
6002 static void mvpp2_rx_error(struct mvpp2_port *port,
6003 struct mvpp2_rx_desc *rx_desc)
6005 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
6006 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
6008 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
6009 case MVPP2_RXD_ERR_CRC:
6010 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
6013 case MVPP2_RXD_ERR_OVERRUN:
6014 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
6017 case MVPP2_RXD_ERR_RESOURCE:
6018 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
6024 /* Handle RX checksum offload */
6025 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
6026 struct sk_buff *skb)
6028 if (((status & MVPP2_RXD_L3_IP4) &&
6029 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
6030 (status & MVPP2_RXD_L3_IP6))
6031 if (((status & MVPP2_RXD_L4_UDP) ||
6032 (status & MVPP2_RXD_L4_TCP)) &&
6033 (status & MVPP2_RXD_L4_CSUM_OK)) {
6035 skb->ip_summed = CHECKSUM_UNNECESSARY;
6039 skb->ip_summed = CHECKSUM_NONE;
6042 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
6043 static int mvpp2_rx_refill(struct mvpp2_port *port,
6044 struct mvpp2_bm_pool *bm_pool, int pool)
6046 dma_addr_t dma_addr;
6047 phys_addr_t phys_addr;
6050 /* No recycle or too many buffers are in use, so allocate a new skb */
6051 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
6056 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6061 /* Handle Tx checksum */
6062 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
6064 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6067 __be16 l3_proto = vlan_get_protocol(skb);
6069 if (l3_proto == htons(ETH_P_IP)) {
6070 struct iphdr *ip4h = ip_hdr(skb);
6072 /* Calculate IPv4 checksum and L4 checksum */
6073 ip_hdr_len = ip4h->ihl;
6074 l4_proto = ip4h->protocol;
6075 } else if (l3_proto == htons(ETH_P_IPV6)) {
6076 struct ipv6hdr *ip6h = ipv6_hdr(skb);
6078 /* Read l4_protocol from one of IPv6 extra headers */
6079 if (skb_network_header_len(skb) > 0)
6080 ip_hdr_len = (skb_network_header_len(skb) >> 2);
6081 l4_proto = ip6h->nexthdr;
6083 return MVPP2_TXD_L4_CSUM_NOT;
6086 return mvpp2_txq_desc_csum(skb_network_offset(skb),
6087 l3_proto, ip_hdr_len, l4_proto);
6090 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
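
/* Example of how the value returned by mvpp2_skb_tx_csum() is consumed:
 * the xmit path ORs it into the first descriptor's command word, e.g.
 *
 *	tx_cmd = mvpp2_skb_tx_csum(port, skb);
 *	tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
 *	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
 *
 * so the checksum offload bits travel with the first descriptor of the
 * packet (see mvpp2_tx() below).
 */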
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
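
/* Note: in mvpp2_rx() the BM pool is refilled *before* the received
 * buffer is unmapped and handed to the stack; if the refill fails the
 * frame is dropped and its buffer returned to the pool, so the pool
 * never shrinks on the Rx path.
 */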
static void tx_desc_unmap_put(struct mvpp2_port *port,
			      struct mvpp2_tx_queue *txq,
			      struct mvpp2_tx_desc *desc)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_offset_set(port, tx_desc,
					buf_dma_addr & MVPP2_TX_DESC_ALIGN);
		mvpp2_txdesc_dma_addr_set(port, tx_desc,
					  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	/* Release all the used descriptors and roll back */
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;

		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
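
/* A TSO skb is emitted as one descriptor chain per segment: a header
 * descriptor pointing into the pre-mapped tso_headers area (see
 * mvpp2_tso_put_hdr()), followed by one or more data descriptors built
 * by mvpp2_tso_put_data(). tso_count_descs() gives the worst-case
 * total, which is what gets reserved up front.
 */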
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
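
/* Descriptors are first pushed to a per-CPU aggregated TXQ (indexed by
 * smp_processor_id() above) and only then credited to the per-port TXQ;
 * writing the number of pending descriptors in
 * mvpp2_aggr_txq_pend_desc_add() is what actually kicks the hardware.
 */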
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
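
/* When the NAPI budget runs out, the Rx queues that were not fully
 * served are remembered in qv->pending_cause_rx and picked up on the
 * next poll, so no queue is starved across poll cycles.
 */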
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22) {
		mvpp22_comphy_init(port);
		mvpp22_gop_init(port);
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_enable(port);
	if (ndev->phydev)
		phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	if (ndev->phydev)
		phy_stop(ndev->phydev);
	phy_power_off(port->comphy);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
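
/* For instance, a request of rx_pending = 100 is not 16-aligned and is
 * rounded up to ALIGN(100, 16) = 112, with the netdev_info() message
 * above reporting the adjustment back to the user.
 */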
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	/* No PHY is attached */
	if (!port->phy_node)
		return 0;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link = 0;
	port->duplex = 0;
	port->speed = 0;

	return 0;
}
static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	if (!ndev->phydev)
		return;

	phy_disconnect(ndev->phydev);
}
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_affinity_hint(qv->irq,
					      cpumask_of(qv->sw_thread_id));
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}

	return err;
}
static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_link_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	return 0;

err_free_link_irq:
	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
		free_irq(port->link_irq, port);
err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	struct mvpp2 *priv = port->priv;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

retry:
	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (!allmulti) {
		netdev_for_each_mc_addr(ha, dev) {
			if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
				allmulti = true;
				goto retry;
			}
		}
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto log_error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

log_error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (running)
		mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");
		/* Reconfigure BM to the original MTU */
		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}

	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		v->irq = of_irq_get_byname(port_node, irqname);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
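
/* Summary of the two IRQ layouts handled above: with the old DT binding
 * a single shared vector serves all Rx queues of the port, while with
 * per-CPU TX interrupts each CPU gets a private vector (plus one extra
 * shared Rx vector in MVPP2_QDIST_SINGLE_MODE).
 */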
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
	    (port->ntxqs > MVPP2_MAX_TXQ))
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}
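
/* Illustrative (not taken from a real board file) DT fragment that
 * would make this helper return true:
 *
 *	interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *			  "tx-cpu2", "tx-cpu3";
 *
 * All five names must be present; anything less falls back to the
 * legacy single-vector mode.
 */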
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct device_node *port_node,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	const char *dt_mac_addr;

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		*mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv, int index)
{
	struct device_node *phy_node;
	struct phy *comphy;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
	if (IS_ERR(comphy)) {
		if (PTR_ERR(comphy) == -EPROBE_DEFER) {
			err = -EPROBE_DEFER;
			goto err_free_netdev;
		}
		comphy = NULL;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	port->link_irq = of_irq_get_byname(port_node, "link");
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[index] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
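
/* The attribute split above: BM, descriptor and packet-data accesses
 * are programmed as cacheable in the outer domain (the snooped paths),
 * while the "normal" read/write code registers are left
 * non-cacheable/system domain.
 */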
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int port_count, i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_gop_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_gop_clk;
		}
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	i = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, i);
		if (err < 0)
			goto err_mg_clk;
		i++;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	clk_disable_unprepare(priv->axi_clk);
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");