/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP2_BM_MC_RLS_REG 0x64c4
#define MVPP2_BM_MC_ID_MASK 0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
					0x400 + (port) * 0x400)
#define MVPP2_MIB_LATE_COLLISION 0x7c
#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
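
/* Worked example (illustrative, not part of the original driver):
 * for a ring whose last_desc is 3 (four descriptors),
 * MVPP2_QUEUE_NEXT_DESC() advances 0 -> 1 -> 2 -> 3 and then wraps
 * back to 0, because 3 is not less than last_desc.
 */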

/* Various constants */

#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ 8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
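
/* Worked example (illustrative, not part of the original driver):
 * for an MTU of 1500, MVPP2_RX_PKT_SIZE(1500) aligns
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (FCS) = 1524 bytes up
 * to the next 32-byte cache line, i.e. 1536 bytes. The buffer then
 * grows by NET_SKB_PAD and the aligned skb_shared_info, and
 * MVPP2_RX_MAX_PKT_SIZE() undoes exactly that last step.
 */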

#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
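
/* Worked example (illustrative, not part of the original driver):
 * header-data bytes and their enable bytes interleave in 16-bit
 * pairs. MVPP2_PRS_TCAM_DATA_BYTE() maps offsets 0, 1, 2, 3 to bytes
 * 0, 1, 4, 5, while MVPP2_PRS_TCAM_DATA_BYTE_EN() maps the same
 * offsets to the matching enable bytes 2, 3, 6, 7.
 */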
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK 0xc
#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
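
/* Worked example (illustrative, under stated assumptions):
 * MVPP2_BM_SHORT_PKT_SIZE expands to 512 - NET_SKB_PAD -
 * MVPP2_SKB_SHINFO_SIZE, so if NET_SKB_PAD were 64 and the aligned
 * skb_shared_info 320 bytes, the short-pool packet size would come
 * out to 512 - 64 - 320 = 128 bytes; the exact value depends on the
 * architecture and kernel configuration.
 */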

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_port {
	u8 id;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)

struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning */
	u8  phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};

struct mvpp2_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_phys_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8  reserved4;		/* bm_qset (for future use, BM) */
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t phys;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
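
/* Illustrative sketch (not part of the original driver): advance a
 * software index over the RX ring using MVPP2_QUEUE_NEXT_DESC(),
 * which relies on last_desc holding "ring size - 1".
 */
static inline int mvpp2_example_rxq_advance(struct mvpp2_rx_queue *rxq,
					    int index, int steps)
{
	while (steps--)
		index = MVPP2_QUEUE_NEXT_DESC(rxq, index);
	return index;
}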

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	atomic_t in_use;
	int in_use_thresh;
};

struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8  reserved1;		/* bm_qset (for future use, BM) */
};

/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS 12
#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	(((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
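
/* Illustrative sketch (not part of the original driver): read the
 * number of occupied descriptors on a physical RXQ with the register
 * helpers above, mirroring how the driver polls
 * MVPP2_RXQ_STATUS_REG elsewhere.
 */
static inline int mvpp2_example_rxq_occupied(struct mvpp2 *priv, int rxq_id)
{
	u32 val = mvpp2_read(priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}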

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = tx_desc->data_size;
	tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
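
/* Worked example (illustrative, not part of the original driver):
 * with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, port 1's logical
 * TXQ 2 maps to physical TXQ (16 + 1) * 8 + 2 = 138, and the port's
 * physical egress port number is 16 + 1 = 17.
 */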

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
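
/* Illustrative sketch (not part of the original driver): invalidate
 * a contiguous range of TCAM entries through the indirect-access
 * helper above.
 */
static inline void mvpp2_example_prs_inv_range(struct mvpp2 *priv,
					       int first, int last)
{
	int index;

	for (index = first; index <= last; index++)
		mvpp2_prs_hw_inv(priv, index);
}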

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
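
/* Worked example (illustrative, not part of the original driver):
 * the port enable byte is active-low. Calling
 * mvpp2_prs_tcam_port_map_set() with ports = BIT(0) | BIT(2) stores
 * ~0x05 & 0xff = 0xfa in the enable byte, and
 * mvpp2_prs_tcam_port_map_get() recovers 0x05 from it.
 */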

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
1165 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1166 unsigned int bits, unsigned int enable)
1168 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1170 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1172 if (!(enable & BIT(i)))
1176 pe->tcam.byte[ai_idx] |= 1 << i;
1178 pe->tcam.byte[ai_idx] &= ~(1 << i);
1181 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
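
/* Worked example (illustrative, not part of the original driver):
 * matching ETH_P_IP (0x0800) at offset 0 writes 0x08 to data byte 0
 * and 0x00 to data byte 1, with both enable bytes set to 0xff so
 * every bit of the ethertype participates in the match.
 */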

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
							(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
				(op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exist - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}

/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}
1932 /* IPv4 header parsing for fragmentation and L4 offset */
1933 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1934 unsigned int ri, unsigned int ri_mask)
1936 struct mvpp2_prs_entry pe;
1939 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1940 (proto != IPPROTO_IGMP))
1943 /* Fragmented packet */
1944 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1945 MVPP2_PE_LAST_FREE_TID);
1949 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1950 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1953 /* Set next lu to IPv4 */
1954 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1955 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1957 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1958 sizeof(struct iphdr) - 4,
1959 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1960 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1961 MVPP2_PRS_IPV4_DIP_AI_BIT);
1962 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1963 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1965 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1966 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1967 /* Unmask all ports */
1968 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1970 /* Update shadow table and hw entry */
1971 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1972 mvpp2_prs_hw_write(priv, &pe);
1974 /* Not fragmented packet */
1975 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1976 MVPP2_PE_LAST_FREE_TID);
1981 /* Clear ri before updating */
1982 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1983 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1984 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1986 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1987 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1989 /* Update shadow table and hw entry */
1990 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1991 mvpp2_prs_hw_write(priv, &pe);
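/* Reading of the offsets above (derived from this file, not stated in the
 * original comments): the preceding L2 IPv4 entry already shifted
 * MVPP2_ETH_TYPE_LEN + 4 bytes, so the parser sits 4 bytes into the IPv4
 * header.  The additional shift of 12 therefore lands on the destination
 * address at header offset 16, where the next IP4 lookup (the cast and
 * unicast entries) matches, and the L4 offset of sizeof(struct iphdr) - 4
 * counts from the same 4-bytes-in position to the end of a 20-byte,
 * options-less header.
 */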
1996 /* IPv4 L3 multicast or broadcast */
1997 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1999 struct mvpp2_prs_entry pe;
2002 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2003 MVPP2_PE_LAST_FREE_TID);
2007 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2008 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2012 case MVPP2_PRS_L3_MULTI_CAST:
2013 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2014 MVPP2_PRS_IPV4_MC_MASK);
2015 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2016 MVPP2_PRS_RI_L3_ADDR_MASK);
2018 case MVPP2_PRS_L3_BROAD_CAST:
2019 mask = MVPP2_PRS_IPV4_BC_MASK;
2020 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2021 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2022 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2023 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2024 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2025 MVPP2_PRS_RI_L3_ADDR_MASK);
2031 /* Finished: go to flowid generation */
2032 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2033 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2035 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2036 MVPP2_PRS_IPV4_DIP_AI_BIT);
2037 /* Unmask all ports */
2038 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2040 /* Update shadow table and hw entry */
2041 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2042 mvpp2_prs_hw_write(priv, &pe);
2047 /* Set entries for protocols over IPv6 */
2048 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2049 unsigned int ri, unsigned int ri_mask)
2051 struct mvpp2_prs_entry pe;
2054 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2055 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2058 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2059 MVPP2_PE_LAST_FREE_TID);
2063 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2064 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2067 /* Finished: go to flowid generation */
2068 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2069 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2070 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2071 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2072 sizeof(struct ipv6hdr) - 6,
2073 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2075 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2076 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2077 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2078 /* Unmask all ports */
2079 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2082 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2083 mvpp2_prs_hw_write(priv, &pe);
2088 /* IPv6 L3 multicast entry */
2089 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2091 struct mvpp2_prs_entry pe;
2094 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2097 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2098 MVPP2_PE_LAST_FREE_TID);
2102 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2103 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2106 /* Continue parsing: go to IPv6 again */
2107 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2108 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2109 MVPP2_PRS_RI_L3_ADDR_MASK);
2110 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2111 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2112 /* Shift back to IPv6 NH */
2113 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2115 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2116 MVPP2_PRS_IPV6_MC_MASK);
2117 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2118 /* Unmask all ports */
2119 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2121 /* Update shadow table and hw entry */
2122 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2123 mvpp2_prs_hw_write(priv, &pe);
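/* Reading of the -18 shift above (derived, not stated in the original):
 * the L2 IPv6 entry shifted MVPP2_ETH_TYPE_LEN + 8 + MVPP2_MAX_L3_ADDR_SIZE
 * bytes, leaving the parser at the destination address, 24 bytes into the
 * IPv6 header.  Shifting by -18 moves back to header offset 6, the Next
 * Header byte, which is where the following MVPP2_PRS_LU_IP6 protocol
 * lookups expect to match.
 */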
2128 /* Parser per-port initialization */
2129 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2130 int lu_max, int offset)
2135 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2136 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2137 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2138 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2140 /* Set maximum number of loops for packet received from port */
2141 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2142 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2143 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2144 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2146 /* Set initial offset for packet header extraction for the first
2147 * searching loop
2148 */
2149 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2150 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2151 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2152 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2155 /* Default flow entries initialization for all ports */
2156 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2158 struct mvpp2_prs_entry pe;
2161 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2162 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2163 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2164 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2166 /* Mask all ports */
2167 mvpp2_prs_tcam_port_map_set(&pe, 0);
2170 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2171 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2173 /* Update shadow table and hw entry */
2174 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2175 mvpp2_prs_hw_write(priv, &pe);
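/* Worked example (illustrative): the loop above claims the consecutive
 * indexes MVPP2_PE_FIRST_DEFAULT_FLOW, MVPP2_PE_FIRST_DEFAULT_FLOW - 1,
 * and so on, one default flow entry per port.  Each entry apparently
 * starts with an empty port map; mvpp2_prs_def_flow() later sets the
 * owning port's bit when the port is configured.
 */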
2179 /* Set default entry for Marvell Header field */
2180 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2182 struct mvpp2_prs_entry pe;
2184 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2186 pe.index = MVPP2_PE_MH_DEFAULT;
2187 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2188 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2189 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2190 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2192 /* Unmask all ports */
2193 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2195 /* Update shadow table and hw entry */
2196 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2197 mvpp2_prs_hw_write(priv, &pe);
2200 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
2201 * multicast MAC addresses
2202 */
2203 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2205 struct mvpp2_prs_entry pe;
2207 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2209 /* Non-promiscuous mode for all ports - DROP unknown packets */
2210 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2211 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2213 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2214 MVPP2_PRS_RI_DROP_MASK);
2215 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2216 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2218 /* Unmask all ports */
2219 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2221 /* Update shadow table and hw entry */
2222 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2223 mvpp2_prs_hw_write(priv, &pe);
2225 /* placeholders only - no ports */
2226 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2227 mvpp2_prs_mac_promisc_set(priv, 0, false);
2228 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2229 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2232 /* Set default entries for various types of dsa packets */
2233 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2235 struct mvpp2_prs_entry pe;
2237 /* Untagged EDSA entry - placeholder */
2238 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2241 /* Tagged EDSA entry - placeholder */
2242 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2244 /* Untagged DSA entry - placeholder */
2245 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2248 /* Tagged DSA entry - placeholder */
2249 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2251 /* Untagged EDSA ethertype entry - placeholder */
2252 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2253 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2255 /* Tagged EDSA ethertype entry - placeholder */
2256 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2257 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2259 /* Untagged DSA ethertype entry */
2260 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2261 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2263 /* Tagged DSA ethertype entry */
2264 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2265 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2267 /* Set default entry, in case DSA or EDSA tag not found */
2268 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2269 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2270 pe.index = MVPP2_PE_DSA_DEFAULT;
2271 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2274 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2275 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2277 /* Clear all sram ai bits for next iteration */
2278 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2280 /* Unmask all ports */
2281 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2283 mvpp2_prs_hw_write(priv, &pe);
2286 /* Match basic ethertypes */
2287 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2289 struct mvpp2_prs_entry pe;
2292 /* Ethertype: PPPoE */
2293 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2294 MVPP2_PE_LAST_FREE_TID);
2298 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2299 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2302 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2304 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2305 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2306 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2307 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2308 MVPP2_PRS_RI_PPPOE_MASK);
2310 /* Update shadow table and hw entry */
2311 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2312 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2313 priv->prs_shadow[pe.index].finish = false;
2314 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2315 MVPP2_PRS_RI_PPPOE_MASK);
2316 mvpp2_prs_hw_write(priv, &pe);
2318 /* Ethertype: ARP */
2319 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2320 MVPP2_PE_LAST_FREE_TID);
2324 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2325 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2328 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2330 /* Generate flow in the next iteration */
2331 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2332 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2333 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2334 MVPP2_PRS_RI_L3_PROTO_MASK);
2336 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2338 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2340 /* Update shadow table and hw entry */
2341 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2342 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2343 priv->prs_shadow[pe.index].finish = true;
2344 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2345 MVPP2_PRS_RI_L3_PROTO_MASK);
2346 mvpp2_prs_hw_write(priv, &pe);
2348 /* Ethertype: LBTD */
2349 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2350 MVPP2_PE_LAST_FREE_TID);
2354 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2355 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2358 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2360 /* Generate flow in the next iteration */
2361 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2362 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2363 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2364 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2365 MVPP2_PRS_RI_CPU_CODE_MASK |
2366 MVPP2_PRS_RI_UDF3_MASK);
2368 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2370 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2372 /* Update shadow table and hw entry */
2373 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2374 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2375 priv->prs_shadow[pe.index].finish = true;
2376 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2377 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2378 MVPP2_PRS_RI_CPU_CODE_MASK |
2379 MVPP2_PRS_RI_UDF3_MASK);
2380 mvpp2_prs_hw_write(priv, &pe);
2382 /* Ethertype: IPv4 without options */
2383 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2384 MVPP2_PE_LAST_FREE_TID);
2388 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2389 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2392 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2393 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2394 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2395 MVPP2_PRS_IPV4_HEAD_MASK |
2396 MVPP2_PRS_IPV4_IHL_MASK);
2398 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2399 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2400 MVPP2_PRS_RI_L3_PROTO_MASK);
2401 /* Skip eth_type + 4 bytes of IP header */
2402 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2403 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2405 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2407 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2409 /* Update shadow table and hw entry */
2410 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2411 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2412 priv->prs_shadow[pe.index].finish = false;
2413 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2414 MVPP2_PRS_RI_L3_PROTO_MASK);
2415 mvpp2_prs_hw_write(priv, &pe);
2417 /* Ethertype: IPv4 with options */
2418 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2419 MVPP2_PE_LAST_FREE_TID);
2425 /* Clear tcam data before updating */
2426 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2427 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2429 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2430 MVPP2_PRS_IPV4_HEAD,
2431 MVPP2_PRS_IPV4_HEAD_MASK);
2433 /* Clear ri before updating */
2434 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2435 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2436 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2437 MVPP2_PRS_RI_L3_PROTO_MASK);
2439 /* Update shadow table and hw entry */
2440 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2441 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2442 priv->prs_shadow[pe.index].finish = false;
2443 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2444 MVPP2_PRS_RI_L3_PROTO_MASK);
2445 mvpp2_prs_hw_write(priv, &pe);
2447 /* Ethertype: IPv6 without options */
2448 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2449 MVPP2_PE_LAST_FREE_TID);
2453 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2454 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2457 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2459 /* Skip DIP of IPv6 header */
2460 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2461 MVPP2_MAX_L3_ADDR_SIZE,
2462 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2463 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2464 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2465 MVPP2_PRS_RI_L3_PROTO_MASK);
2467 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2469 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2471 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2472 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2473 priv->prs_shadow[pe.index].finish = false;
2474 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2475 MVPP2_PRS_RI_L3_PROTO_MASK);
2476 mvpp2_prs_hw_write(priv, &pe);
2478 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2479 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2480 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2481 pe.index = MVPP2_PE_ETH_TYPE_UN;
2483 /* Unmask all ports */
2484 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2486 /* Generate flow in the next iteration */
2487 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2488 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2489 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2490 MVPP2_PRS_RI_L3_PROTO_MASK);
2491 /* Set L3 offset even if it's unknown L3 */
2492 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2494 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2496 /* Update shadow table and hw entry */
2497 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2498 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2499 priv->prs_shadow[pe.index].finish = true;
2500 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2501 MVPP2_PRS_RI_L3_PROTO_MASK);
2502 mvpp2_prs_hw_write(priv, &pe);
2507 /* Configure vlan entries and detect up to 2 successive VLAN tags.
2508 * Possible options:
2509 * 0x8100, 0x88a8
2510 * 0x8100, 0x8100
2511 * 0x8100
2512 * 0x88a8
2513 */
2514 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2516 struct mvpp2_prs_entry pe;
2519 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, MVPP2_PRS_DBL_VLANS_MAX,
2520 sizeof(bool),
2522 if (!priv->prs_double_vlans)
2525 /* Double VLAN: 0x8100, 0x88A8 */
2526 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2527 MVPP2_PRS_PORT_MASK);
2531 /* Double VLAN: 0x8100, 0x8100 */
2532 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2533 MVPP2_PRS_PORT_MASK);
2537 /* Single VLAN: 0x88a8 */
2538 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2539 MVPP2_PRS_PORT_MASK);
2543 /* Single VLAN: 0x8100 */
2544 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2545 MVPP2_PRS_PORT_MASK);
2549 /* Set default double vlan entry */
2550 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2551 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2552 pe.index = MVPP2_PE_VLAN_DBL;
2554 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2555 /* Clear ai for next iterations */
2556 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2557 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2558 MVPP2_PRS_RI_VLAN_MASK);
2560 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2561 MVPP2_PRS_DBL_VLAN_AI_BIT);
2562 /* Unmask all ports */
2563 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2565 /* Update shadow table and hw entry */
2566 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2567 mvpp2_prs_hw_write(priv, &pe);
2569 /* Set default vlan none entry */
2570 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2571 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2572 pe.index = MVPP2_PE_VLAN_NONE;
2574 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2575 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2576 MVPP2_PRS_RI_VLAN_MASK);
2578 /* Unmask all ports */
2579 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2581 /* Update shadow table and hw entry */
2582 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2583 mvpp2_prs_hw_write(priv, &pe);
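/* Ordering note (inferred from the tid check in
 * mvpp2_prs_double_vlan_add()): the two double-VLAN rules are installed
 * before the single-VLAN ones so that they occupy lower TCAM indexes and
 * are matched first; adding them in the opposite order would trip the
 * tid ordering check against existing single/triple entries.
 */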
2588 /* Set entries for PPPoE ethertype */
2589 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2591 struct mvpp2_prs_entry pe;
2594 /* IPv4 over PPPoE with options */
2595 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2596 MVPP2_PE_LAST_FREE_TID);
2600 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2601 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2604 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2606 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2607 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2608 MVPP2_PRS_RI_L3_PROTO_MASK);
2609 /* Skip eth_type + 4 bytes of IP header */
2610 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2611 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2613 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2615 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2617 /* Update shadow table and hw entry */
2618 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2619 mvpp2_prs_hw_write(priv, &pe);
2621 /* IPv4 over PPPoE without options */
2622 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2623 MVPP2_PE_LAST_FREE_TID);
2629 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2630 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2631 MVPP2_PRS_IPV4_HEAD_MASK |
2632 MVPP2_PRS_IPV4_IHL_MASK);
2634 /* Clear ri before updating */
2635 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2636 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2637 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2638 MVPP2_PRS_RI_L3_PROTO_MASK);
2640 /* Update shadow table and hw entry */
2641 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2642 mvpp2_prs_hw_write(priv, &pe);
2644 /* IPv6 over PPPoE */
2645 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2646 MVPP2_PE_LAST_FREE_TID);
2650 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2651 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2654 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2656 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2657 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2658 MVPP2_PRS_RI_L3_PROTO_MASK);
2659 /* Skip eth_type + 4 bytes of IPv6 header */
2660 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2661 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2663 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2665 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2667 /* Update shadow table and hw entry */
2668 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2669 mvpp2_prs_hw_write(priv, &pe);
2671 /* Non-IP over PPPoE */
2672 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2673 MVPP2_PE_LAST_FREE_TID);
2677 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2678 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2681 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2682 MVPP2_PRS_RI_L3_PROTO_MASK);
2684 /* Finished: go to flowid generation */
2685 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2686 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2687 /* Set L3 offset even if it's unknown L3 */
2688 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2690 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2692 /* Update shadow table and hw entry */
2693 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2694 mvpp2_prs_hw_write(priv, &pe);
2699 /* Initialize entries for IPv4 */
2700 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2702 struct mvpp2_prs_entry pe;
2705 /* Set entries for TCP, UDP and IGMP over IPv4 */
2706 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2707 MVPP2_PRS_RI_L4_PROTO_MASK);
2711 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2712 MVPP2_PRS_RI_L4_PROTO_MASK);
2716 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2717 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2718 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2719 MVPP2_PRS_RI_CPU_CODE_MASK |
2720 MVPP2_PRS_RI_UDF3_MASK);
2724 /* IPv4 Broadcast */
2725 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2729 /* IPv4 Multicast */
2730 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2734 /* Default IPv4 entry for unknown protocols */
2735 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2736 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2737 pe.index = MVPP2_PE_IP4_PROTO_UN;
2739 /* Set next lu to IPv4 */
2740 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2741 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2743 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2744 sizeof(struct iphdr) - 4,
2745 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2746 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2747 MVPP2_PRS_IPV4_DIP_AI_BIT);
2748 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2749 MVPP2_PRS_RI_L4_PROTO_MASK);
2751 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2752 /* Unmask all ports */
2753 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2755 /* Update shadow table and hw entry */
2756 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2757 mvpp2_prs_hw_write(priv, &pe);
2759 /* Default IPv4 entry for unicast address */
2760 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2761 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2762 pe.index = MVPP2_PE_IP4_ADDR_UN;
2764 /* Finished: go to flowid generation */
2765 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2766 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2767 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2768 MVPP2_PRS_RI_L3_ADDR_MASK);
2770 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2771 MVPP2_PRS_IPV4_DIP_AI_BIT);
2772 /* Unmask all ports */
2773 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2775 /* Update shadow table and hw entry */
2776 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2777 mvpp2_prs_hw_write(priv, &pe);
2782 /* Initialize entries for IPv6 */
2783 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2785 struct mvpp2_prs_entry pe;
2788 /* Set entries for TCP, UDP and ICMP over IPv6 */
2789 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2790 MVPP2_PRS_RI_L4_TCP,
2791 MVPP2_PRS_RI_L4_PROTO_MASK);
2795 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2796 MVPP2_PRS_RI_L4_UDP,
2797 MVPP2_PRS_RI_L4_PROTO_MASK);
2801 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2802 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2803 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2804 MVPP2_PRS_RI_CPU_CODE_MASK |
2805 MVPP2_PRS_RI_UDF3_MASK);
2809 /* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
2810 /* Result Info: UDF7=1, DS lite */
2811 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2812 MVPP2_PRS_RI_UDF7_IP6_LITE,
2813 MVPP2_PRS_RI_UDF7_MASK);
2817 /* IPv6 multicast */
2818 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2822 /* Entry for checking hop limit */
2823 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2824 MVPP2_PE_LAST_FREE_TID);
2828 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2829 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2832 /* Finished: go to flowid generation */
2833 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2834 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2835 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2836 MVPP2_PRS_RI_DROP_MASK,
2837 MVPP2_PRS_RI_L3_PROTO_MASK |
2838 MVPP2_PRS_RI_DROP_MASK);
2840 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2841 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2842 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2844 /* Update shadow table and hw entry */
2845 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2846 mvpp2_prs_hw_write(priv, &pe);
2848 /* Default IPv6 entry for unknown protocols */
2849 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2850 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2851 pe.index = MVPP2_PE_IP6_PROTO_UN;
2853 /* Finished: go to flowid generation */
2854 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2855 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2856 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2857 MVPP2_PRS_RI_L4_PROTO_MASK);
2858 /* Set L4 offset relative to the current position */
2859 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2860 sizeof(struct ipv6hdr) - 4,
2861 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2863 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2864 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2865 /* Unmask all ports */
2866 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2868 /* Update shadow table and hw entry */
2869 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2870 mvpp2_prs_hw_write(priv, &pe);
2872 /* Default IPv6 entry for unknown ext protocols */
2873 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2874 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2875 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2877 /* Finished: go to flowid generation */
2878 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2879 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2880 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2881 MVPP2_PRS_RI_L4_PROTO_MASK);
2883 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2884 MVPP2_PRS_IPV6_EXT_AI_BIT);
2885 /* Unmask all ports */
2886 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2888 /* Update shadow table and hw entry */
2889 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2890 mvpp2_prs_hw_write(priv, &pe);
2892 /* Default IPv6 entry for unicast address */
2893 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2894 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2895 pe.index = MVPP2_PE_IP6_ADDR_UN;
2897 /* Continue parsing: go to IPv6 again */
2898 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2899 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2900 MVPP2_PRS_RI_L3_ADDR_MASK);
2901 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2902 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2903 /* Shift back to IPv6 NH */
2904 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2906 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2907 /* Unmask all ports */
2908 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2910 /* Update shadow table and hw entry */
2911 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2912 mvpp2_prs_hw_write(priv, &pe);
2917 /* Parser default initialization */
2918 static int mvpp2_prs_default_init(struct platform_device *pdev,
2923 /* Enable tcam table */
2924 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2926 /* Clear all tcam and sram entries */
2927 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2928 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2929 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2930 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2932 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2933 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2934 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2937 /* Invalidate all tcam entries */
2938 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2939 mvpp2_prs_hw_inv(priv, index);
2941 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2942 sizeof(struct mvpp2_prs_shadow),
2944 if (!priv->prs_shadow)
2947 /* Always start from lookup = 0 */
2948 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2949 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2950 MVPP2_PRS_PORT_LU_MAX, 0);
2952 mvpp2_prs_def_flow_init(priv);
2954 mvpp2_prs_mh_init(priv);
2956 mvpp2_prs_mac_init(priv);
2958 mvpp2_prs_dsa_init(priv);
2960 err = mvpp2_prs_etype_init(priv);
2964 err = mvpp2_prs_vlan_init(pdev, priv);
2968 err = mvpp2_prs_pppoe_init(priv);
2972 err = mvpp2_prs_ip6_init(priv);
2976 err = mvpp2_prs_ip4_init(priv);
2983 /* Compare MAC DA with tcam entry data */
2984 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2985 const u8 *da, unsigned char *mask)
2987 unsigned char tcam_byte, tcam_mask;
2990 for (index = 0; index < ETH_ALEN; index++) {
2991 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2992 if (tcam_mask != mask[index])
2995 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3002 /* Find tcam entry with matched pair <MAC DA, port> */
3003 static struct mvpp2_prs_entry *
3004 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3005 unsigned char *mask, int udf_type)
3007 struct mvpp2_prs_entry *pe;
3010 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3013 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3015 /* Go through all entries with MVPP2_PRS_LU_MAC */
3016 for (tid = MVPP2_PE_FIRST_FREE_TID;
3017 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3018 unsigned int entry_pmap;
3020 if (!priv->prs_shadow[tid].valid ||
3021 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3022 (priv->prs_shadow[tid].udf != udf_type))
3026 mvpp2_prs_hw_read(priv, pe);
3027 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3029 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3038 /* Update parser's mac da entry */
3039 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3040 const u8 *da, bool add)
3042 struct mvpp2_prs_entry *pe;
3043 unsigned int pmap, len, ri;
3044 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3047 /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3048 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3049 MVPP2_PRS_UDF_MAC_DEF);
3056 /* Create new TCAM entry */
3057 /* Find first range mac entry */
3058 for (tid = MVPP2_PE_FIRST_FREE_TID;
3059 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3060 if (priv->prs_shadow[tid].valid &&
3061 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3062 (priv->prs_shadow[tid].udf ==
3063 MVPP2_PRS_UDF_MAC_RANGE))
3066 /* Go through all entries from first to last */
3067 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3072 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3075 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3078 /* Mask all ports */
3079 mvpp2_prs_tcam_port_map_set(pe, 0);
3082 /* Update port mask */
3083 mvpp2_prs_tcam_port_set(pe, port, add);
3085 /* Invalidate the entry if no ports are left enabled */
3086 pmap = mvpp2_prs_tcam_port_map_get(pe);
3092 mvpp2_prs_hw_inv(priv, pe->index);
3093 priv->prs_shadow[pe->index].valid = false;
3098 /* Continue - set next lookup */
3099 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3101 /* Set match on DA */
3104 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3106 /* Set result info bits */
3107 if (is_broadcast_ether_addr(da))
3108 ri = MVPP2_PRS_RI_L2_BCAST;
3109 else if (is_multicast_ether_addr(da))
3110 ri = MVPP2_PRS_RI_L2_MCAST;
3112 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3114 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3115 MVPP2_PRS_RI_MAC_ME_MASK);
3116 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3117 MVPP2_PRS_RI_MAC_ME_MASK);
3119 /* Shift to ethertype */
3120 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3121 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3123 /* Update shadow table and hw entry */
3124 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3125 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3126 mvpp2_prs_hw_write(priv, pe);
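/* Usage sketch (mirrors mvpp2_prs_update_mac_da() below): accepting a
 * unicast address on a port is a single call, and passing add == false
 * removes the port bit and invalidates the entry once its port map is
 * empty:
 *
 *	err = mvpp2_prs_mac_da_accept(priv, port->id, dev->dev_addr, true);
 */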
3133 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3135 struct mvpp2_port *port = netdev_priv(dev);
3138 /* Remove old parser entry */
3139 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3144 /* Add new parser entry */
3145 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3149 /* Set addr in the device */
3150 ether_addr_copy(dev->dev_addr, da);
3155 /* Delete all the port's simple (not range) multicast entries */
3156 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3158 struct mvpp2_prs_entry pe;
3161 for (tid = MVPP2_PE_FIRST_FREE_TID;
3162 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3163 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3165 if (!priv->prs_shadow[tid].valid ||
3166 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3167 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3170 /* Only simple mac entries */
3172 mvpp2_prs_hw_read(priv, &pe);
3174 /* Read mac addr from entry */
3175 for (index = 0; index < ETH_ALEN; index++)
3176 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3179 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3180 /* Delete this entry */
3181 mvpp2_prs_mac_da_accept(priv, port, da, false);
3185 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3188 case MVPP2_TAG_TYPE_EDSA:
3189 /* Add port to EDSA entries */
3190 mvpp2_prs_dsa_tag_set(priv, port, true,
3191 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3192 mvpp2_prs_dsa_tag_set(priv, port, true,
3193 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3194 /* Remove port from DSA entries */
3195 mvpp2_prs_dsa_tag_set(priv, port, false,
3196 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3197 mvpp2_prs_dsa_tag_set(priv, port, false,
3198 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3201 case MVPP2_TAG_TYPE_DSA:
3202 /* Add port to DSA entries */
3203 mvpp2_prs_dsa_tag_set(priv, port, true,
3204 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3205 mvpp2_prs_dsa_tag_set(priv, port, true,
3206 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3207 /* Remove port from EDSA entries */
3208 mvpp2_prs_dsa_tag_set(priv, port, false,
3209 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3210 mvpp2_prs_dsa_tag_set(priv, port, false,
3211 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3214 case MVPP2_TAG_TYPE_MH:
3215 case MVPP2_TAG_TYPE_NONE:
3216 /* Remove port from EDSA and DSA entries */
3217 mvpp2_prs_dsa_tag_set(priv, port, false,
3218 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3219 mvpp2_prs_dsa_tag_set(priv, port, false,
3220 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3221 mvpp2_prs_dsa_tag_set(priv, port, false,
3222 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3223 mvpp2_prs_dsa_tag_set(priv, port, false,
3224 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3228 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3235 /* Set prs flow for the port */
3236 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3238 struct mvpp2_prs_entry *pe;
3241 pe = mvpp2_prs_flow_find(port->priv, port->id);
3243 /* No such entry exists */
3245 /* Go through all entries from last to first */
3246 tid = mvpp2_prs_tcam_first_free(port->priv,
3247 MVPP2_PE_LAST_FREE_TID,
3248 MVPP2_PE_FIRST_FREE_TID);
3252 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3256 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3260 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3261 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3263 /* Update shadow table */
3264 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3267 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3268 mvpp2_prs_hw_write(port->priv, pe);
3274 /* Classifier configuration routines */
3276 /* Update classification flow table registers */
3277 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3278 struct mvpp2_cls_flow_entry *fe)
3280 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3281 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3282 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3283 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3286 /* Update classification lookup table register */
3287 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3288 struct mvpp2_cls_lookup_entry *le)
3292 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3293 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3294 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3297 /* Classifier default initialization */
3298 static void mvpp2_cls_init(struct mvpp2 *priv)
3300 struct mvpp2_cls_lookup_entry le;
3301 struct mvpp2_cls_flow_entry fe;
3304 /* Enable classifier */
3305 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3307 /* Clear classifier flow table */
3308 memset(&fe.data, 0, sizeof(fe.data));
3309 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3311 mvpp2_cls_flow_write(priv, &fe);
3314 /* Clear classifier lookup table */
3316 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3319 mvpp2_cls_lookup_write(priv, &le);
3322 mvpp2_cls_lookup_write(priv, &le);
3326 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3328 struct mvpp2_cls_lookup_entry le;
3331 /* Set way for the port */
3332 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3333 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3334 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3336 /* Pick the entry to be accessed in lookup ID decoding table
3337 * according to the way and lkpid.
3338 */
3339 le.lkpid = port->id;
3343 /* Set initial CPU queue for receiving packets */
3344 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3345 le.data |= port->first_rxq;
3347 /* Disable classification engines */
3348 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3350 /* Update lookup ID table entry */
3351 mvpp2_cls_lookup_write(port->priv, &le);
3354 /* Set CPU queue number for oversize packets */
3355 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3359 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3360 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3362 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3363 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3365 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3366 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3367 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3370 /* Buffer Manager configuration routines */
3373 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3375 struct mvpp2_bm_pool *bm_pool, int size)
3380 size_bytes = sizeof(u32) * size;
3381 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3382 &bm_pool->phys_addr,
3384 if (!bm_pool->virt_addr)
3387 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3388 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3389 bm_pool->phys_addr);
3390 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3391 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3395 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3396 bm_pool->phys_addr);
3397 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3399 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3400 val |= MVPP2_BM_START_MASK;
3401 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3403 bm_pool->type = MVPP2_BM_FREE;
3404 bm_pool->size = size;
3405 bm_pool->pkt_size = 0;
3406 bm_pool->buf_num = 0;
3407 atomic_set(&bm_pool->in_use, 0);
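/* Sizing note (derived from the code above): the pool's DMA area
 * presumably holds one 32-bit physical buffer pointer per slot, so a pool
 * of `size` entries needs exactly sizeof(u32) * size bytes, and that area
 * must be aligned to MVPP2_BM_POOL_PTR_ALIGN before its base is written
 * to MVPP2_BM_POOL_BASE_REG.
 */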
3412 /* Set pool buffer size */
3413 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3414 struct mvpp2_bm_pool *bm_pool,
3419 bm_pool->buf_size = buf_size;
3421 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3422 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3425 /* Free all buffers from the pool */
3426 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3427 struct mvpp2_bm_pool *bm_pool)
3431 for (i = 0; i < bm_pool->buf_num; i++) {
3432 dma_addr_t buf_phys_addr;
3435 /* Get buffer virtual address (indirect access) */
3436 buf_phys_addr = mvpp2_read(priv,
3437 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3438 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3440 dma_unmap_single(dev, buf_phys_addr,
3441 bm_pool->buf_size, DMA_FROM_DEVICE);
3445 dev_kfree_skb_any((struct sk_buff *)vaddr);
3448 /* Update BM driver with number of buffers removed from pool */
3449 bm_pool->buf_num -= i;
3453 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3455 struct mvpp2_bm_pool *bm_pool)
3459 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3460 if (bm_pool->buf_num) {
3461 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3465 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3466 val |= MVPP2_BM_STOP_MASK;
3467 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3469 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3471 bm_pool->phys_addr);
3475 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3479 struct mvpp2_bm_pool *bm_pool;
3481 /* Create all pools with maximum size */
3482 size = MVPP2_BM_POOL_SIZE_MAX;
3483 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3484 bm_pool = &priv->bm_pools[i];
3486 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3488 goto err_unroll_pools;
3489 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3494 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3495 for (i = i - 1; i >= 0; i--)
3496 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3500 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3504 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3505 /* Mask all BM interrupts */
3506 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3507 /* Clear BM cause register */
3508 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3511 /* Allocate and initialize BM pools */
3512 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3513 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3514 if (!priv->bm_pools)
3517 err = mvpp2_bm_pools_init(pdev, priv);
3523 /* Attach long pool to rxq */
3524 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3525 int lrxq, int long_pool)
3530 /* Get queue physical ID */
3531 prxq = port->rxqs[lrxq]->id;
3533 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3534 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3535 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3536 MVPP2_RXQ_POOL_LONG_MASK);
3538 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3541 /* Attach short pool to rxq */
3542 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3543 int lrxq, int short_pool)
3548 /* Get queue physical ID */
3549 prxq = port->rxqs[lrxq]->id;
3551 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3552 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3553 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3554 MVPP2_RXQ_POOL_SHORT_MASK);
3556 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3559 /* Allocate skb for BM pool */
3560 static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3561 struct mvpp2_bm_pool *bm_pool,
3562 dma_addr_t *buf_phys_addr,
3565 struct sk_buff *skb;
3566 dma_addr_t phys_addr;
3568 skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3572 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3573 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3575 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3576 dev_kfree_skb_any(skb);
3579 *buf_phys_addr = phys_addr;
3584 /* Set pool number in a BM cookie */
3585 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3589 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3590 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3595 /* Get pool number from a BM cookie */
3596 static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3598 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
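/* Worked example (illustrative; assumes MVPP2_BM_COOKIE_POOL_OFFS == 8 as
 * defined earlier in this file):
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);	// bm == 0x300
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// pool == 3
 */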
3601 /* Release buffer to BM */
3602 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3603 u32 buf_phys_addr, u32 buf_virt_addr)
3605 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3606 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3609 /* Release multicast buffer */
3610 static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3611 u32 buf_phys_addr, u32 buf_virt_addr,
3616 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3617 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3619 mvpp2_bm_pool_put(port, pool,
3620 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3624 /* Refill BM pool */
3625 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3626 u32 phys_addr, u32 cookie)
3628 int pool = mvpp2_bm_cookie_pool_get(bm);
3630 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3633 /* Allocate buffers for the pool */
3634 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3635 struct mvpp2_bm_pool *bm_pool, int buf_num)
3637 struct sk_buff *skb;
3638 int i, buf_size, total_size;
3640 dma_addr_t phys_addr;
3642 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3643 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3646 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3647 netdev_err(port->dev,
3648 "cannot allocate %d buffers for pool %d\n",
3649 buf_num, bm_pool->id);
3653 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3654 for (i = 0; i < buf_num; i++) {
3655 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3659 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
3662 /* Update BM driver with number of buffers added to pool */
3663 bm_pool->buf_num += i;
3664 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3666 netdev_dbg(port->dev,
3667 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3668 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3669 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3671 netdev_dbg(port->dev,
3672 "%s pool %d: %d of %d buffers added\n",
3673 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3674 bm_pool->id, i, buf_num);
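/* Usage sketch (illustrative; mirrors mvpp2_bm_pool_use() below): a fresh
 * long pool is filled with its default buffer count, and any shortfall in
 * the return value is treated as an error by the caller:
 *
 *	num = mvpp2_bm_bufs_add(port, new_pool, MVPP2_BM_LONG_BUF_NUM);
 */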
3678 /* Notify the driver that BM pool is being used as a specific type and
3679 * return the pool pointer on success
3680 */
3681 static struct mvpp2_bm_pool *
3682 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3685 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3688 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3689 netdev_err(port->dev, "mixing pool types is forbidden\n");
3693 if (new_pool->type == MVPP2_BM_FREE)
3694 new_pool->type = type;
3696 /* Allocate buffers in case BM pool is used as long pool, but packet
3697 * size doesn't match MTU or BM pool hasn't been used yet
3698 */
3699 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3700 (new_pool->pkt_size == 0)) {
3703 /* Set default buffer number or free all the buffers in case
3704 * the pool is not empty
3705 */
3706 pkts_num = new_pool->buf_num;
3708 pkts_num = type == MVPP2_BM_SWF_LONG ?
3709 MVPP2_BM_LONG_BUF_NUM :
3710 MVPP2_BM_SHORT_BUF_NUM;
3712 mvpp2_bm_bufs_free(port->dev->dev.parent,
3713 port->priv, new_pool);
3715 new_pool->pkt_size = pkt_size;
3717 /* Allocate buffers for this pool */
3718 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3719 if (num != pkts_num) {
3720 WARN(1, "pool %d: %d of %d allocated\n",
3721 new_pool->id, num, pkts_num);
3726 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3727 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3732 /* Initialize pools for swf */
3733 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3737 if (!port->pool_long) {
3739 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3742 if (!port->pool_long)
3745 port->pool_long->port_map |= (1 << port->id);
3747 for (rxq = 0; rxq < rxq_number; rxq++)
3748 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3751 if (!port->pool_short) {
3753 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3755 MVPP2_BM_SHORT_PKT_SIZE);
3756 if (!port->pool_short)
3759 port->pool_short->port_map |= (1 << port->id);
3761 for (rxq = 0; rxq < rxq_number; rxq++)
3762 mvpp2_rxq_short_pool_set(port, rxq,
3763 port->pool_short->id);
3769 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3771 struct mvpp2_port *port = netdev_priv(dev);
3772 struct mvpp2_bm_pool *port_pool = port->pool_long;
3773 int num, pkts_num = port_pool->buf_num;
3774 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3776 /* Update BM pool with new buffer size */
3777 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3778 if (port_pool->buf_num) {
3779 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3783 port_pool->pkt_size = pkt_size;
3784 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3785 if (num != pkts_num) {
3786 WARN(1, "pool %d: %d of %d allocated\n",
3787 port_pool->id, num, pkts_num);
3791 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3792 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3794 netdev_update_features(dev);
3798 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3800 int cpu, cpu_mask = 0;
3802 for_each_present_cpu(cpu)
3803 cpu_mask |= 1 << cpu;
3804 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3805 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3808 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3810 int cpu, cpu_mask = 0;
3812 for_each_present_cpu(cpu)
3813 cpu_mask |= 1 << cpu;
3814 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3815 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
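/* Worked example (illustrative): on a two-CPU system the loops above
 * build cpu_mask = BIT(0) | BIT(1) = 0x3, so a single register write
 * enables or disables the interrupt for every present CPU.
 */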
3818 /* Mask the current CPU's Rx/Tx interrupts */
3819 static void mvpp2_interrupts_mask(void *arg)
3821 struct mvpp2_port *port = arg;
3823 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3826 /* Unmask the current CPU's Rx/Tx interrupts */
3827 static void mvpp2_interrupts_unmask(void *arg)
3829 struct mvpp2_port *port = arg;
3831 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3832 (MVPP2_CAUSE_MISC_SUM_MASK |
3833 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3836 /* Port configuration routines */
3838 static void mvpp2_port_mii_set(struct mvpp2_port *port)
3842 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3844 switch (port->phy_interface) {
3845 case PHY_INTERFACE_MODE_SGMII:
3846 val |= MVPP2_GMAC_INBAND_AN_MASK;
3848 case PHY_INTERFACE_MODE_RGMII:
3849 val |= MVPP2_GMAC_PORT_RGMII_MASK;
3851 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3854 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3857 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3861 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3862 val |= MVPP2_GMAC_FC_ADV_EN;
3863 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3866 static void mvpp2_port_enable(struct mvpp2_port *port)
3870 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3871 val |= MVPP2_GMAC_PORT_EN_MASK;
3872 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3873 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3876 static void mvpp2_port_disable(struct mvpp2_port *port)
3880 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3881 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3882 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3885 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3886 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3890 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3891 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3892 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3895 /* Configure loopback port */
3896 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3900 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3902 if (port->speed == 1000)
3903 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3905 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3907 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3908 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3910 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3912 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3915 static void mvpp2_port_reset(struct mvpp2_port *port)
3919 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3920 ~MVPP2_GMAC_PORT_RESET_MASK;
3921 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3923 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3924 MVPP2_GMAC_PORT_RESET_MASK)
3928 /* Change maximum receive size of the port */
3929 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3933 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3934 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3935 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3936 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3937 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
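/* The divide by 2 above suggests the MAX_RX_SIZE field counts 16-bit
 * words rather than bytes (an assumption from the arithmetic, not from
 * documentation): e.g. pkt_size = 1518 + MVPP2_MH_SIZE would program
 * 1518 / 2 = 759 units.
 */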
3940 /* Set defaults for the MVPP2 port */
3941 static void mvpp2_defaults_set(struct mvpp2_port *port)
3943 int tx_port_num, val, queue, lrxq;
3945 /* Configure port to loopback if needed */
3946 if (port->flags & MVPP2_F_LOOPBACK)
3947 mvpp2_port_loopback_set(port);
3949 /* Update TX FIFO MIN Threshold */
3950 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3951 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3952 /* Min. TX threshold must be less than minimal packet length */
3953 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3954 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3956 /* Disable Legacy WRR, Disable EJP, Release from reset */
3957 tx_port_num = mvpp2_egress_port(port);
3958 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3960 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3962 /* Close bandwidth for all queues */
3963 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
3964 mvpp2_write(port->priv,
3965 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
3967 /* Set refill period to 1 usec, refill tokens
3968 * and bucket size to maximum
3969 */
3970 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3971 port->priv->tclk / USEC_PER_SEC);
3972 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3973 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3974 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3975 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3976 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3977 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3978 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3980 /* Set MaximumLowLatencyPacketSize value to 256 */
3981 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3982 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3983 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3985 /* Enable Rx cache snoop */
3986 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3987 queue = port->rxqs[lrxq]->id;
3988 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3989 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3990 MVPP2_SNOOP_BUF_HDR_MASK;
3991 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3994 /* At default, mask all interrupts to all present cpus */
3995 mvpp2_interrupts_disable(port);
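/* Worked example for the refill period above: the register holds the
 * period in tclk cycles, so tclk / USEC_PER_SEC equals one
 * microsecond; with an assumed, illustrative tclk of 250 MHz that is
 * 250000000 / 1000000 = 250 cycles.
 */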
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap = 0;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
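/* Illustrative cookie layout: pool 2 seen on CPU 1 builds
 * (2 << MVPP2_BM_COOKIE_POOL_OFFS) | (1 << MVPP2_BM_COOKIE_CPU_OFFS);
 * the Rx path later recovers the pool id with
 * mvpp2_bm_cookie_pool_get().
 */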
/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}

/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}
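/* Worked example for the check above, using assumed, illustrative
 * numbers: with txq->size = 1024, 4 present CPUs and
 * MVPP2_CPU_DESC_CHUNK = 64, any request that would push the total of
 * used plus reserved descriptors beyond 1024 - 4 * 64 = 768 is
 * rejected, so one chunk always remains requestable per CPU.
 */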
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
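/* Worked example: a TCP segment in an IPv4 frame behind a plain
 * Ethernet header would be described with l3_offs = 14,
 * l3_proto = swab16(ETH_P_IP), ip_hdr_len = 5 (ip4h->ihl, in 32-bit
 * words) and l4_proto = IPPROTO_TCP, yielding a command word with
 * IPv4 and TCP checksum generation enabled.
 */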
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
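/* Note on the helpers above: fls() returns the 1-based index of the
 * highest set bit, so the highest-numbered pending queue is serviced
 * first; e.g. cause = 0x6 (queues 1 and 2 pending) gives
 * fls(0x6) - 1 = 2, i.e. queue 2.
 */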
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_phys, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address */
	/* indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_phys);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(rx_desc);

		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
				  rx_desc->buf_cookie);
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
				txq->size * MVPP2_DESC_ALIGNED_SIZE,
				&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
					  sizeof(struct mvpp2_txq_pcpu_buf),
					  GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_phys);

	return -ENOMEM;
}
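/* Worked example for the prefetch buffer layout above (assuming
 * MVPP2_MAX_TXQ == 8): port 1, logical queue 2 starts at
 * 1 * 8 * 16 + 2 * 16 = 160 descriptors into the shared buffer, so
 * every (port, txq) pair owns a disjoint 16-descriptor window.
 */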
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
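/* Summary of the deferred tx-done path implemented above: mvpp2_tx()
 * arms the per-CPU hrtimer through mvpp2_timer_set() when packets
 * remain to be completed; the hrtimer callback only schedules the
 * tasklet, and the tasklet (mvpp2_tx_proc_cb) drains all Tx queues
 * via mvpp2_tx_done(), re-arming the timer while work is pending.
 * Tx completion is thus polled without a dedicated Tx interrupt.
 */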
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, int is_recycle)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	if (is_recycle &&
	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
		return 0;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	atomic_dec(&bm_pool->in_use);
	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}

static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
			      struct mvpp2_rx_desc *rx_desc)
{
	struct mvpp2_buff_hdr *buff_hdr;
	struct sk_buff *skb;
	u32 rx_status = rx_desc->status;
	u32 buff_phys_addr;
	u32 buff_virt_addr;
	u32 buff_phys_addr_next;
	u32 buff_virt_addr_next;
	int mc_id;
	int pool_id;

	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	buff_phys_addr = rx_desc->buf_phys_addr;
	buff_virt_addr = rx_desc->buf_cookie;

	do {
		skb = (struct sk_buff *)buff_virt_addr;
		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

		/* Release buffer */
		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
				     buff_virt_addr, mc_id);

		buff_phys_addr = buff_phys_addr_next;
		buff_virt_addr = buff_virt_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received, rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		dma_addr_t phys_addr;
		u32 bm, rx_status;
		int pool, rx_bytes, err;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
		phys_addr = rx_desc->buf_phys_addr;

		bm = mvpp2_bm_cookie_build(rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];
		/* Check if buffer header is used */
		if (rx_status & MVPP2_RXD_BUF_HDR) {
			mvpp2_buff_hdr_rx(port, rx_desc);
			continue;
		}

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
					  rx_desc->buf_cookie);
			continue;
		}

		skb = (struct sk_buff *)rx_desc->buf_cookie;

		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;
		atomic_inc(&bm_pool->in_use);

		skb_reserve(skb, MVPP2_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
static inline void
tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_unmap_single(dev, desc->buf_phys_addr,
			 desc->data_size, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_phys_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		tx_desc->phys_txq = txq->id;
		tx_desc->data_size = frag->size;

		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
					       tx_desc->data_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVPP2_TXD_L_DESC;
			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_phys_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	tx_desc->phys_txq = txq->id;
	tx_desc->data_size = skb_headlen(skb);

	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
				       tx_desc->data_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete(napi);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(port->phy_dev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	msleep(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(port->phy_dev);
}

/* Return positive if MTU is valid */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
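/* Worked example: a requested MTU of 9700 is clamped to 9676 (9700
 * minus a 20-byte margin, rounded for the 8-byte alignment of
 * MVPP2_RX_PKT_SIZE); an MTU whose derived packet size is not 8-byte
 * aligned is rounded up and reported via netdev_info() rather than
 * rejected.
 */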
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
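/* Worked example: rx_pending = 100 is not a multiple of 16 and
 * becomes ALIGN(100, 16) = 112, while tx_pending = 100 becomes
 * ALIGN(100, 32) = 128; oversized requests are clamped to
 * MVPP2_MAX_RXD / MVPP2_MAX_TXD and the adjusted values are echoed
 * back through *ring.
 */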
static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->phy_dev = phy_dev;
	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	phy_disconnect(port->phy_dev);
	port->phy_dev = NULL;
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

retry:
	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (!allmulti) {
		netdev_for_each_mc_addr(ha, dev) {
			if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
				/* Reconstructed error path (assumed): the
				 * parser has no room left, so fall back to
				 * accepting all multicast and redo the setup.
				 */
				allmulti = true;
				goto retry;
			}
		}
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "fail to change MAC address\n");
	return err;
}

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	mtu = mvpp2_check_mtu_valid(dev, mtu);
	if (mtu < 0) {
		err = mtu;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "fail to change MTU\n");
	return err;
}
static struct rtnl_link_stats64 *
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors  = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	if (!port->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_gset(port->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_sset(port->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "fail to change ring parameters");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_settings	= mvpp2_ethtool_get_settings,
	.set_settings	= mvpp2_ethtool_set_settings,
	.set_coalesce	= mvpp2_ethtool_set_coalesce,
	.get_coalesce	= mvpp2_ethtool_get_coalesce,
	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
	.get_ringparam	= mvpp2_ethtool_get_ringparam,
	.set_ringparam	= mvpp2_ethtool_set_ringparam,
};
/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
6117 /* Ports initialization */
6118 static int mvpp2_port_probe(struct platform_device *pdev,
6119 struct device_node *port_node,
6121 int *next_first_rxq)
6123 struct device_node *phy_node;
6124 struct mvpp2_port *port;
6125 struct mvpp2_port_pcpu *port_pcpu;
6126 struct net_device *dev;
6127 struct resource *res;
6128 const char *dt_mac_addr;
6129 const char *mac_from;
6130 char hw_mac_addr[ETH_ALEN];
6134 int priv_common_regs_num = 2;
6137 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6142 phy_node = of_parse_phandle(port_node, "phy", 0);
6144 dev_err(&pdev->dev, "missing phy\n");
6146 goto err_free_netdev;
6149 phy_mode = of_get_phy_mode(port_node);
6151 dev_err(&pdev->dev, "incorrect phy mode\n");
6153 goto err_free_netdev;
6156 if (of_property_read_u32(port_node, "port-id", &id)) {
6158 dev_err(&pdev->dev, "missing port-id value\n");
6159 goto err_free_netdev;
6162 dev->tx_queue_len = MVPP2_MAX_TXD;
6163 dev->watchdog_timeo = 5 * HZ;
6164 dev->netdev_ops = &mvpp2_netdev_ops;
6165 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6167 port = netdev_priv(dev);
6169 port->irq = irq_of_parse_and_map(port_node, 0);
6170 if (port->irq <= 0) {
6172 goto err_free_netdev;
6175 if (of_property_read_bool(port_node, "marvell,loopback"))
6176 port->flags |= MVPP2_F_LOOPBACK;
6180 port->first_rxq = *next_first_rxq;
6181 port->phy_node = phy_node;
6182 port->phy_interface = phy_mode;
6184 res = platform_get_resource(pdev, IORESOURCE_MEM,
6185 priv_common_regs_num + id);
6186 port->base = devm_ioremap_resource(&pdev->dev, res);
6187 if (IS_ERR(port->base)) {
6188 err = PTR_ERR(port->base);
6192 /* Alloc per-cpu stats */
6193 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6199 dt_mac_addr = of_get_mac_address(port_node);
6200 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6201 mac_from = "device tree";
6202 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6204 mvpp2_get_mac_address(port, hw_mac_addr);
6205 if (is_valid_ether_addr(hw_mac_addr)) {
6206 mac_from = "hardware";
6207 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6209 mac_from = "random";
6210 eth_hw_addr_random(dev);
6214 port->tx_ring_size = MVPP2_MAX_TXD;
6215 port->rx_ring_size = MVPP2_MAX_RXD;
6217 SET_NETDEV_DEV(dev, &pdev->dev);
6219 err = mvpp2_port_init(port);
6221 dev_err(&pdev->dev, "failed to init port %d\n", id);
6222 goto err_free_stats;
6224 mvpp2_port_power_up(port);
6226 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6229 goto err_free_txq_pcpu;
6232 for_each_present_cpu(cpu) {
6233 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6235 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6236 HRTIMER_MODE_REL_PINNED);
6237 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6238 port_pcpu->timer_scheduled = false;
6240 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6241 (unsigned long)dev);
6244 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;
	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
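/* The mbus decode windows below steer the packet processor's DMA accesses
 * to the DRAM chip selects. Each WIN_BASE value packs the window base, the
 * mbus attribute and the target ID; with illustrative numbers only, a
 * window at base 0x40000000, attribute 0xe and target 0 would be written
 * as (0x40000000 & 0xffff0000) | (0xe << 8) | 0 = 0x40000e00.
 */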
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	/* Start by clearing and disabling all six windows */
	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	/* Then configure one window per DRAM chip select */
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}
	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}
	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
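	/* The value written is rxq_number itself, i.e. each port's
	 * interrupt group presumably spans its own rxq_number RX queues,
	 * which matches the *next_first_rxq += rxq_number bookkeeping in
	 * mvpp2_port_probe().
	 */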
	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
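/* Probe the whole controller: map the two common register windows (MEM
 * resource 0 for the packet processor, MEM resource 1 for the LMS block),
 * enable the "pp_clk" and "gop_clk" clocks, initialize the shared hardware
 * and finally probe every enabled port child node.
 */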
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);
	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;
	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}
	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
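/* Teardown mirrors probe in reverse: remove the ports, destroy the BM
 * pools, free the per-CPU aggregated TXQ descriptor arrays, then disable
 * the clocks.
 */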
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};
module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");