/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

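/* Read back the TX status from TDES0 once the DMA has released the
 * descriptor. Errors are only evaluated on the last segment of a frame;
 * the extra stats and the netdev counters are updated accordingly.
 */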
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}

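/* The transmit buffer 1 size is kept in the low bits of TDES1. */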
static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}

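/* Translate the frame type, IPC error and payload error bits of RDES0
 * into one of the driver's internal frame status codes, following the
 * table below.
 */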
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM errors.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HR error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed.. no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0)
		ret = llc_snap;
	else if (status == 0x4)
		ret = good_frame;
	else if (status == 0x5)
		ret = csum_none;
	else if (status == 0x6)
		ret = csum_none;
	else if (status == 0x7)
		ret = csum_none;
	else if (status == 0x1)
		ret = discard_frame;
	else if (status == 0x3)
		ret = discard_frame;

	return ret;
}

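/* Decode the extended RX status word (RDES4): IP header/payload
 * checksum outcome, the received PTP message type and the L3/L4
 * filter results.
 */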
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}

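/* Check a received frame: ownership first, then the error summary and
 * filter-fail bits of RDES0. Returns one of the internal frame status
 * codes used by the rx path.
 */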
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			stats->collisions++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))	/* GMII */
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This doesn't match the information reported in the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers. */
	if (likely(ret == good_frame))
		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
					 !!(rdes0 & RDES0_FRAME_TYPE),
					 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}

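/* Set up an RX descriptor: program the OWN bit, the buffer size and
 * the chain/ring linkage, and optionally disable the rx interrupt for
 * this descriptor.
 */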
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}

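/* At init time a TX descriptor must be owned by the CPU: clear the OWN
 * bit and set up the chain/ring linkage.
 */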
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~ETDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, end);
}

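/* Single-bit accessors for the OWN and LAST SEGMENT flags in des0. */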
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}

static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}

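/* Fill a TX descriptor for transmission: buffer length, first/last
 * segment markers and checksum insertion, and finally the OWN bit that
 * passes the descriptor to the DMA.
 */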
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame have to be set before it,
		 * to avoid a race condition.
		 */
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}

static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

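/* Frame length as reported in RDES0, adjusted for the checksum bytes
 * that a type-1 COE appends to the frame.
 */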
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;
	/* The type-1 checksum offload engines append the checksum at
	 * the end of the frame and the two bytes of checksum are
	 * included in the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines. */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
		>> RDES0_FRAME_LEN_SHIFT) - csum);
}

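/* TX hardware timestamping: request a timestamp via TDES0 and check
 * whether the hardware has written one back.
 */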
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}

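/* Return the captured timestamp in nanoseconds. With the extended
 * descriptor layout (ats) it is found in des6/des7, otherwise the
 * hardware writes it back into des2/des3 of the basic descriptor.
 */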
static u64 enh_desc_get_timestamp(void *desc, u32 ats)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	return ns;
}

static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
		    (le32_to_cpu(p->des3) == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}

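/* Dump the whole extended descriptor ring, for debugging only. */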
static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	int i;

	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)ep;
		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(ep),
			(unsigned int)x, (unsigned int)(x >> 32),
			ep->basic.des2, ep->basic.des3);
		ep++;
	}
	pr_info("\n");
}

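/* Descriptor callbacks exported to the core for the enhanced
 * descriptor layout.
 */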
const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
};