/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "xgbe.h"
#include "xgbe-common.h"
static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
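/* Release everything associated with a single descriptor ring: unmap and
 * free the per-descriptor data, return any cached Rx header/buffer pages,
 * and free the coherent DMA memory holding the hardware descriptors.
 */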
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}
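/* Free the Tx and Rx descriptor rings of every channel. */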
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}
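/* Allocate zeroed memory on the given NUMA node, falling back to any node
 * if the node-local allocation fails.
 */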
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}
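/* Allocate coherent DMA memory with NUMA affinity: the device's node is
 * temporarily overridden so dma_alloc_coherent() tries the requested node
 * first, then restored; a second attempt without the override serves as
 * the fallback.
 */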
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}
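/* Allocate the two arrays that back a ring: the hardware descriptors
 * (coherent DMA memory) and the driver's per-descriptor bookkeeping
 * (struct xgbe_ring_data), both placed on the ring's NUMA node.
 */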
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}
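/* Allocate Tx and Rx descriptor rings for every channel; on any failure,
 * unwind whatever was already allocated.
 */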
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}
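/* Allocate and DMA-map a compound page group for Rx buffers. The order is
 * reduced until an allocation succeeds; if node-local memory is exhausted,
 * the whole attempt is retried without a node preference.
 */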
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}
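/* Carve one Rx buffer of 'len' bytes out of the current page allocation.
 * The buffer takes a page reference; once the allocation cannot satisfy
 * another buffer of the same size, this buffer also inherits responsibility
 * for unmapping the pages, and the allocator state is reset so a fresh
 * page group is obtained next time.
 */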
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}
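/* Attach Rx header and data buffers to one descriptor entry, replenishing
 * the ring's cached page allocations first if either has been exhausted.
 */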
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}
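/* Walk every Tx ring, point each xgbe_ring_data entry at its hardware
 * descriptor (virtual and DMA addresses), reset the ring state, then hand
 * the channel to the hardware layer's tx_desc_init callback.
 */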
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}
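/* Same walk for the Rx rings, additionally mapping fresh receive buffers
 * into each entry before handing the channel to rx_desc_init.
 */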
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
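/* Return a descriptor entry to a pristine state: undo any Tx DMA mapping,
 * free a held skb, drop Rx page references (unmapping the pages when this
 * entry owns them), and clear the saved Tx/Rx and partial-packet state.
 */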
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}
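/* DMA-map an outgoing skb across as many descriptor entries as needed:
 * an optional context slot (new TSO MSS or VLAN tag), the TSO header, the
 * linear data in XGBE_TX_MAX_BUF_SIZE chunks, then each page fragment.
 * The skb pointer is stored in the last entry used so it is freed only
 * after the final segment completes. Returns the number of descriptor
 * entries consumed, or zero on failure after unwinding all mappings.
 */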
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}
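/* Publish this file's implementations through the descriptor interface so
 * the rest of the driver calls them only via desc_if function pointers.
 * A minimal sketch of the assumed call pattern (the real wiring lives
 * elsewhere in the driver, e.g. around xgbe_init_all_fptrs):
 *
 *	struct xgbe_desc_if desc_if;
 *	int ret;
 *
 *	xgbe_init_function_ptrs_desc(&desc_if);
 *	ret = desc_if.alloc_ring_resources(pdata);
 *	if (ret)
 *		return ret;
 *	desc_if.wrapper_tx_desc_init(pdata);
 *	desc_if.wrapper_rx_desc_init(pdata);
 */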
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}