/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
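/*
 * A note on indexing, inferred from the functions below: sw_tail is where
 * the driver posts new descriptors, hw_head is how far the hardware has
 * advanced, and sw_head trails hw_head while the driver reclaims completed
 * buffers. All three indices wrap modulo the ring size via
 * aq_ring_next_dx().
 */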
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	/* Host-side bookkeeping array, one entry per descriptor. */
	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	/* Descriptor array shared with the hardware over DMA. */
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	/* aq_ring_alloc() frees the ring and returns NULL on failure. */
	self = aq_ring_alloc(self, aq_nic);
	return self;
}
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

	self = aq_ring_alloc(self, aq_nic);
	return self;
}
int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	return 0;
}
/* Is index i strictly inside the wrap-aware window (h, t)? */
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}
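/*
 * Worked example of the wrap-around check above: with h = 10, t = 5 on a
 * ring that has wrapped, i = 12 and i = 3 are both in range (12 > h,
 * 3 < t) while i = 7 is not; without wrap (h = 5, t = 10), i = 7 is in
 * range and i = 12 is not.
 */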
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}
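/*
 * The queue is stopped once there is no longer room for a maximally
 * fragmented skb, but it is only woken once availability climbs past
 * AQ_CFG_RESTART_DESC_THRES rather than as soon as a single descriptor
 * is reclaimed; the gap between the two thresholds provides hysteresis
 * so the queue does not thrash between stopped and awake.
 */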
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}
void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}
void aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);

	for (; self->sw_head != self->hw_head;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				/* Do not reclaim a multi-descriptor packet
				 * until its EOP descriptor has completed.
				 */
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
	}
}
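/*
 * Worked example for the EOP gate in aq_ring_tx_clean(): if a packet
 * occupies descriptors 2..5 (sw_head = 2, eop_index = 5) while the
 * hardware has only completed through hw_head = 4, then
 * aq_ring_dx_in_range(2, 5, 4) is false, so the loop breaks and the
 * packet is reclaimed on a later pass once hw_head moves past 5.
 */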
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
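/*
 * build_skb() places struct skb_shared_info at the tail of the buffer it
 * is handed, so the zero-copy path below is only taken when the received
 * frame leaves at least AQ_SKB_ALIGN bytes of tailroom inside the
 * AQ_CFG_RX_FRAME_MAX buffer; larger frames fall back to attaching the
 * page as a fragment of a freshly allocated skb.
 */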
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;
	bool is_rsc_completed = true;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		struct aq_ring_buff_s *buff_ = NULL;
		if (buff->is_error) {
			__free_pages(buff->page, 0);
			continue;
		}

		if (buff->is_cleaned)
			continue;
		if (!buff->is_eop) {
			/* Scan forward to make sure the hardware has written
			 * every descriptor of this RSC aggregate.
			 */
			for (next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_]) {
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed)) {
					is_rsc_completed = false;
					break;
				}

				if (buff_->is_eop)
					break;
			}

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
		}
		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(page_address(buff->page),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			skb_put(skb, buff->len);
		} else {
			skb = netdev_alloc_skb(ndev, ETH_HLEN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, ETH_HLEN);
			memcpy(skb->data, page_address(buff->page), ETH_HLEN);

			skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
					buff->len - ETH_HLEN,
					SKB_TRUESIZE(buff->len - ETH_HLEN));
			if (!buff->is_eop) {
				for (i = 1U, next_ = buff->next,
				     buff_ = &self->buff_ring[next_];
				     true; next_ = buff_->next,
				     buff_ = &self->buff_ring[next_], ++i) {
					skb_add_rx_frag(skb, i,
							buff_->page, 0,
							buff_->len,
							SKB_TRUESIZE(buff->len -
								     ETH_HLEN));
					buff_->is_cleaned = 1;

					if (buff_->is_eop)
						break;
				}
			}
		}
		skb->protocol = eth_type_trans(skb, ndev);
		if (unlikely(buff->is_cso_err)) {
			++self->stats.rx.errors;
			skb->ip_summed = CHECKSUM_NONE;
		} else {
			/* Each increment marks one checksum level as
			 * validated: first IP, then TCP/UDP.
			 */
			if (buff->is_ip_cso) {
				__skb_incr_checksum_unnecessary(skb);
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}

			if (buff->is_udp_cso || buff->is_tcp_cso)
				__skb_incr_checksum_unnecessary(skb);
		}
		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		napi_gro_receive(napi, skb);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
	}

err_exit:
	return err;
}
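/*
 * Note on the early exit above: when an RSC (receive side coalescing)
 * aggregate spans descriptors the hardware has not finished writing,
 * aq_ring_rx_clean() returns without advancing sw_head, so the partial
 * packet is retried on the next NAPI poll. The budget argument bounds
 * the number of packets processed per poll, as NAPI requires.
 */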
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
					 __GFP_COMP, pages_order);
		if (!buff->page) {
			err = -ENOMEM;
			goto err_exit;
		}
		buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
					buff->page, 0,
					AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
		if (dma_mapping_error(aq_nic_get_dev(self->aq_nic),
				      buff->pa)) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff = NULL;
	}

err_exit:
	if (err < 0) {
		if (buff && buff->page)
			__free_pages(buff->page, 0);
	}

	return err;
}
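/*
 * Sketch of the pages_order arithmetic above, assuming the default
 * AQ_CFG_RX_FRAME_MAX of 2 KiB on 4 KiB pages: 2048 / 4096 = 0 plus 1
 * for the remainder gives fls(1) - 1 = 0, i.e. a single order-0 page
 * per receive buffer. A 16 KiB frame on the same pages would give
 * fls(4) - 1 = 2, an order-2 (four-page) allocation.
 */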
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
			       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

		__free_pages(buff->page, 0);
	}
}
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}
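/*
 * Typical ring lifecycle, pieced together from the functions in this
 * file (a sketch under assumptions, not verbatim driver code):
 *
 *	ring = aq_ring_rx_alloc(ring, nic, idx, cfg);
 *	aq_ring_init(ring);
 *	aq_ring_rx_fill(ring);
 *	...			(NAPI poll calls aq_ring_rx_clean())
 *	aq_ring_rx_deinit(ring);
 *	aq_ring_free(ring);
 *
 * Tx rings follow the same shape with aq_ring_tx_alloc() and
 * aq_ring_tx_clean(), minus the fill/deinit steps.
 */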