// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;
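
/* gro_normal_batch is exposed at runtime as the net.core.gro_normal_batch
 * sysctl; see net/core/sysctl_net_core.c.
 */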

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	/* Insert before the first entry with a higher priority value,
	 * or at the tail if none was found.
	 */
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
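
/* Example (illustrative): a protocol typically registers its offload
 * handlers once at boot, much as IPv4 does in net/ipv4/af_inet.c:
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 *
 * Code that can unload must pair this with dev_remove_offload().
 */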

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/**
 *	skb_eth_gso_segment - segmentation handler for ethernet protocols.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
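
/* Try to merge @skb into the GRO packet @p. Three strategies are used, in
 * order of preference: append skb's page fragments to p's last segment,
 * steal skb's page-backed head as an extra fragment, or fall back to
 * chaining skb onto p's frag_list. Returns 0 on success or a negative
 * errno when the packets cannot be coalesced.
 */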
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (p->protocol != htons(ETH_P_IPV6) ||
		    skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
		    ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		/* All of skb's payload lives in page fragments: move them
		 * over to the tail segment of p.
		 */
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		/* skb's head is itself page-backed: steal it as one more
		 * fragment instead of copying the data.
		 */
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		/* Nothing was aggregated; deliver the skb unmodified. */
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age,
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
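
/* napi_gro_flush() is normally invoked from napi_complete_done() when a
 * NAPI poll cycle ends, so held GRO packets do not linger across polls.
 */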

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In most common scenarios 'slow_gro' is 0; otherwise we are
		 * already on some slower paths, so either skip all the
		 * infrequent tests altogether or avoid trying too hard to
		 * skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
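
/* Set up the frag0 fast path: when the packet has no linear data, GRO
 * header accessors can read protocol headers directly from the first
 * page fragment instead of pulling them into the skb head first.
 */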
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
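
/* Example (illustrative sketch, not a real driver): a NAPI poll handler
 * hands each received buffer to GRO instead of calling netif_receive_skb()
 * directly; "my_poll" and "my_rx_next_skb" are hypothetical names.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = my_rx_next_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */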

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
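
/* Example (illustrative sketch): drivers that receive directly into pages
 * use napi_get_frags()/napi_gro_frags() instead of building an skb
 * themselves; "page", "off" and "len" are hypothetical names here.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		return;		// out of memory, drop
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
 *			PAGE_SIZE);
 *	napi_gro_frags(napi);	// consumes napi->skb
 *
 * Note the driver does not parse the Ethernet header itself:
 * napi_frags_skb() pulls it from the first fragment.
 */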

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
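
/* Protocol gro_receive handlers do not usually call this directly; they
 * typically reach it through the skb_gro_checksum_validate() family of
 * helpers, which first try the cheaper csum_valid/csum_cnt shortcuts set
 * up by dev_gro_receive() above.
 */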