// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &packet_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that all
 *      CPUs that are in the middle of receiving packets will see the new
 *      offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *      __dev_remove_offload     - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed
 *      &packet_offload is removed from the kernel lists and can be freed or
 *      reused once this function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 *      dev_remove_offload       - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &packet_offload is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
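
/* A minimal usage sketch (hypothetical ETH_P_FOO handler, not from this
 * file): protocol modules typically register their offload callbacks once
 * at init time and unregister them on exit, e.g.:
 *
 *      static struct packet_offload foo_offload __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_FOO),
 *              .callbacks = {
 *                      .gro_receive  = foo_gro_receive,
 *                      .gro_complete = foo_gro_complete,
 *              },
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              dev_add_offload(&foo_offload);
 *              return 0;
 *      }
 *
 *      static void __exit foo_exit(void)
 *      {
 *              dev_remove_offload(&foo_offload);
 *      }
 */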
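
/* skb_gro_receive() merges @skb into the held GRO candidate @p. Depending
 * on the layout of @skb it either moves the page fragments into @p's
 * shared info, converts a page-backed linear head into an extra fragment,
 * or falls back to chaining @skb onto @p's frag_list. Returns 0 on
 * success, -ETOOMANYREFS when page-pool and non-page-pool packets would
 * be spliced, and -E2BIG when the flows cannot be aggregated further
 * (size limits, or a requested flush).
 */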
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
        unsigned int gro_max_size;
        unsigned int new_truesize;
        struct sk_buff *lp;
        int segs;

        /* Do not splice page pool based packets with non-page pool
         * packets. This can result in reference count issues as page
         * pool pages will not decrement the reference count and will
         * instead be immediately returned to the pool or have frag
         * count decremented.
         */
        if (p->pp_recycle != skb->pp_recycle)
                return -ETOOMANYREFS;

        /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
        gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
                        READ_ONCE(p->dev->gro_max_size) :
                        READ_ONCE(p->dev->gro_ipv4_max_size);

        if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;

        if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
                if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
                    (p->protocol == htons(ETH_P_IPV6) &&
                     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
                    p->encapsulation)
                        return -E2BIG;
        }

        segs = NAPI_GRO_CB(skb)->count;
        lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);

        if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;

                if (nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;

                frag = pinfo->frags + nr_frags;
                frag2 = skbinfo->frags + i;
                do {
                        *--frag = *--frag2;
                } while (--i);

                skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);

                /* All fragments' truesize: remove (head size + sk_buff) */
                new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
                delta_truesize = skb->truesize - new_truesize;

                skb->truesize = new_truesize;
                skb->len -= skb->data_len;
                skb->data_len = 0;

                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
                goto done;
        } else if (skb->head_frag) {
                int nr_frags = pinfo->nr_frags;
                skb_frag_t *frag = pinfo->frags + nr_frags;
                struct page *page = virt_to_head_page(skb->head);
                unsigned int first_size = headlen - offset;
                unsigned int first_offset;

                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
                               offset;

                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

                skb_frag_fill_page_desc(frag, page, first_offset, first_size);

                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
                /* We don't need to clear skbinfo->nr_frags here */

                new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
                delta_truesize = skb->truesize - new_truesize;
                skb->truesize = new_truesize;
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }

merge:
        /* sk ownership - if any - completely transferred to the aggregated packet */
        skb->destructor = NULL;
        skb->sk = NULL;
        delta_truesize = skb->truesize;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;

                skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
                offset = headlen;
        }

        __skb_pull(skb, offset);

        if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        __skb_header_release(skb);
        lp = p;

done:
        NAPI_GRO_CB(p)->count += segs;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
        if (lp != p) {
                lp->data_len += len;
                lp->truesize += delta_truesize;
                lp->len += len;
        }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct list_head *head = &offload_base;
        int err = -ENOENT;

        BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;

                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, 0);
                break;
        }
        rcu_read_unlock();

        if (err) {
                WARN_ON(&ptype->list == head);
                kfree_skb(skb);
                return;
        }

out:
        gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                                   bool flush_old)
{
        struct list_head *head = &napi->gro_hash[index].list;
        struct sk_buff *skb, *p;

        list_for_each_entry_safe_reverse(skb, p, head, list) {
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
                napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }

        if (!napi->gro_hash[index].count)
                __clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
        unsigned long bitmask = napi->gro_bitmask;
        unsigned int i, base = ~0U;

        while ((i = ffs(bitmask)) != 0) {
                bitmask >>= i;
                base += i;
                __napi_gro_flush_chain(napi, base, flush_old);
        }
}
EXPORT_SYMBOL(napi_gro_flush);
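
/* A worked example of the ffs() walk above: with gro_bitmask == 0b1010,
 * ffs() returns 2, so base goes from ~0U to 1 and bucket 1 is flushed;
 * ffs() on the shifted mask (0b10) returns 2 again, base becomes 3, and
 * bucket 3 is flushed.
 */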

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
                                             const struct sk_buff *p,
                                             unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        struct tc_skb_ext *skb_ext;
        struct tc_skb_ext *p_ext;

        skb_ext = skb_ext_find(skb, TC_SKB_EXT);
        p_ext = skb_ext_find(p, TC_SKB_EXT);

        diffs |= (!!p_ext) ^ (!!skb_ext);
        if (!diffs && unlikely(skb_ext))
                diffs |= p_ext->chain ^ skb_ext->chain;
#endif
        return diffs;
}

static void gro_list_prepare(const struct list_head *head,
                             const struct sk_buff *skb)
{
        unsigned int maclen = skb->dev->hard_header_len;
        u32 hash = skb_get_hash_raw(skb);
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                unsigned long diffs;

                NAPI_GRO_CB(p)->flush = 0;

                if (hash != skb_get_hash_raw(p)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_all ^ skb->vlan_all;
                diffs |= skb_metadata_differs(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
                else if (!diffs)
                        diffs = memcmp(skb_mac_header(p),
                                       skb_mac_header(skb),
                                       maclen);

                /* In most common scenarios 'slow_gro' is 0; otherwise
                 * we are already on some slower path, so either skip
                 * all the infrequent tests altogether or avoid trying
                 * too hard to skip each of them individually.
                 */
                if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
                        diffs |= p->sk != skb->sk;
                        diffs |= skb_metadata_dst_cmp(p, skb);
                        diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

                        diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
                }

                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
        const struct skb_shared_info *pinfo = skb_shinfo(skb);
        const skb_frag_t *frag0 = &pinfo->frags[0];

        NAPI_GRO_CB(skb)->data_offset = 0;
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;

        if (!skb_headlen(skb) && pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0)) &&
            (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
                                                    skb->end - skb->tail);
        }
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
        struct skb_shared_info *pinfo = skb_shinfo(skb);

        BUG_ON(skb->end - skb->tail < grow);

        memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

        skb->data_len -= grow;
        skb->tail += grow;

        skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);

        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                skb_frag_unref(skb, 0);
                memmove(pinfo->frags, pinfo->frags + 1,
                        --pinfo->nr_frags * sizeof(pinfo->frags[0]));
        }
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
        int grow = skb_gro_offset(skb) - skb_headlen(skb);

        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
        struct sk_buff *oldest;

        oldest = list_last_entry(head, struct sk_buff, list);

        /* We are called with head length >= MAX_GRO_SKBS, so this is
         * impossible.
         */
        if (WARN_ON_ONCE(!oldest))
                return;

        /* Do not adjust napi->gro_hash[].count, caller is adding a new
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
        napi_gro_complete(napi, oldest);
}

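/* dev_gro_receive() is the core GRO entry point: it hashes @skb into one
 * of the per-NAPI GRO buckets, marks which held packets belong to the
 * same flow, and hands the skb to the matching protocol's gro_receive
 * callback, which either merges the skb into a held packet, requests a
 * flush, or leaves the skb to be held as a new GRO candidate.
 */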
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
        struct gro_list *gro_list = &napi->gro_hash[bucket];
        struct list_head *head = &offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct sk_buff *pp = NULL;
        enum gro_result ret;
        int same_flow;

        if (netif_elide_gro(skb->dev))
                goto normal;

        gro_list_prepare(&gro_list->list, skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type == type && ptype->callbacks.gro_receive)
                        goto found_ptype;
        }
        rcu_read_unlock();
        goto normal;

found_ptype:
        skb_set_network_header(skb, skb_gro_offset(skb));
        skb_reset_mac_len(skb);
        BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
                                        sizeof(u32))); /* Avoid slow unaligned accesses */
        *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
        NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
        NAPI_GRO_CB(skb)->is_atomic = 1;
        NAPI_GRO_CB(skb)->count = 1;
        if (unlikely(skb_is_gso(skb))) {
                NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
                /* Only support TCP and non DODGY users. */
                if (!skb_is_gso_tcp(skb) ||
                    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
                        NAPI_GRO_CB(skb)->flush = 1;
        }

        /* Setup for GRO checksum validation */
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                NAPI_GRO_CB(skb)->csum = skb->csum;
                NAPI_GRO_CB(skb)->csum_valid = 1;
                break;
        case CHECKSUM_UNNECESSARY:
                NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
                break;
        }

        pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
                                ipv6_gro_receive, inet_gro_receive,
                                &gro_list->list, skb);

        rcu_read_unlock();

        if (PTR_ERR(pp) == -EINPROGRESS) {
                ret = GRO_CONSUMED;
                goto ok;
        }

        same_flow = NAPI_GRO_CB(skb)->same_flow;
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

        if (pp) {
                skb_list_del_init(pp);
                napi_gro_complete(napi, pp);
                gro_list->count--;
        }

        if (same_flow)
                goto ok;

        if (NAPI_GRO_CB(skb)->flush)
                goto normal;

        if (unlikely(gro_list->count >= MAX_GRO_SKBS))
                gro_flush_oldest(napi, &gro_list->list);
        else
                gro_list->count++;

        /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
        gro_try_pull_from_frag0(skb);
        NAPI_GRO_CB(skb)->age = jiffies;
        NAPI_GRO_CB(skb)->last = skb;
        if (!skb_is_gso(skb))
                skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        list_add(&skb->list, &gro_list->list);
        ret = GRO_HELD;
ok:
        if (gro_list->count) {
                if (!test_bit(bucket, &napi->gro_bitmask))
                        __set_bit(bucket, &napi->gro_bitmask);
        } else if (test_bit(bucket, &napi->gro_bitmask)) {
                __clear_bit(bucket, &napi->gro_bitmask);
        }

        return ret;

normal:
        ret = GRO_NORMAL;
        gro_try_pull_from_frag0(skb);
        goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
        struct list_head *offload_head = &offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_receive)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
        struct list_head *offload_head = &offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
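
/* Usage sketch: tunnel and encapsulation offloads typically use these
 * lookups from their own callbacks to chain into the inner protocol's
 * handlers, under rcu_read_lock() as list_for_each_entry_rcu() requires,
 * e.g.:
 *
 *      ptype = gro_find_receive_by_type(type);
 *      if (!ptype)
 *              goto out;
 *      pp = ptype->callbacks.gro_receive(head, skb);
 */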

static gro_result_t napi_skb_finish(struct napi_struct *napi,
                                    struct sk_buff *skb,
                                    gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
                gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
                        __kfree_skb(skb);
                else
                        __napi_kfree_skb(skb, SKB_CONSUMED);
                break;

        case GRO_HELD:
        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        gro_result_t ret;

        skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);

        skb_gro_reset_offset(skb, 0);

        ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_receive_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
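
/* Usage sketch: a NAPI driver's poll routine typically builds an skb per
 * received descriptor and feeds it through GRO (the foo_* names are
 * hypothetical):
 *
 *      static int foo_poll(struct napi_struct *napi, int budget)
 *      {
 *              int done = 0;
 *
 *              while (done < budget && foo_rx_pending(napi)) {
 *                      struct sk_buff *skb = foo_build_skb(napi);
 *
 *                      napi_gro_receive(napi, skb);
 *                      done++;
 *              }
 *              if (done < budget)
 *                      napi_complete_done(napi, done);
 *              return done;
 *      }
 */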

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
        if (unlikely(skb->pfmemalloc)) {
                consume_skb(skb);
                return;
        }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
        __vlan_hwaccel_clear_tag(skb);
        skb->dev = napi->dev;
        skb->skb_iif = 0;

        /* eth_type_trans() assumes pkt_type is PACKET_HOST */
        skb->pkt_type = PACKET_HOST;

        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb_shinfo(skb)->gso_size = 0;
        if (unlikely(skb->slow_gro)) {
                skb_orphan(skb);
                skb_ext_reset(skb);
                nf_reset_ct(skb);
                skb->slow_gro = 0;
        }

        napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;

        if (!skb) {
                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
                if (skb) {
                        napi->skb = skb;
                        skb_mark_napi_id(skb, napi);
                }
        }
        return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
                if (ret == GRO_NORMAL)
                        gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;
        const struct ethhdr *eth;
        unsigned int hlen = sizeof(*eth);

        napi->skb = NULL;

        skb_reset_mac_header(skb);
        skb_gro_reset_offset(skb, hlen);

        if (unlikely(skb_gro_header_hard(skb, hlen))) {
                eth = skb_gro_header_slow(skb, hlen, 0);
                if (unlikely(!eth)) {
                        net_warn_ratelimited("%s: dropping impossible skb from %s\n",
                                             __func__, napi->dev->name);
                        napi_reuse_skb(napi, skb);
                        return NULL;
                }
        } else {
                eth = (const struct ethhdr *)skb->data;
                gro_pull_from_frag0(skb, hlen);
                NAPI_GRO_CB(skb)->frag0 += hlen;
                NAPI_GRO_CB(skb)->frag0_len -= hlen;
        }
        __skb_pull(skb, hlen);

        /* This works because the only protocols we care about don't require
         * special handling.
         * We'll fix it up properly in napi_frags_finish().
         */
        skb->protocol = eth->h_proto;

        return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
        gro_result_t ret;
        struct sk_buff *skb = napi_frags_skb(napi);

        trace_napi_gro_frags_entry(skb);

        ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_frags_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
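
/* Usage sketch: drivers that receive directly into pages use the
 * napi_get_frags()/napi_gro_frags() pair, attaching fragments to the
 * shell skb so GRO can pull the ethernet header from frag0 (page,
 * offset, len and truesize stand in for hypothetical driver state):
 *
 *      struct sk_buff *skb = napi_get_frags(napi);
 *
 *      if (!skb)
 *              return;
 *      skb_fill_page_desc(skb, 0, page, offset, len);
 *      skb->len += len;
 *      skb->data_len += len;
 *      skb->truesize += truesize;
 *      napi_gro_frags(napi);
 */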

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
        __wsum wsum;
        __sum16 sum;

        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

        /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
        sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
        /* See comments in __skb_checksum_complete(). */
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev, skb);
        }

        NAPI_GRO_CB(skb)->csum = wsum;
        NAPI_GRO_CB(skb)->csum_valid = 1;

        return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
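
/* Usage sketch: protocol gro_receive handlers rarely call this directly;
 * they typically validate checksums via helpers such as
 * skb_gro_checksum_validate(), which fall back to
 * __skb_gro_checksum_complete() only when no hardware checksum result
 * can be reused.
 */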