net/core/sysctl_net_core.c
// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
 * sysctl_net_core.c: sysctl interface to net core subsystem.
 *
 * Begun April 1, 1996, Mike Shaver.
 * Added /proc/sys/net/core directory entry (empty =) ). [MS]
 */

#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>

#include <net/ip.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>

#include "dev.h"

static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;

static int net_msg_warn;        /* Unused, but still a sysctl */

int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);

/* 0 - Keep current behavior:
 *     IPv4: inherit all current settings from init_net
 *     IPv6: reset all settings to default
 * 1 - Both inherit all current settings from init_net
 * 2 - Both reset all settings to default
 * 3 - Both inherit all settings from current netns
 */
int sysctl_devconf_inherit_init_net __read_mostly;
EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
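/*
 * Tunable at runtime via /proc/sys/net/core/devconf_inherit_init_net, e.g.
 * "sysctl -w net.core.devconf_inherit_init_net=1" (illustrative) to have
 * newly created namespaces inherit the current init_net settings for both
 * IPv4 and IPv6.
 */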

#if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS)
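/*
 * Format @mask into @buffer as a hexadecimal CPU bitmap for a read-side
 * sysctl handler.  Output is produced only on the first read (*ppos == 0);
 * subsequent reads report EOF by setting *lenp to 0.
 */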
static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
                         struct cpumask *mask)
{
        char kbuf[128];
        int len;

        if (*ppos || !*lenp) {
                *lenp = 0;
                return;
        }

        len = min(sizeof(kbuf) - 1, *lenp);
        len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
        if (!len) {
                *lenp = 0;
                return;
        }

        if (len < *lenp)
                kbuf[len++] = '\n';
        memcpy(buffer, kbuf, len);
        *lenp = len;
        *ppos += len;
}
#endif

#ifdef CONFIG_RPS

static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
{
        struct cpumask *rps_default_mask;

        if (net->core.rps_default_mask)
                return net->core.rps_default_mask;

        rps_default_mask = kzalloc(cpumask_size(), GFP_KERNEL);
        if (!rps_default_mask)
                return NULL;

        /* pairs with READ_ONCE in rx_queue_default_mask() */
        WRITE_ONCE(net->core.rps_default_mask, rps_default_mask);
        return rps_default_mask;
}

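/*
 * Handler for /proc/sys/net/core/rps_default_mask.  Writes allocate the
 * per-netns default RPS mask on demand (helper above) and parse the
 * user-supplied CPU bitmask; reads dump the current mask, or an empty mask
 * if none has been configured.  Illustrative usage:
 * "echo f > /proc/sys/net/core/rps_default_mask" to default new rx queues
 * to CPUs 0-3.
 */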
static int rps_default_mask_sysctl(struct ctl_table *table, int write,
                                   void *buffer, size_t *lenp, loff_t *ppos)
{
        struct net *net = (struct net *)table->data;
        int err = 0;

        rtnl_lock();
        if (write) {
                struct cpumask *rps_default_mask = rps_default_mask_cow_alloc(net);

                err = -ENOMEM;
                if (!rps_default_mask)
                        goto done;

                err = cpumask_parse(buffer, rps_default_mask);
                if (err)
                        goto done;

                err = rps_cpumask_housekeeping(rps_default_mask);
                if (err)
                        goto done;
        } else {
                dump_cpumask(buffer, lenp, ppos,
                             net->core.rps_default_mask ? : cpu_none_mask);
        }

done:
        rtnl_unlock();
        return err;
}

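/*
 * Handler for /proc/sys/net/core/rps_sock_flow_entries.  Resizes the global
 * RFS socket flow table: the written value is rounded up to a power of two
 * (capped at 2^29), a new table is swapped in under RCU and the rps/rfs
 * static keys are adjusted accordingly.  Writing 0 frees the table and
 * disables accelerated RFS.  Illustrative usage:
 * "sysctl -w net.core.rps_sock_flow_entries=32768".
 */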
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned int orig_size, size;
        int ret, i;
        struct ctl_table tmp = {
                .data = &size,
                .maxlen = sizeof(size),
                .mode = table->mode
        };
        struct rps_sock_flow_table *orig_sock_table, *sock_table;
        static DEFINE_MUTEX(sock_flow_mutex);

        mutex_lock(&sock_flow_mutex);

        orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
                                        lockdep_is_held(&sock_flow_mutex));
        size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

        ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

        if (write) {
                if (size) {
                        if (size > 1<<29) {
                                /* Enforce limit to prevent overflow */
                                mutex_unlock(&sock_flow_mutex);
                                return -EINVAL;
                        }
                        size = roundup_pow_of_two(size);
                        if (size != orig_size) {
                                sock_table =
                                    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
                                if (!sock_table) {
                                        mutex_unlock(&sock_flow_mutex);
                                        return -ENOMEM;
                                }
                                rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
                                sock_table->mask = size - 1;
                        } else
                                sock_table = orig_sock_table;

                        for (i = 0; i < size; i++)
                                sock_table->ents[i] = RPS_NO_CPU;
                } else
                        sock_table = NULL;

                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
                        if (sock_table) {
                                static_branch_inc(&rps_needed);
                                static_branch_inc(&rfs_needed);
                        }
                        if (orig_sock_table) {
                                static_branch_dec(&rps_needed);
                                static_branch_dec(&rfs_needed);
                                kvfree_rcu_mightsleep(orig_sock_table);
                        }
                }
        }

        mutex_unlock(&sock_flow_mutex);

        return ret;
}
#endif /* CONFIG_RPS */

#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);

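/*
 * Handler for /proc/sys/net/core/flow_limit_cpu_bitmap.  Writing a CPU
 * bitmask allocates a flow limit table for each newly selected CPU and frees
 * (via RCU) the table of each deselected one; reads reconstruct the bitmask
 * from the CPUs that currently have a table attached.
 */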
static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
                                 void *buffer, size_t *lenp, loff_t *ppos)
{
        struct sd_flow_limit *cur;
        struct softnet_data *sd;
        cpumask_var_t mask;
        int i, len, ret = 0;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        if (write) {
                ret = cpumask_parse(buffer, mask);
                if (ret)
                        goto done;

                mutex_lock(&flow_limit_update_mutex);
                len = sizeof(*cur) + netdev_flow_limit_table_len;
                for_each_possible_cpu(i) {
                        sd = &per_cpu(softnet_data, i);
                        cur = rcu_dereference_protected(sd->flow_limit,
                                     lockdep_is_held(&flow_limit_update_mutex));
                        if (cur && !cpumask_test_cpu(i, mask)) {
                                RCU_INIT_POINTER(sd->flow_limit, NULL);
                                kfree_rcu_mightsleep(cur);
                        } else if (!cur && cpumask_test_cpu(i, mask)) {
                                cur = kzalloc_node(len, GFP_KERNEL,
                                                   cpu_to_node(i));
                                if (!cur) {
                                        /* not unwinding previous changes */
                                        ret = -ENOMEM;
                                        goto write_unlock;
                                }
                                cur->num_buckets = netdev_flow_limit_table_len;
                                rcu_assign_pointer(sd->flow_limit, cur);
                        }
                }
write_unlock:
                mutex_unlock(&flow_limit_update_mutex);
        } else {
                cpumask_clear(mask);
                rcu_read_lock();
                for_each_possible_cpu(i) {
                        sd = &per_cpu(softnet_data, i);
                        if (rcu_dereference(sd->flow_limit))
                                cpumask_set_cpu(i, mask);
                }
                rcu_read_unlock();

                dump_cpumask(buffer, lenp, ppos, mask);
        }

done:
        free_cpumask_var(mask);
        return ret;
}

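/*
 * Handler for /proc/sys/net/core/flow_limit_table_len.  Only power-of-two
 * table lengths are accepted; anything else restores the previous value and
 * returns -EINVAL.
 */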
static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
                                       void *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned int old, *ptr;
        int ret;

        mutex_lock(&flow_limit_update_mutex);

        ptr = table->data;
        old = *ptr;
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!ret && write && !is_power_of_2(*ptr)) {
                *ptr = old;
                ret = -EINVAL;
        }

        mutex_unlock(&flow_limit_update_mutex);
        return ret;
}
#endif /* CONFIG_NET_FLOW_LIMIT */

#ifdef CONFIG_NET_SCHED
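/*
 * Handler for /proc/sys/net/core/default_qdisc: reads return the name of the
 * current default qdisc, writes install the named qdisc as the default for
 * newly created queues, e.g. "sysctl -w net.core.default_qdisc=fq_codel"
 * (illustrative).
 */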
static int set_default_qdisc(struct ctl_table *table, int write,
                             void *buffer, size_t *lenp, loff_t *ppos)
{
        char id[IFNAMSIZ];
        struct ctl_table tbl = {
                .data = id,
                .maxlen = IFNAMSIZ,
        };
        int ret;

        qdisc_get_default(id, IFNAMSIZ);

        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
        if (write && ret == 0)
                ret = qdisc_set_default(id);
        return ret;
}
#endif

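/*
 * dev_weight, dev_weight_rx_bias and dev_weight_tx_bias share this handler:
 * after any successful write, the effective dev_rx_weight and dev_tx_weight
 * are recomputed as weight_p scaled by the respective bias.
 */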
static int proc_do_dev_weight(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        static DEFINE_MUTEX(dev_weight_mutex);
        int ret, weight;

        mutex_lock(&dev_weight_mutex);
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!ret && write) {
                weight = READ_ONCE(weight_p);
                WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
                WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
        }
        mutex_unlock(&dev_weight_mutex);

        return ret;
}

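/*
 * Read-only dump of the host's RSS hash key (netdev_rss_key) as a hex
 * string, exposed as /proc/sys/net/core/netdev_rss_key.
 */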
static int proc_do_rss_key(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table fake_table;
        char buf[NETDEV_RSS_KEY_LEN * 3];

        snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
        fake_table.data = buf;
        fake_table.maxlen = sizeof(buf);
        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

#ifdef CONFIG_BPF_JIT
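/*
 * bpf_jit_enable handler: writes require CAP_SYS_ADMIN.  Value 2 (JIT with
 * debug output) is only accepted when the writer may inspect raw JIT images
 * (bpf_dump_raw_ok()), and a warning is printed since it must never be used
 * in production.  With CONFIG_BPF_JIT_ALWAYS_ON the value is pinned to 1.
 */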
static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
                                           void *buffer, size_t *lenp,
                                           loff_t *ppos)
{
        int ret, jit_enable = *(int *)table->data;
        int min = *(int *)table->extra1;
        int max = *(int *)table->extra2;
        struct ctl_table tmp = *table;

        if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        tmp.data = &jit_enable;
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
                if (jit_enable < 2 ||
                    (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
                        *(int *)table->data = jit_enable;
                        if (jit_enable == 2)
                                pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
                } else {
                        ret = -EPERM;
                }
        }

        if (write && ret && min == max)
                pr_info_once("CONFIG_BPF_JIT_ALWAYS_ON is enabled, bpf_jit_enable is permanently set to 1.\n");

        return ret;
}

# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
                                    void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif /* CONFIG_HAVE_EBPF_JIT */

static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
                                     void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static struct ctl_table net_core_table[] = {
        {
                .procname       = "wmem_max",
                .data           = &sysctl_wmem_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_max",
                .data           = &sysctl_rmem_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "wmem_default",
                .data           = &sysctl_wmem_default,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_default",
                .data           = &sysctl_rmem_default,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "mem_pcpu_rsv",
                .data           = &sysctl_mem_pcpu_rsv,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_mem_pcpu_rsv,
        },
        {
                .procname       = "dev_weight",
                .data           = &weight_p,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "dev_weight_rx_bias",
                .data           = &dev_weight_rx_bias,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "dev_weight_tx_bias",
                .data           = &dev_weight_tx_bias,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "netdev_max_backlog",
                .data           = &netdev_max_backlog,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "netdev_rss_key",
                .data           = &netdev_rss_key,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_do_rss_key,
        },
#ifdef CONFIG_BPF_JIT
        {
                .procname       = "bpf_jit_enable",
                .data           = &bpf_jit_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax_bpf_enable,
# ifdef CONFIG_BPF_JIT_ALWAYS_ON
                .extra1         = SYSCTL_ONE,
                .extra2         = SYSCTL_ONE,
# else
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_TWO,
# endif
        },
# ifdef CONFIG_HAVE_EBPF_JIT
        {
                .procname       = "bpf_jit_harden",
                .data           = &bpf_jit_harden,
                .maxlen         = sizeof(int),
                .mode           = 0600,
                .proc_handler   = proc_dointvec_minmax_bpf_restricted,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_TWO,
        },
        {
                .procname       = "bpf_jit_kallsyms",
                .data           = &bpf_jit_kallsyms,
                .maxlen         = sizeof(int),
                .mode           = 0600,
                .proc_handler   = proc_dointvec_minmax_bpf_restricted,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
# endif
        {
                .procname       = "bpf_jit_limit",
                .data           = &bpf_jit_limit,
                .maxlen         = sizeof(long),
                .mode           = 0600,
                .proc_handler   = proc_dolongvec_minmax_bpf_restricted,
                .extra1         = SYSCTL_LONG_ONE,
                .extra2         = &bpf_jit_limit_max,
        },
#endif
        {
                .procname       = "netdev_tstamp_prequeue",
                .data           = &netdev_tstamp_prequeue,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "message_cost",
                .data           = &net_ratelimit_state.interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "message_burst",
                .data           = &net_ratelimit_state.burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "tstamp_allow_data",
                .data           = &sysctl_tstamp_allow_data,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE
        },
#ifdef CONFIG_RPS
        {
                .procname       = "rps_sock_flow_entries",
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rps_sock_flow_sysctl
        },
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
        {
                .procname       = "flow_limit_cpu_bitmap",
                .mode           = 0644,
                .proc_handler   = flow_limit_cpu_sysctl
        },
        {
                .procname       = "flow_limit_table_len",
                .data           = &netdev_flow_limit_table_len,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = flow_limit_table_len_sysctl
        },
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_RX_BUSY_POLL
        {
                .procname       = "busy_poll",
                .data           = &sysctl_net_busy_poll,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
        {
                .procname       = "busy_read",
                .data           = &sysctl_net_busy_read,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
#endif
#ifdef CONFIG_NET_SCHED
        {
                .procname       = "default_qdisc",
                .mode           = 0644,
                .maxlen         = IFNAMSIZ,
                .proc_handler   = set_default_qdisc
        },
#endif
        {
                .procname       = "netdev_budget",
                .data           = &netdev_budget,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "warnings",
                .data           = &net_msg_warn,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "max_skb_frags",
                .data           = &sysctl_max_skb_frags,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
                .extra2         = &max_skb_frags,
        },
        {
                .procname       = "netdev_budget_usecs",
                .data           = &netdev_budget_usecs,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
        {
                .procname       = "fb_tunnels_only_for_init_net",
                .data           = &sysctl_fb_tunnels_only_for_init_net,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_TWO,
        },
        {
                .procname       = "devconf_inherit_init_net",
                .data           = &sysctl_devconf_inherit_init_net,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_THREE,
        },
        {
                .procname       = "high_order_alloc_disable",
                .data           = &net_high_order_alloc_disable_key.key,
                .maxlen         = sizeof(net_high_order_alloc_disable_key),
                .mode           = 0644,
                .proc_handler   = proc_do_static_key,
        },
        {
                .procname       = "gro_normal_batch",
                .data           = &gro_normal_batch,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
        },
        {
                .procname       = "netdev_unregister_timeout_secs",
                .data           = &netdev_unregister_timeout_secs,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
                .extra2         = &int_3600,
        },
        {
                .procname       = "skb_defer_max",
                .data           = &sysctl_skb_defer_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
        { }
};

static struct ctl_table netns_core_table[] = {
#if IS_ENABLED(CONFIG_RPS)
        {
                .procname       = "rps_default_mask",
                .data           = &init_net,
                .mode           = 0644,
                .proc_handler   = rps_default_mask_sysctl
        },
#endif
        {
                .procname       = "somaxconn",
                .data           = &init_net.core.sysctl_somaxconn,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .extra1         = SYSCTL_ZERO,
                .proc_handler   = proc_dointvec_minmax
        },
        {
                .procname       = "optmem_max",
                .data           = &init_net.core.sysctl_optmem_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .extra1         = SYSCTL_ZERO,
                .proc_handler   = proc_dointvec_minmax
        },
        {
                .procname       = "txrehash",
                .data           = &init_net.core.sysctl_txrehash,
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
                .proc_handler   = proc_dou8vec_minmax,
        },
        { }
};

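/*
 * "fb_tunnels=" boot parameter: "initns" restricts fallback tunnel devices
 * to the initial net namespace (sysctl value 1), "none" disables them in
 * every namespace (sysctl value 2).  The default of 0 keeps creating
 * fallback tunnels in all namespaces; the same knob is runtime-tunable via
 * /proc/sys/net/core/fb_tunnels_only_for_init_net.
 */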
static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
{
        /* fallback tunnels for initns only */
        if (!strncmp(str, "initns", 6))
                sysctl_fb_tunnels_only_for_init_net = 1;
        /* no fallback tunnels anywhere */
        else if (!strncmp(str, "none", 4))
                sysctl_fb_tunnels_only_for_init_net = 2;

        return 1;
}
__setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup);

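/*
 * Per-netns registration of the "net/core" tables.  For non-init namespaces
 * netns_core_table is duplicated and each entry's ->data pointer is rebased
 * from init_net to the new struct net, so the same table layout serves every
 * namespace.
 */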
static __net_init int sysctl_core_net_init(struct net *net)
{
        struct ctl_table *tbl, *tmp;

        tbl = netns_core_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
                if (tbl == NULL)
                        goto err_dup;

                for (tmp = tbl; tmp->procname; tmp++)
                        tmp->data += (char *)net - (char *)&init_net;
        }

        net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl,
                                                      ARRAY_SIZE(netns_core_table));
        if (net->core.sysctl_hdr == NULL)
                goto err_reg;

        return 0;

err_reg:
        if (tbl != netns_core_table)
                kfree(tbl);
err_dup:
        return -ENOMEM;
}

static __net_exit void sysctl_core_net_exit(struct net *net)
{
        struct ctl_table *tbl;

        tbl = net->core.sysctl_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->core.sysctl_hdr);
        BUG_ON(tbl == netns_core_table);
#if IS_ENABLED(CONFIG_RPS)
        kfree(net->core.rps_default_mask);
#endif
        kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_core_ops = {
        .init = sysctl_core_net_init,
        .exit = sysctl_core_net_exit,
};

static __init int sysctl_core_init(void)
{
        register_net_sysctl(&init_net, "net/core", net_core_table);
        return register_pernet_subsys(&sysctl_core_ops);
}

fs_initcall(sysctl_core_init);