1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/jhash.h>
3 #include <linux/netfilter.h>
4 #include <linux/rcupdate.h>
5 #include <linux/rhashtable.h>
6 #include <linux/vmalloc.h>
7 #include <net/genetlink.h>
8 #include <net/netns/generic.h>
9 #include <uapi/linux/genetlink.h>
/* Userspace-supplied translation parameters for one mapping
 * (fields elided in this view).
 */
12 struct ila_xlat_params {
/* One entry in the per-namespace translation table: the parameters,
 * the rhashtable linkage, and an RCU-protected singly linked sublist
 * of further mappings that share the same locator_match key.
 */
18 struct ila_xlat_params xp;
19 struct rhash_head node;
20 struct ila_map __rcu *next;
/* Sizing for the bucket-spinlock array guarding sublist updates:
 * at most MAX_LOCKS locks, scaled by LOCKS_PER_CPU per online CPU
 * (see alloc_ila_locks()).
 */
24 #define MAX_LOCKS 1024
25 #define LOCKS_PER_CPU 10
/* Allocate the per-namespace array of bucket spinlocks used to
 * serialize sublist modifications; stores the array and its index
 * mask in ilan->xlat.  Returns 0 or a negative errno from
 * alloc_bucket_spinlocks().
 */
27 static int alloc_ila_locks(struct ila_net *ilan)
29 return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
30 MAX_LOCKS, LOCKS_PER_CPU,
/* Random seed for locator hashing; initialized once, lazily, on the
 * first hash so boot-time entropy is not required.
 */
34 static u32 hashrnd __read_mostly;
35 static __always_inline void __ila_hash_secret_init(void)
37 net_get_random_once(&hashrnd, sizeof(hashrnd));
/* Hash a 64-bit locator (viewed as two u32 words) with the lazily
 * seeded hashrnd.  Used only to pick a bucket spinlock, so the
 * byte-order of the words does not matter as long as it is stable.
 */
40 static inline u32 ila_locator_hash(struct ila_locator loc)
42 u32 *v = (u32 *)loc.v32;
44 __ila_hash_secret_init();
45 return jhash_2words(v[0], v[1], hashrnd);
/* Map a locator to the spinlock protecting its hash bucket's
 * sublists (hash masked into the lock array).
 */
48 static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
49 struct ila_locator loc)
51 return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
/* Non-zero (mismatch) when the mapping is bound to a specific ifindex
 * different from the packet's; an ifindex of 0 in the mapping acts as
 * a wildcard.  (iaddr parameter unused in the visible code.)
 */
54 static inline int ila_cmp_wildcards(struct ila_map *ila,
55 struct ila_addr *iaddr, int ifindex)
57 return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
/* Non-zero when the two mappings differ in ifindex, i.e. they are not
 * the same logical entry within a locator_match sublist.
 */
60 static inline int ila_cmp_params(struct ila_map *ila,
61 struct ila_xlat_params *xp)
63 return (ila->xp.ifindex != xp->ifindex);
/* rhashtable compare callback: entries are keyed by the 64-bit
 * locator_match value; returns non-zero on mismatch as rhashtable
 * expects.
 */
66 static int ila_cmpfn(struct rhashtable_compare_arg *arg,
69 const struct ila_map *ila = obj;
71 return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
/* Ordering key for entries within a sublist (body elided in this
 * view); ila_add_mapping() keeps sublists sorted by descending order.
 */
74 static inline int ila_order(struct ila_map *ila)
/* rhashtable configuration: key is the raw 64-bit locator_match field
 * embedded in each ila_map; custom compare (ila_cmpfn) because
 * multiple logical mappings chain off one table entry.
 */
84 static const struct rhashtable_params rht_params = {
86 .head_offset = offsetof(struct ila_map, node),
87 .key_offset = offsetof(struct ila_map, xp.ip.locator_match),
88 .key_len = sizeof(u64), /* identifier */
91 .automatic_shrinking = true,
92 .obj_cmpfn = ila_cmpfn,
/* Fill *xp from the generic netlink attributes of an ILA request.
 * All fields are zeroed first; absent attributes leave their field
 * zero except csum_mode and ident_type, which fall back to explicit
 * defaults (the else branches are elided in this view).
 */
95 static int parse_nl_config(struct genl_info *info,
96 struct ila_xlat_params *xp)
98 memset(xp, 0, sizeof(*xp));
100 if (info->attrs[ILA_ATTR_LOCATOR])
101 xp->ip.locator.v64 = (__force __be64)nla_get_u64(
102 info->attrs[ILA_ATTR_LOCATOR]);
104 if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
105 xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
106 info->attrs[ILA_ATTR_LOCATOR_MATCH]);
108 if (info->attrs[ILA_ATTR_CSUM_MODE])
109 xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
/* Default when ILA_ATTR_CSUM_MODE is absent (else branch elided). */
111 xp->ip.csum_mode = ILA_CSUM_NO_ACTION;
113 if (info->attrs[ILA_ATTR_IDENT_TYPE])
114 xp->ip.ident_type = nla_get_u8(
115 info->attrs[ILA_ATTR_IDENT_TYPE]);
/* Default when ILA_ATTR_IDENT_TYPE is absent (else branch elided). */
117 xp->ip.ident_type = ILA_ATYPE_USE_FORMAT;
119 if (info->attrs[ILA_ATTR_IFINDEX])
120 xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);
125 /* Must be called with rcu readlock */
/* Look up the mapping for iaddr's locator, then walk the sublist
 * until an entry matches the packet's ifindex (or is a wildcard).
 * Returns the matching ila_map or NULL (return paths elided in this
 * view).
 */
126 static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
128 struct ila_net *ilan)
132 ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
135 if (!ila_cmp_wildcards(ila, iaddr, ifindex))
/* No match at this node; advance along the RCU sublist. */
137 ila = rcu_access_pointer(ila->next);
143 /* Must be called with rcu readlock */
/* Exact lookup used by the netlink control path: find the table entry
 * for xp's locator_match and walk its sublist for an entry whose
 * parameters (ifindex) match exactly.
 */
144 static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
145 struct ila_net *ilan)
149 ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
150 &xp->ip.locator_match,
153 if (!ila_cmp_params(ila, xp))
155 ila = rcu_access_pointer(ila->next);
/* Release one ila_map entry (body elided in this view; presumably an
 * RCU-deferred free — confirm against the full source).
 */
161 static inline void ila_release(struct ila_map *ila)
/* Free an ila_map and, per the elided loop, the entries chained off
 * its ->next sublist.
 */
166 static void ila_free_node(struct ila_map *ila)
168 struct ila_map *next;
170 /* Assume rcu_readlock held */
172 next = rcu_access_pointer(ila->next);
/* rhashtable_free_and_destroy() callback: frees one table entry and
 * its sublist.  arg is unused.
 */
178 static void ila_free_cb(void *ptr, void *arg)
180 ila_free_node((struct ila_map *)ptr);
/* Forward declaration: translation is defined at the bottom of the
 * file but used by the netfilter hook below.
 */
183 static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);
/* Netfilter PRE_ROUTING hook: translate SIR address to ILA on ingress
 * (sir2ila == false means ILA->SIR direction here; return value
 * elided in this view — presumably NF_ACCEPT).
 */
186 ila_nf_input(void *priv,
188 const struct nf_hook_state *state)
190 ila_xlat_addr(skb, false);
/* Hook table registered per-namespace on first mapping add (see
 * ila_add_mapping()); translates at PRE_ROUTING.
 */
194 static const struct nf_hook_ops ila_nf_hook_ops[] = {
196 .hook = ila_nf_input,
198 .hooknum = NF_INET_PRE_ROUTING,
/* Add one mapping to the per-namespace table.
 *
 * The rhashtable holds one entry per locator_match; additional
 * mappings with the same key chain off ->next, kept sorted by
 * descending ila_order().  The bucket spinlock (taken in elided
 * lines) serializes sublist mutation; readers traverse under RCU.
 * Duplicate parameters (same ifindex) are rejected in an elided
 * branch.  Returns 0 or a negative errno.
 */
203 static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
205 struct ila_net *ilan = net_generic(net, ila_net_id);
206 struct ila_map *ila, *head;
207 spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
/* Lazily register the netfilter hooks so namespaces that never use
 * ILA pay nothing.
 */
210 if (!ilan->xlat.hooks_registered) {
211 /* We defer registering net hooks in the namespace until the
212 * first mapping is added.
214 err = nf_register_net_hooks(net, ila_nf_hook_ops,
215 ARRAY_SIZE(ila_nf_hook_ops));
219 ilan->xlat.hooks_registered = true;
222 ila = kzalloc(sizeof(*ila), GFP_KERNEL);
226 ila_init_saved_csum(&xp->ip);
230 order = ila_order(ila);
/* Existing entry for this locator_match key? */
234 head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
235 &xp->ip.locator_match,
238 /* New entry for the rhash_table */
239 err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
240 &ila->node, rht_params);
/* Key already present: walk the sublist to find the insertion
 * point (kept sorted by descending order).
 */
242 struct ila_map *tila = head, *prev = NULL;
/* Same parameters already mapped — duplicate (handling elided). */
245 if (!ila_cmp_params(tila, xp)) {
250 if (order > ila_order(tila))
254 tila = rcu_dereference_protected(tila->next,
255 lockdep_is_held(lock));
259 /* Insert in sub list of head */
260 RCU_INIT_POINTER(ila->next, tila);
261 rcu_assign_pointer(prev->next, ila);
263 /* Make this ila new head */
264 RCU_INIT_POINTER(ila->next, head);
265 err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
267 &ila->node, rht_params);
/* Remove the mapping matching *xp (locator_match + ifindex).
 *
 * Under the bucket spinlock (taken in elided lines): locate the
 * rhashtable head for the key, walk its sublist to the exact entry,
 * then either unlink it from the sublist, promote the next sublist
 * entry to table head via rhashtable_replace_fast(), or remove the
 * table entry outright when the sublist is empty.
 */
282 static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
284 struct ila_net *ilan = net_generic(net, ila_net_id);
285 struct ila_map *ila, *head, *prev;
286 spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
291 head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
292 &xp->ip.locator_match, rht_params);
/* Walk the sublist for the entry with matching parameters,
 * tracking prev for unlinking.
 */
298 if (ila_cmp_params(ila, xp)) {
300 ila = rcu_dereference_protected(ila->next,
301 lockdep_is_held(lock));
308 /* Not head, just delete from list */
309 rcu_assign_pointer(prev->next, ila->next);
311 /* It is the head. If there is something in the
312 * sublist we need to make a new head.
314 head = rcu_dereference_protected(ila->next,
315 lockdep_is_held(lock));
317 /* Put first entry in the sublist into the
320 err = rhashtable_replace_fast(
321 &ilan->xlat.rhash_table, &ila->node,
322 &head->node, rht_params);
326 /* Entry no longer used */
327 err = rhashtable_remove_fast(
328 &ilan->xlat.rhash_table,
329 &ila->node, rht_params);
/* Genl handler: ILA_CMD_ADD.  Parses attributes into a local params
 * struct and adds the mapping; returns 0 or a negative errno.
 */
344 int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
346 struct net *net = genl_info_net(info);
347 struct ila_xlat_params p;
350 err = parse_nl_config(info, &p);
354 return ila_add_mapping(net, &p);
/* Genl handler: ILA_CMD_DEL.  Parses attributes and deletes the
 * matching mapping (the delete's return value is not propagated in
 * the visible code; the handler's own return is elided).
 */
357 int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
359 struct net *net = genl_info_net(info);
360 struct ila_xlat_params xp;
363 err = parse_nl_config(info, &xp);
367 ila_del_mapping(net, &xp);
/* Bucket spinlock for an existing entry, derived from its own
 * locator_match key.
 */
372 static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
375 return ila_get_lock(ilan, ila->xp.ip.locator_match);
/* Genl handler: ILA_CMD_FLUSH.  Walks the whole rhashtable and
 * removes every entry, taking each entry's bucket lock before
 * removal.  -EAGAIN from the walker means the table resized mid-walk
 * (restart handling elided in this view).
 */
378 int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
380 struct net *net = genl_info_net(info);
381 struct ila_net *ilan = net_generic(net, ila_net_id);
382 struct rhashtable_iter iter;
387 rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
388 rhashtable_walk_start(&iter);
391 ila = rhashtable_walk_next(&iter);
/* Walker may legitimately return ERR_PTR(-EAGAIN) on resize. */
394 if (PTR_ERR(ila) == -EAGAIN)
402 lock = lock_from_ila_map(ilan, ila);
406 ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
407 &ila->node, rht_params);
418 rhashtable_walk_stop(&iter);
419 rhashtable_walk_exit(&iter);
/* Emit one mapping's attributes into a netlink message.  Returns
 * nonzero/negative on nla_put failure (exact return lines elided);
 * the || chain stops at the first attribute that does not fit.
 */
423 static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
425 if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
426 (__force u64)ila->xp.ip.locator.v64,
428 nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
429 (__force u64)ila->xp.ip.locator_match.v64,
431 nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
432 nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
433 nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
/* Build a complete genl message for one mapping: put the header, fill
 * the attributes, finalize; on attribute failure cancel the partial
 * message (error return lines elided).
 */
439 static int ila_dump_info(struct ila_map *ila,
440 u32 portid, u32 seq, u32 flags,
441 struct sk_buff *skb, u8 cmd)
445 hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
449 if (ila_fill_info(ila, skb) < 0)
450 goto nla_put_failure;
452 genlmsg_end(skb, hdr);
456 genlmsg_cancel(skb, hdr);
/* Genl handler: ILA_CMD_GET for a single mapping.  Looks up the entry
 * by exact parameters (under RCU, taken in elided lines), dumps it
 * into a freshly allocated message and replies to the requester.
 */
460 int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
462 struct net *net = genl_info_net(info);
463 struct ila_net *ilan = net_generic(net, ila_net_id);
465 struct ila_xlat_params xp;
469 ret = parse_nl_config(info, &xp);
473 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
480 ila = ila_lookup_by_params(&xp, ilan);
482 ret = ila_dump_info(ila,
484 info->snd_seq, 0, msg,
493 return genlmsg_reply(msg, info);
/* Dump state carried across netlink dump callbacks in cb->args[0]:
 * the rhashtable walker plus a skip count (field elided) for resuming
 * inside a sublist.
 */
500 struct ila_dump_iter {
501 struct rhashtable_iter rhiter;
/* Start of a netlink dump: allocate the iterator, enter the walk and
 * stash the iterator pointer in cb->args[0] for later callbacks.
 * NULL-check/return lines elided in this view.
 */
505 int ila_xlat_nl_dump_start(struct netlink_callback *cb)
507 struct net *net = sock_net(cb->skb->sk);
508 struct ila_net *ilan = net_generic(net, ila_net_id);
509 struct ila_dump_iter *iter;
511 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
515 rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);
518 cb->args[0] = (long)iter;
/* End of a netlink dump: tear down the walker (the matching kfree of
 * iter is elided in this view).
 */
523 int ila_xlat_nl_dump_done(struct netlink_callback *cb)
525 struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
527 rhashtable_walk_exit(&iter->rhiter);
/* One pass of a netlink dump.  Resumes from the walker position saved
 * in cb->args[0]; iter->skip counts already-dumped entries inside the
 * current sublist so a partially dumped sublist can be resumed after
 * the skb fills up.  A walker reset (-EAGAIN) is propagated to
 * userspace even if some data was already written.
 */
534 int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
536 struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
537 struct rhashtable_iter *rhiter = &iter->rhiter;
538 int skip = iter->skip;
542 rhashtable_walk_start(rhiter);
544 /* Get first entry */
545 ila = rhashtable_walk_peek(rhiter);
547 if (ila && !IS_ERR(ila) && skip) {
548 /* Skip over visited entries */
550 while (ila && skip) {
551 /* Skip over any ila entries in this list that we
552 * have already dumped.
554 ila = rcu_access_pointer(ila->next);
564 if (ret == -EAGAIN) {
565 /* Table has changed and iter has reset. Return
566 * -EAGAIN to the application even if we have
567 * written data to the skb. The application
568 * needs to deal with this.
581 ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
582 cb->nlh->nlmsg_seq, NLM_F_MULTI,
/* Dump every entry chained in this sublist before moving the
 * walker to the next table entry.
 */
588 ila = rcu_access_pointer(ila->next);
592 ila = rhashtable_walk_next(rhiter);
/* Report bytes written if any; otherwise the walker status. */
597 ret = (skb->len ? : ret);
600 rhashtable_walk_stop(rhiter);
/* Per-namespace init: allocate the bucket locks, then the rhashtable;
 * on table-init failure the locks are freed again so nothing leaks.
 */
604 int ila_xlat_init_net(struct net *net)
606 struct ila_net *ilan = net_generic(net, ila_net_id);
609 err = alloc_ila_locks(ilan);
613 err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
615 free_bucket_spinlocks(ilan->xlat.locks);
/* Per-namespace teardown: free every mapping (table + sublists) via
 * ila_free_cb, release the lock array, and unregister the netfilter
 * hooks if the lazy registration in ila_add_mapping() ever ran.
 */
622 void ila_xlat_exit_net(struct net *net)
624 struct ila_net *ilan = net_generic(net, ila_net_id);
626 rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
628 free_bucket_spinlocks(ilan->xlat.locks);
630 if (ilan->xlat.hooks_registered)
631 nf_unregister_net_hooks(net, ila_nf_hook_ops,
632 ARRAY_SIZE(ila_nf_hook_ops));
/* Translate the destination address of skb in place: look up the
 * mapping for the destination's locator (wildcard ifindex rules
 * apply) and rewrite the locator.  RCU read lock acquisition and the
 * return value are elided in this view; definition continues past the
 * end of this chunk.
 */
635 static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
638 struct ipv6hdr *ip6h = ipv6_hdr(skb);
639 struct net *net = dev_net(skb->dev);
640 struct ila_net *ilan = net_generic(net, ila_net_id);
641 struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
643 /* Assumes skb contains a valid IPv6 header that is pulled */
645 /* No check here that ILA type in the mapping matches what is in the
646 * address. We assume that whatever sender gave us can be translated.
647 * The checksum mode however is relevant.
652 ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
654 ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);