/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H

#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>
#define __ipset_dereference(p)		\
	rcu_dereference_protected(p, 1)
#define ipset_dereference_nfnl(p)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
#define ipset_dereference_set(p, set)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
		lockdep_is_held(&(set)->lock))
#define ipset_dereference_bh_nfnl(p)	\
	rcu_dereference_bh_check(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
/* Hashing which uses arrays to resolve clashing. The hash table is resized
 * (doubled) when searching becomes too long.
 * Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32).
 *
 * Readers and resizing
 *
 * Resizing can be triggered by userspace command only, and those
 * are serialized by the nfnl mutex. During resizing the set is
 * read-locked, so the only possible concurrent operations are
 * the kernel side readers. Those must be protected by proper RCU locking.
 */
/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE			4
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE			(3 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED			64
/* Max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
#define AHASH_MAX(h)			((h)->ahash_max)

static u8
tune_ahash_max(u8 curr, u32 multi)
{
	u32 n;

	if (multi < curr)
		return curr;

	n = curr + AHASH_INIT_SIZE;
	/* Currently, when listing, one hash bucket must fit into a single
	 * message. Therefore we have a hard limit here.
	 */
	return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}

#define TUNE_AHASH_MAX(h, multi)	\
	((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
#else
#define AHASH_MAX(h)			AHASH_MAX_SIZE
#define TUNE_AHASH_MAX(h, multi)
#endif
/* A hash bucket */
struct hbucket {
	struct rcu_head rcu;	/* for call_rcu */
	/* Which positions are used in the array */
	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
	u8 size;		/* size of the array */
	u8 pos;			/* position of the first free entry */
	unsigned char value[]	/* the array of the values */
		__aligned(__alignof__(u64));
};
/* Region size for locking == 2^HTABLE_REGION_BITS */
#define HTABLE_REGION_BITS	10
#define ahash_numof_locks(htable_bits)		\
	((htable_bits) < HTABLE_REGION_BITS ? 1	\
		: jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_sizeof_regions(htable_bits)		\
	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
/* The bucket-to-region mapping must agree with the bucket ranges that
 * the gc, flush and resize loops derive from ahash_bucket_start/end
 * below, so map by division, not by modulo.
 */
#define ahash_region(n, htable_bits)		\
	((n) / jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_start(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? 0	\
		: (h) * jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_end(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits)	\
		: ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
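/* Worked example of the region arithmetic above: htable_bits == 14 gives
 * jhash_size(14) == 16384 buckets, split into 2^(14 - 10) == 16 regions
 * of jhash_size(10) == 1024 consecutive buckets each. Bucket 5000 then
 * belongs to region 5000 / 1024 == 4, which covers buckets 4096..5119.
 */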
struct htable_gc {
	struct delayed_work dwork;
	struct ip_set *set;	/* Set the gc belongs to */
	u32 region;		/* Last gc run position */
};
/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
	atomic_t ref;		/* References for resizing */
	atomic_t uref;		/* References for dumping and gc */
	u8 htable_bits;		/* size of hash table == 2^htable_bits */
	u32 maxelem;		/* Maxelem per region */
	struct ip_set_region *hregion;	/* Region locks and ext sizes */
	struct hbucket __rcu *bucket[]; /* hashtable buckets */
};
#define hbucket(h, i)		((h)->bucket[i])
#define ext_size(n, dsize)	\
	(sizeof(struct hbucket) + (n) * (dsize))
#ifndef IPSET_NET_COUNT
#define IPSET_NET_COUNT		1
#endif
/* Book-keeping of the prefixes added to the set */
struct net_prefixes {
	u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
	u8 cidr[IPSET_NET_COUNT];  /* the cidr value */
};
/* Compute the hash table size */
static size_t
htable_size(u8 hbits)
{
	size_t hsize;

	/* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
	if (hbits > 31)
		return 0;
	hsize = jhash_size(hbits);
	if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
	    < hsize)
		return 0;

	return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
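/* E.g. hbits == 10 means jhash_size(10) == 1024 buckets: on a 64-bit
 * machine the table shell takes 1024 * 8 bytes of bucket pointers plus
 * sizeof(struct htable); the hbucket arrays are allocated on demand.
 */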
#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i)		(cidr[i])
#else
#define __CIDR(cidr, i)		(cidr)
#endif

/* cidr + 1 is stored in net_prefixes to support /0 */
#define NCIDR_PUT(cidr)		((cidr) + 1)
#define NCIDR_GET(cidr)		((cidr) - 1)

#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define DCIDR_PUT(cidr)		((cidr) - 1)
#define DCIDR_GET(cidr, i)	(__CIDR(cidr, i) + 1)
#else
#define DCIDR_PUT(cidr)		(cidr)
#define DCIDR_GET(cidr, i)	__CIDR(cidr, i)
#endif

#define INIT_CIDR(cidr, host_mask)	\
	DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))
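/* Encoding example: a /0 network is stored as NCIDR_PUT(0) == 1 in
 * net_prefixes, so the value 0 can safely mark an unused cidr slot;
 * in a packed data entry a /24 is stored as DCIDR_PUT(24) == 23.
 */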
#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to HOST_MASK value and c = cidr + 1 */
#define NLEN			(HOST_MASK + 1)
#define CIDR_POS(c)		((c) - 1)
#else
/* cidr from 1 to HOST_MASK value and c = cidr + 1 */
#define NLEN			HOST_MASK
#define CIDR_POS(c)		((c) - 2)
#endif

#else /* IP_SET_HASH_WITH_NETS */
#define NLEN			0
#endif /* IP_SET_HASH_WITH_NETS */
#define SET_ELEM_EXPIRED(set, d)	\
	(SET_WITH_TIMEOUT(set) &&	\
	 ip_set_timeout_expired(ext_timeout(d, set)))

#endif /* _IP_SET_HASH_GEN_H */
190 #error "MTYPE is not defined!"
194 #error "HTYPE is not defined!"
198 #error "HOST_MASK is not defined!"
/* Family dependent templates */

#undef ahash_data
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_elem

#undef mtype_ahash_destroy
#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt

#undef mtype_add
#undef mtype_del
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
#undef mtype_resize
#undef mtype_ext_size
#undef mtype_resize_ad
#undef mtype_head
#undef mtype_list
#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_cancel_gc
#undef mtype_variant
#undef mtype_data_match
#define mtype_data_equal	IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match	IPSET_TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d)	1
#endif
#define mtype_data_set_flags	IPSET_TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_elem	IPSET_TOKEN(MTYPE, _data_reset_elem)
#define mtype_data_reset_flags	IPSET_TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask	IPSET_TOKEN(MTYPE, _data_netmask)
#define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)

#define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr		IPSET_TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)

#define mtype_add		IPSET_TOKEN(MTYPE, _add)
#define mtype_del		IPSET_TOKEN(MTYPE, _del)
#define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test		IPSET_TOKEN(MTYPE, _test)
#define mtype_uref		IPSET_TOKEN(MTYPE, _uref)
#define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
#define mtype_ext_size		IPSET_TOKEN(MTYPE, _ext_size)
#define mtype_resize_ad		IPSET_TOKEN(MTYPE, _resize_ad)
#define mtype_head		IPSET_TOKEN(MTYPE, _head)
#define mtype_list		IPSET_TOKEN(MTYPE, _list)
#define mtype_gc_do		IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_cancel_gc		IPSET_TOKEN(MTYPE, _cancel_gc)
#define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)
#ifndef HKEY_DATALEN
#define HKEY_DATALEN		sizeof(struct mtype_elem)
#endif

#define htype			MTYPE

#define HKEY(data, initval, htable_bits)			\
({								\
	const u32 *__k = (const u32 *)data;			\
	u32 __l = HKEY_DATALEN / sizeof(u32);			\
								\
	BUILD_BUG_ON(HKEY_DATALEN % sizeof(u32) != 0);		\
								\
	jhash2(__k, __l, initval) & jhash_mask(htable_bits);	\
})
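/* For instance, hash:ip with IPv4 stores a 4-byte element, so
 * HKEY_DATALEN / sizeof(u32) == 1 and the whole element is fed to
 * jhash2(); the result is masked down to the current table size.
 */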
/* The generic hash structure */
struct htype {
	struct htable __rcu *table; /* the hash table */
	struct htable_gc gc;	/* gc workqueue */
	u32 maxelem;		/* max elements in the hash */
	u32 initval;		/* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;		/* markmask value for mark mask to store */
#endif
#ifdef IP_SET_HASH_WITH_MULTI
	u8 ahash_max;		/* max elements in an array block */
#endif
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;		/* netmask value for subnets to store */
#endif
	struct list_head ad;	/* Resize add|del backlist */
	struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
	struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};
/* ADD|DEL entries saved during resize */
struct mtype_resize_ad {
	struct list_head list;
	enum ipset_adt ad;	/* ADD|DEL element */
	struct mtype_elem d;	/* Element value */
	struct ip_set_ext ext;	/* Extensions for ADD */
	struct ip_set_ext mext;	/* Target extensions for ADD */
	u32 flags;		/* Flags for ADD */
};
#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book-keeping when the hash stores different
 * sized networks. cidr == real cidr + 1 to support /0.
 */
static void
mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	int i, j;

	spin_lock_bh(&set->lock);
	/* Add in increasing prefix order, so larger cidr first */
	for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
		if (j != -1) {
			continue;
		} else if (h->nets[i].cidr[n] < cidr) {
			j = i;
		} else if (h->nets[i].cidr[n] == cidr) {
			h->nets[CIDR_POS(cidr)].nets[n]++;
			goto unlock;
		}
	}
	if (j != -1) {
		for (i = NLEN - 1; i > j; i--)
			h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
	}
	h->nets[i].cidr[n] = cidr;
	h->nets[CIDR_POS(cidr)].nets[n] = 1;
unlock:
	spin_unlock_bh(&set->lock);
}
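/* E.g. in an IPv4 set holding a /24 and a /8 network, cidr[] stores the
 * values 25 and 9 (cidr + 1) in this decreasing order, while the element
 * counts live at the fixed slots nets[CIDR_POS(25)] and nets[CIDR_POS(9)].
 */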
static void
mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	u8 i, j, net_end = NLEN - 1;

	spin_lock_bh(&set->lock);
	for (i = 0; i < NLEN; i++) {
		if (h->nets[i].cidr[n] != cidr)
			continue;
		h->nets[CIDR_POS(cidr)].nets[n]--;
		if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
			goto unlock;
		for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
			h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
		h->nets[j].cidr[n] = 0;
		goto unlock;
	}
unlock:
	spin_unlock_bh(&set->lock);
}
#endif /* IP_SET_HASH_WITH_NETS */
/* Calculate the actual memory size of the set data */
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
	return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
}
/* Get the ith element from the array block n */
#define ahash_data(n, i, dsize)	\
	((struct mtype_elem *)((n)->value + ((i) * (dsize))))
static void
mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
{
	int i;

	for (i = 0; i < n->pos; i++)
		if (test_bit(i, n->used))
			ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
}
/* Flush a hash type of set: destroy all elements */
static void
mtype_flush(struct ip_set *set)
{
	struct htype *h = set->data;
	struct htable *t;
	struct hbucket *n;
	u32 r, i;

	t = ipset_dereference_nfnl(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		spin_lock_bh(&t->hregion[r].lock);
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = __ipset_dereference(hbucket(t, i));
			if (!n)
				continue;
			if (set->extensions & IPSET_EXT_DESTROY)
				mtype_ext_cleanup(set, n);
			/* FIXME: use slab cache */
			rcu_assign_pointer(hbucket(t, i), NULL);
			kfree_rcu(n, rcu);
		}
		t->hregion[r].ext_size = 0;
		t->hregion[r].elements = 0;
		spin_unlock_bh(&t->hregion[r].lock);
	}
#ifdef IP_SET_HASH_WITH_NETS
	memset(h->nets, 0, sizeof(h->nets));
#endif
}
/* Destroy the hashtable part of the set */
static void
mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
{
	struct hbucket *n;
	u32 i;

	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = (__force struct hbucket *)hbucket(t, i);
		if (!n)
			continue;
		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
			mtype_ext_cleanup(set, n);
		/* FIXME: use slab cache */
		kfree(n);
	}

	ip_set_free(t->hregion);
	ip_set_free(t);
}
/* Destroy a hash type of set */
static void
mtype_destroy(struct ip_set *set)
{
	struct htype *h = set->data;
	struct list_head *l, *lt;

	mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
	list_for_each_safe(l, lt, &h->ad) {
		list_del(l);
		kfree(l);
	}
	kfree(h);
	set->data = NULL;
}
static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct htype *x = a->data;
	const struct htype *y = b->data;

	/* Resizing changes htable_bits, so we ignore it */
	return x->maxelem == y->maxelem &&
	       a->timeout == b->timeout &&
#ifdef IP_SET_HASH_WITH_NETMASK
	       x->netmask == y->netmask &&
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	       x->markmask == y->markmask &&
#endif
	       a->extensions == b->extensions;
}
static void
mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
{
	struct hbucket *n, *tmp;
	struct mtype_elem *data;
	u32 i, j, d;
	size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 k;
#endif
	u8 htable_bits = t->htable_bits;

	spin_lock_bh(&t->hregion[r].lock);
	for (i = ahash_bucket_start(r, htable_bits);
	     i < ahash_bucket_end(r, htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		for (j = 0, d = 0; j < n->pos; j++) {
			if (!test_bit(j, n->used)) {
				d++;
				continue;
			}
			data = ahash_data(n, j, dsize);
			if (!ip_set_timeout_expired(ext_timeout(data, set)))
				continue;
			pr_debug("expired %u/%u\n", i, j);
			clear_bit(j, n->used);
			smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
			for (k = 0; k < IPSET_NET_COUNT; k++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, k)),
					k);
#endif
			t->hregion[r].elements--;
			ip_set_ext_destroy(set, data);
			d++;
		}
		if (d >= AHASH_INIT_SIZE) {
			if (d >= n->size) {
				t->hregion[r].ext_size -=
					ext_size(n->size, dsize);
				rcu_assign_pointer(hbucket(t, i), NULL);
				kfree_rcu(n, rcu);
				continue;
			}
			tmp = kzalloc(sizeof(*tmp) +
				      (n->size - AHASH_INIT_SIZE) * dsize,
				      GFP_ATOMIC);
			if (!tmp)
				/* Still try to delete expired elements. */
				continue;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, d = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + d * dsize,
				       data, dsize);
				set_bit(d, tmp->used);
				d++;
			}
			tmp->pos = d;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, i), tmp);
			kfree_rcu(n, rcu);
		}
	}
	spin_unlock_bh(&t->hregion[r].lock);
}
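/* E.g. when at least AHASH_INIT_SIZE == 4 slots of a bucket were freed
 * above, the bucket is either removed entirely (no live entry left) or
 * reallocated one AHASH_INIT_SIZE block smaller, so memory is returned
 * in 4-slot steps.
 */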
static void
mtype_gc(struct work_struct *work)
{
	struct htable_gc *gc;
	struct ip_set *set;
	struct htype *h;
	struct htable *t;
	u32 r, numof_locks;
	unsigned int next_run;

	gc = container_of(work, struct htable_gc, dwork.work);
	set = gc->set;
	h = set->data;

	spin_lock_bh(&set->lock);
	t = ipset_dereference_set(h->table, set);
	atomic_inc(&t->uref);
	numof_locks = ahash_numof_locks(t->htable_bits);
	r = gc->region++;
	if (r >= numof_locks)
		r = gc->region = 0;
	next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
	if (next_run < HZ / 10)
		next_run = HZ / 10;
	spin_unlock_bh(&set->lock);

	mtype_gc_do(set, h, t, r);

	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by expire: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}

	queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
}
static void
mtype_gc_init(struct htable_gc *gc)
{
	INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
	queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}
static void
mtype_cancel_gc(struct ip_set *set)
{
	struct htype *h = set->data;

	if (SET_WITH_TIMEOUT(set))
		cancel_delayed_work_sync(&h->gc.dwork);
}

static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);
/* Resize a hash: create a new hash table with doubled hashsize and insert
 * the elements into it. Repeat until we succeed or fail due to memory
 * pressure.
 */
static int
mtype_resize(struct ip_set *set, bool retried)
{
	struct htype *h = set->data;
	struct htable *t, *orig;
	u8 htable_bits;
	size_t hsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 flags;
	struct mtype_elem *tmp;
#endif
	struct mtype_elem *data;
	struct mtype_elem *d;
	struct hbucket *n, *m;
	struct list_head *l, *lt;
	struct mtype_resize_ad *x;
	u32 i, j, r, nr, key;
	int ret;

#ifdef IP_SET_HASH_WITH_NETS
	tmp = kmalloc(dsize, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
#endif
	orig = ipset_dereference_bh_nfnl(h->table);
	htable_bits = orig->htable_bits;

retry:
	ret = 0;
	htable_bits++;
	if (!htable_bits)
		goto hbwarn;
	hsize = htable_size(htable_bits);
	if (!hsize)
		goto hbwarn;
	t = ip_set_alloc(hsize);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
	if (!t->hregion) {
		ip_set_free(t);
		ret = -ENOMEM;
		goto out;
	}
	t->htable_bits = htable_bits;
	t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
	for (i = 0; i < ahash_numof_locks(htable_bits); i++)
		spin_lock_init(&t->hregion[i].lock);

	/* There can't be another parallel resizing,
	 * but dumping, gc, kernel side add/del are possible
	 */
	orig = ipset_dereference_bh_nfnl(h->table);
	atomic_set(&orig->ref, 1);
	atomic_inc(&orig->uref);
	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
		 set->name, orig->htable_bits, htable_bits, orig);
	for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
		/* Expire may replace a hbucket with another one */
		rcu_read_lock_bh();
		for (i = ahash_bucket_start(r, orig->htable_bits);
		     i < ahash_bucket_end(r, orig->htable_bits); i++) {
			n = __ipset_dereference(hbucket(orig, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				if (SET_ELEM_EXPIRED(set, data))
					continue;
#ifdef IP_SET_HASH_WITH_NETS
				/* We have readers running parallel with us,
				 * so the live data cannot be modified.
				 */
				flags = 0;
				memcpy(tmp, data, dsize);
				data = tmp;
				mtype_data_reset_flags(data, &flags);
#endif
				key = HKEY(data, h->initval, htable_bits);
				m = __ipset_dereference(hbucket(t, key));
				nr = ahash_region(key, htable_bits);
				if (!m) {
					m = kzalloc(sizeof(*m) +
					    AHASH_INIT_SIZE * dsize,
					    GFP_ATOMIC);
					if (!m) {
						ret = -ENOMEM;
						goto cleanup;
					}
					m->size = AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					RCU_INIT_POINTER(hbucket(t, key), m);
				} else if (m->pos >= m->size) {
					struct hbucket *ht;

					if (m->size >= AHASH_MAX(h)) {
						ret = -EAGAIN;
					} else {
						ht = kzalloc(sizeof(*ht) +
						(m->size + AHASH_INIT_SIZE)
						* dsize,
						GFP_ATOMIC);
						if (!ht)
							ret = -ENOMEM;
					}
					if (ret < 0)
						goto cleanup;
					memcpy(ht, m, sizeof(struct hbucket) +
					       m->size * dsize);
					ht->size = m->size + AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					kfree(m);
					m = ht;
					RCU_INIT_POINTER(hbucket(t, key), ht);
				}
				d = ahash_data(m, m->pos, dsize);
				memcpy(d, data, dsize);
				set_bit(m->pos++, m->used);
				t->hregion[nr].elements++;
#ifdef IP_SET_HASH_WITH_NETS
				mtype_data_reset_flags(d, &flags);
#endif
			}
		}
		rcu_read_unlock_bh();
	}

	/* There can't be any other writer. */
	rcu_assign_pointer(h->table, t);

	/* Give time to other readers of the set */
	synchronize_rcu();

	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
		 orig->htable_bits, orig, t->htable_bits, t);
	/* Add/delete elements processed by the SET target during resize.
	 * Kernel-side add cannot trigger a resize and userspace actions
	 * are serialized by the mutex.
	 */
	list_for_each_safe(l, lt, &h->ad) {
		x = list_entry(l, struct mtype_resize_ad, list);
		if (x->ad == IPSET_ADD) {
			mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
		} else {
			mtype_del(set, &x->d, NULL, NULL, 0);
		}
		list_del(l);
		kfree(l);
	}
	/* If there's nobody else using the table, destroy it */
	if (atomic_dec_and_test(&orig->uref)) {
		pr_debug("Table destroy by resize %p\n", orig);
		mtype_ahash_destroy(set, orig, false);
	}

out:
#ifdef IP_SET_HASH_WITH_NETS
	kfree(tmp);
#endif
	return ret;

cleanup:
	rcu_read_unlock_bh();
	atomic_set(&orig->ref, 0);
	atomic_dec(&orig->uref);
	mtype_ahash_destroy(set, t, false);
	if (ret == -EAGAIN)
		goto retry;
	goto out;

hbwarn:
	/* In case we have plenty of memory :-) */
	pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
	ret = -IPSET_ERR_HASH_FULL;
	goto out;
}
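/* Note on the per-region limit set up above: with maxelem 65536 and 16
 * regions, t->maxelem == 4096. mtype_add() below first compares a single
 * region's element counter against this value and computes the exact
 * global count only when the region looks full.
 */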
/* Get the current number of elements and ext_size in the set */
static void
mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
{
	struct htype *h = set->data;
	const struct htable *t;
	u32 i, j, r;
	struct hbucket *n;
	struct mtype_elem *data;

	t = rcu_dereference_bh(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = rcu_dereference_bh(hbucket(t, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, set->dsize);
				if (!SET_ELEM_EXPIRED(set, data))
					(*elements)++;
			}
		}
		*ext_size += t->hregion[r].ext_size;
	}
}
/* Add an element to a hash and update the internal counters when it
 * succeeds, otherwise report the proper error code.
 */
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n, *old = ERR_PTR(-ENOENT);
	int i, j = -1, ret;
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	bool deleted = false, forceadd = false, reuse = false;
	u32 r, key, multi = 0, elements, maxelem;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	elements = t->hregion[r].elements;
	maxelem = t->maxelem;
	if (elements >= maxelem) {
		u32 e;
		if (SET_WITH_TIMEOUT(set)) {
			rcu_read_unlock_bh();
			mtype_gc_do(set, h, t, r);
			rcu_read_lock_bh();
		}
		maxelem = h->maxelem;
		elements = 0;
		for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
			elements += t->hregion[e].elements;
		if (elements >= maxelem && SET_WITH_FORCEADD(set))
			forceadd = true;
	}
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		if (forceadd || elements >= maxelem)
			goto set_full;
		old = NULL;
		n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		n->size = AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
		goto copy_elem;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			/* Reuse first deleted entry */
			if (j == -1) {
				deleted = reuse = true;
				j = i;
			}
			continue;
		}
		data = ahash_data(n, i, set->dsize);
		if (mtype_data_equal(data, d, &multi)) {
			if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
				/* Just the extensions could be overwritten */
				j = i;
				goto overwrite_extensions;
			}
			ret = -IPSET_ERR_EXIST;
			goto unlock;
		}
		/* Reuse first timed out entry */
		if (SET_ELEM_EXPIRED(set, data) && j == -1) {
			j = i;
			reuse = true;
		}
	}
	if (reuse || forceadd) {
		if (j == -1)
			j = 0;
		data = ahash_data(n, j, set->dsize);
		if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
			for (i = 0; i < IPSET_NET_COUNT; i++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, i)),
					i);
#endif
			ip_set_ext_destroy(set, data);
			t->hregion[r].elements--;
		}
		goto copy_data;
	}
	if (elements >= maxelem)
		goto set_full;
	/* Create a new slot */
	if (n->pos >= n->size) {
		TUNE_AHASH_MAX(h, multi);
		if (n->size >= AHASH_MAX(h)) {
			/* Trigger rehashing */
			mtype_data_next(&h->next, d);
			ret = -EAGAIN;
			goto resize;
		}
		old = n;
		n = kzalloc(sizeof(*n) +
			    (old->size + AHASH_INIT_SIZE) * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(n, old, sizeof(struct hbucket) +
		       old->size * set->dsize);
		n->size = old->size + AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
	}

copy_elem:
	j = n->pos++;
	data = ahash_data(n, j, set->dsize);
copy_data:
	t->hregion[r].elements++;
#ifdef IP_SET_HASH_WITH_NETS
	for (i = 0; i < IPSET_NET_COUNT; i++)
		mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
	memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
#ifdef IP_SET_HASH_WITH_NETS
	mtype_data_set_flags(data, flags);
#endif
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(data, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(data, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
	/* Must come last for the case when timed out entry is reused */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
	smp_mb__before_atomic();
	set_bit(j, n->used);
	if (old != ERR_PTR(-ENOENT)) {
		rcu_assign_pointer(hbucket(t, key), n);
		if (old)
			kfree_rcu(old, rcu);
	}
	ret = 0;

resize:
	spin_unlock_bh(&t->hregion[r].lock);
	if (atomic_read(&t->ref) && ext->target) {
		/* Resize is in process and kernel side add, save values */
		struct mtype_resize_ad *x;

		x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
		if (x) {
			x->ad = IPSET_ADD;
			memcpy(&x->d, value, sizeof(struct mtype_elem));
			memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
			memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
			x->flags = flags;
			spin_lock_bh(&set->lock);
			list_add_tail(&x->list, &h->ad);
			spin_unlock_bh(&set->lock);
		}
	}
	goto out;

set_full:
	if (net_ratelimit())
		pr_warn("Set %s is full, maxelem %u reached\n",
			set->name, maxelem);
	ret = -IPSET_ERR_HASH_FULL;
unlock:
	spin_unlock_bh(&t->hregion[r].lock);
out:
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by add: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}
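/* Bucket growth in mtype_add() proceeds in AHASH_INIT_SIZE steps:
 * 4 -> 8 -> 12 slots. Once a bucket would exceed AHASH_MAX(h) (12, or up
 * to 64 when tuned via IP_SET_HASH_WITH_MULTI), -EAGAIN is returned and
 * the core retries the add after resizing the table.
 */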
/* Delete an element from the hash and free up space if possible.
 */
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n;
	struct mtype_resize_ad *x = NULL;
	int i, j, k, r, ret = -IPSET_ERR_EXIST;
	u32 key, multi = 0;
	size_t dsize = set->dsize;

	/* Userspace add and resize is excluded by the mutex.
	 * Kernel-space add does not trigger a resize.
	 */
	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n)
		goto out;
	for (i = 0, k = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			k++;
			continue;
		}
		data = ahash_data(n, i, dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		if (SET_ELEM_EXPIRED(set, data))
			goto out;

		ret = 0;
		clear_bit(i, n->used);
		smp_mb__after_atomic();
		if (i + 1 == n->pos)
			n->pos--;
		t->hregion[r].elements--;
#ifdef IP_SET_HASH_WITH_NETS
		for (j = 0; j < IPSET_NET_COUNT; j++)
			mtype_del_cidr(set, h,
				       NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
#endif
		ip_set_ext_destroy(set, data);

		if (atomic_read(&t->ref) && ext->target) {
			/* Resize is in process and kernel side del,
			 * save values
			 */
			x = kzalloc(sizeof(struct mtype_resize_ad),
				    GFP_ATOMIC);
			if (x) {
				x->ad = IPSET_DEL;
				memcpy(&x->d, value,
				       sizeof(struct mtype_elem));
				x->flags = flags;
			}
		}
		for (; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				k++;
		}
		if (n->pos == 0 && k == 0) {
			t->hregion[r].ext_size -= ext_size(n->size, dsize);
			rcu_assign_pointer(hbucket(t, key), NULL);
			kfree_rcu(n, rcu);
		} else if (k >= AHASH_INIT_SIZE) {
			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
					(n->size - AHASH_INIT_SIZE) * dsize,
					GFP_ATOMIC);
			if (!tmp)
				goto out;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, k = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + k * dsize, data, dsize);
				set_bit(k, tmp->used);
				k++;
			}
			tmp->pos = k;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, key), tmp);
			kfree_rcu(n, rcu);
		}
		goto out;
	}

out:
	spin_unlock_bh(&t->hregion[r].lock);
	if (x) {
		spin_lock_bh(&set->lock);
		list_add(&x->list, &h->ad);
		spin_unlock_bh(&set->lock);
	}
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by del: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}
static int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
	if (!ip_set_match_extensions(set, ext, mext, flags, data))
		return 0;
	/* nomatch entries return -ENOTEMPTY */
	return mtype_do_data_match(data);
}
#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network
 * sizes added to the set
 */
static int
mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
		 const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t = rcu_dereference_bh(h->table);
	struct hbucket *n;
	struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
	struct mtype_elem orig = *d;
	int ret, i, j = 0, k;
#else
	int ret, i, j = 0;
#endif
	u32 key, multi = 0;

	pr_debug("test by nets\n");
	for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
		mtype_data_reset_elem(d, &orig);
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
		for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi;
		     k++) {
			mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
					   true);
#else
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
#endif
		key = HKEY(d, h->initval, t->htable_bits);
		n = rcu_dereference_bh(hbucket(t, key));
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			data = ahash_data(n, i, set->dsize);
			if (!mtype_data_equal(data, d, &multi))
				continue;
			ret = mtype_data_match(data, ext, mext, set, flags);
			if (ret != 0)
				return ret;
#ifdef IP_SET_HASH_WITH_MULTI
			/* No match, reset multiple match flag */
			multi = 0;
#endif
		}
#if IPSET_NET_COUNT == 2
		}
#endif
	}
	return 0;
}
#endif
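/* E.g. testing the address 192.168.0.1 against a hash:net set holding a
 * /24 and a /8 network: the address is masked first to /24 and looked up,
 * then to /8, until a match is found or the stored prefix sizes are
 * exhausted.
 */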
/* Test whether the element is added to the set */
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	   struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	struct mtype_elem *d = value;
	struct hbucket *n;
	struct mtype_elem *data;
	int i, ret = 0;
	u32 key, multi = 0;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
	/* If we test an IP address and not a network address,
	 * try all possible network sizes
	 */
	for (i = 0; i < IPSET_NET_COUNT; i++)
		if (DCIDR_GET(d->cidr, i) != HOST_MASK)
			break;
	if (i == IPSET_NET_COUNT) {
		ret = mtype_test_cidrs(set, d, ext, mext, flags);
		goto out;
	}
#endif
	key = HKEY(d, h->initval, t->htable_bits);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used))
			continue;
		data = ahash_data(n, i, set->dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		ret = mtype_data_match(data, ext, mext, set, flags);
		if (ret != 0)
			goto out;
	}
out:
	rcu_read_unlock_bh();
	return ret;
}
/* Reply a HEADER request: fill out the header part of the set */
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
	struct htype *h = set->data;
	const struct htable *t;
	struct nlattr *nested;
	size_t memsize;
	u32 elements = 0;
	size_t ext_size = 0;
	u8 htable_bits;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	mtype_ext_size(set, &elements, &ext_size);
	memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
	htable_bits = t->htable_bits;
	rcu_read_unlock_bh();

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
			  htonl(jhash_size(htable_bits))) ||
	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
		goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_NETMASK
	if (h->netmask != HOST_MASK &&
	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
		goto nla_put_failure;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
		goto nla_put_failure;
#endif
	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	nla_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}
/* Make it possible to run dumping in parallel with resizing */
static void
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
{
	struct htype *h = set->data;
	struct htable *t;

	if (start) {
		rcu_read_lock_bh();
		t = ipset_dereference_bh_nfnl(h->table);
		atomic_inc(&t->uref);
		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
		rcu_read_unlock_bh();
	} else if (cb->args[IPSET_CB_PRIVATE]) {
		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
			pr_debug("Table destroy after resize by dump: %p\n",
				 t);
			mtype_ahash_destroy(set, t, false);
		}
		cb->args[IPSET_CB_PRIVATE] = 0;
	}
}
/* Reply a LIST/SAVE request: dump the elements of the specified set */
static int
mtype_list(const struct ip_set *set,
	   struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct htable *t;
	struct nlattr *atd, *nested;
	const struct hbucket *n;
	const struct mtype_elem *e;
	u32 first = cb->args[IPSET_CB_ARG0];
	/* We assume that one hash bucket fits into one page */
	void *incomplete;
	int i, ret = 0;

	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	pr_debug("list hash set %s\n", set->name);
	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
	/* Expire may replace a hbucket with another one */
	rcu_read_lock();
	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
	     cb->args[IPSET_CB_ARG0]++) {
		cond_resched_rcu();
		incomplete = skb_tail_pointer(skb);
		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
			 cb->args[IPSET_CB_ARG0], t, n);
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			e = ahash_data(n, i, set->dsize);
			if (SET_ELEM_EXPIRED(set, e))
				continue;
			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
				 cb->args[IPSET_CB_ARG0], n, i, e);
			nested = nla_nest_start(skb, IPSET_ATTR_DATA);
			if (!nested) {
				if (cb->args[IPSET_CB_ARG0] == first) {
					nla_nest_cancel(skb, atd);
					ret = -EMSGSIZE;
					goto out;
				}
				goto nla_put_failure;
			}
			if (mtype_data_list(skb, e))
				goto nla_put_failure;
			if (ip_set_put_extensions(skb, set, e, true))
				goto nla_put_failure;
			nla_nest_end(skb, nested);
		}
	}
	nla_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;

	goto out;

nla_put_failure:
	nlmsg_trim(skb, incomplete);
	if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
		pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
			set->name);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		nla_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}
static int
IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
			  const struct xt_action_param *par,
			  enum ipset_adt adt, struct ip_set_adt_opt *opt);

static int
IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
			  enum ipset_adt adt, u32 *lineno, u32 flags,
			  bool retried);

static const struct ip_set_type_variant mtype_variant = {
	.kadt	= mtype_kadt,
	.uadt	= mtype_uadt,
	.adt	= {
		[IPSET_ADD] = mtype_add,
		[IPSET_DEL] = mtype_del,
		[IPSET_TEST] = mtype_test,
	},
	.destroy = mtype_destroy,
	.flush	= mtype_flush,
	.head	= mtype_head,
	.list	= mtype_list,
	.uref	= mtype_uref,
	.resize	= mtype_resize,
	.same_set = mtype_same_set,
	.cancel_gc = mtype_cancel_gc,
	.region_lock = true,
};
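/* Instantiation sketch (illustrative): a concrete type implementation
 * such as ip_set_hash_ip.c defines the template parameters and then
 * includes this header, e.g.
 *
 *	#define HTYPE		hash_ip
 *	#define MTYPE		hash_ip4
 *	#define HOST_MASK	32
 *	#include "ip_set_hash_gen.h"
 *
 * which emits hash_ip4_add(), hash_ip4_test(), hash_ip4_variant and so on
 * from the mtype_* templates above.
 */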
#ifdef IP_SET_EMIT_CREATE
static int
IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
			    struct nlattr *tb[], u32 flags)
{
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;
#endif
	u8 hbits;
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;
#endif
	size_t hsize;
	struct htype *h;
	struct htable *t;
	u32 i;

	pr_debug("Create set %s with family %s\n",
		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");

#ifdef IP_SET_PROTO_UNDEF
	if (set->family != NFPROTO_UNSPEC)
		return -IPSET_ERR_INVALID_FAMILY;
#else
	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;
#endif

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

#ifdef IP_SET_HASH_WITH_MARKMASK
	/* Separated condition in order to avoid directive in argument list */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
		return -IPSET_ERR_PROTOCOL;

	markmask = 0xffffffff;
	if (tb[IPSET_ATTR_MARKMASK]) {
		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
		if (markmask == 0)
			return -IPSET_ERR_INVALID_MARKMASK;
	}
#endif

#ifdef IP_SET_HASH_WITH_NETMASK
	netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
		    (set->family == NFPROTO_IPV6 && netmask > 128) ||
		    netmask == 0)
			return -IPSET_ERR_INVALID_NETMASK;
	}
#endif

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	hsize = sizeof(*h);
	h = kzalloc(hsize, GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	/* Compute htable_bits from the user input parameter hashsize.
	 * Assume that hashsize == 2^htable_bits,
	 * otherwise round up to the first 2^n value.
	 */
	hbits = fls(hashsize - 1);
	hsize = htable_size(hbits);
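	/* E.g. a requested hashsize of 1000 yields hbits = fls(999) == 10,
	 * i.e. a table of jhash_size(10) == 1024 buckets; a power of two
	 * such as 1024 maps to itself.
	 */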
	if (hsize == 0) {
		kfree(h);
		return -ENOMEM;
	}
	t = ip_set_alloc(hsize);
	if (!t) {
		kfree(h);
		return -ENOMEM;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
	if (!t->hregion) {
		ip_set_free(t);
		kfree(h);
		return -ENOMEM;
	}
	h->gc.set = set;
	for (i = 0; i < ahash_numof_locks(hbits); i++)
		spin_lock_init(&t->hregion[i].lock);
	h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
	h->netmask = netmask;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	h->markmask = markmask;
#endif
	get_random_bytes(&h->initval, sizeof(h->initval));

	t->htable_bits = hbits;
	t->maxelem = h->maxelem / ahash_numof_locks(hbits);
	RCU_INIT_POINTER(h->table, t);

	INIT_LIST_HEAD(&h->ad);

	set->data = h;
#ifndef IP_SET_PROTO_UNDEF
	if (set->family == NFPROTO_IPV4) {
#endif
		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
	} else {
		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
	}
#endif
	set->timeout = IPSET_NO_TIMEOUT;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
#ifndef IP_SET_PROTO_UNDEF
		if (set->family == NFPROTO_IPV4)
#endif
			IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
#ifndef IP_SET_PROTO_UNDEF
		else
			IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
#endif
	}
	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(t->htable_bits),
		 t->htable_bits, h->maxelem, set->data, t);

	return 0;
}
#endif /* IP_SET_EMIT_CREATE */