// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for the API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#define MAX_OBJ_NUM 1000

static void padata_free_pd(struct parallel_data *pd);
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
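
/*
 * Worked example (illustrative): with cpumask.pcpu = {0, 2, 3} there are
 * three cpus in use, so seq_nr = 5 hashes to cpu_index = 5 % 3 = 2, and
 * padata_index_to_cpu() maps index 2 to the third set bit, i.e. cpu 3.
 */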
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
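
/*
 * Usage sketch (illustrative only, not part of this file; my_request,
 * my_parallel and my_serial are hypothetical caller-side names). A user
 * embeds struct padata_priv in its own job structure, fills in the two
 * callbacks and submits the job:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		struct sk_buff *skb;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		expensive_work(req->skb);	(runs with BHs off)
 *		padata_do_serial(padata);	(hand over for reordering)
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	if (err)
 *		(the job was not queued; fall back to synchronous
 *		 processing or drop the request)
 */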
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	next_queue = per_cpu_ptr(pd->pqueue, cpu);
	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);

		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
					    false);

		spin_unlock(&reorder->lock);
		goto out;
	}
	spin_unlock(&reorder->lock);

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;
	struct padata_parallel_queue *next_queue;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();

	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
	if (!list_empty(&next_queue->reorder.list))
		queue_work(pinst->wq, &pd->reorder_work);
}
static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
							   padata->cpu);

	spin_lock(&pqueue->reorder.lock);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	atomic_inc(&pd->reorder_objects);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
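
/*
 * Completing the sketch above (my_serial and deliver_in_order are
 * hypothetical): the serial callback sees the objects in their original
 * submission order, so it is the place to deliver results and release
 * the request.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		deliver_in_order(req->skb);
 *		kfree(req);
 *	}
 */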
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_possible_cpu(cpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
			pqueue->cpu_index = -1;
			continue;
		}

		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 1);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	if (atomic_dec_and_test(&pd_old->refcnt))
		padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}
/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to the padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);
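
/*
 * Registration sketch (illustrative; my_cpumask_change is a hypothetical
 * handler). The notifier is called from a blocking context with a mask of
 * PADATA_CPU_PARALLEL/PADATA_CPU_SERIAL bits and a pointer to the new
 * struct padata_cpumask, as passed by padata_replace() above:
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			(react to new_masks->pcpu)
 *		if (val & PADATA_CPU_SERIAL)
 *			(react to new_masks->cbcpu)
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_nblock = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nblock);
 */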
/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}
/**
 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to
 *                      the value equivalent to @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 *                to serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
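
/*
 * Usage sketch (illustrative): restrict the parallel workers to cpus 0
 * and 1. The mask is copied internally, so the caller keeps ownership
 * of new_mask:
 *
 *	cpumask_var_t new_mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(new_mask);
 *	cpumask_set_cpu(0, new_mask);
 *	cpumask_set_cpu(1, new_mask);
 *
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, new_mask);
 *	free_cpumask_var(new_mask);
 */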
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);
/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}
/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}
#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)
static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Must be called from a cpus_read_lock() protected region
 */
static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif
	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 *
 * Must be called from a cpus_read_lock() protected region
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	lockdep_assert_cpus_held();
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
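
/*
 * Lifecycle sketch (illustrative; my_wq is a hypothetical caller-owned
 * workqueue). Allocation must happen under cpus_read_lock(), and jobs
 * may only be submitted after padata_start():
 *
 *	struct padata_instance *pinst;
 *
 *	cpus_read_lock();
 *	pinst = padata_alloc_possible(my_wq);
 *	cpus_read_unlock();
 *	if (!pinst)
 *		return -ENOMEM;
 *
 *	padata_start(pinst);
 *	(submit jobs with padata_do_parallel())
 *	padata_stop(pinst);
 *	padata_free(pinst);
 */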
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		return ret;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0) {
		cpuhp_remove_multi_state(hp_online);
		return ret;
	}
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif