#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

static inline void cpuset_inc(void)
{
	/* pre_enable first, so begin() is patched before retry() (see above) */
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	/* reverse order on disable, for the same reason */
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures; see the usage sketch below read_mems_allowed_retry().
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
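
/*
 * Illustrative usage sketch (not part of this header's API): the caller
 * samples the seqcount, attempts the operation, and retries only if the
 * attempt failed while mems_allowed was concurrently changing. The page
 * allocator follows this pattern; try_alloc() below is a hypothetical
 * stand-in for the actual allocation attempt:
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */
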
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
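
/*
 * Writer-side sketch (illustrative, not part of this header): a hypothetical
 * caller rebinding the current task to node 1. Readers running the
 * begin/retry loop above observe the seqcount bump and retry:
 *
 *	nodemask_t newmask = NODE_MASK_NONE;
 *
 *	node_set(1, newmask);
 *	set_mems_allowed(newmask);
 */
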
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */