/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2015 Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_cpu.h
 *
 * CPU partition
 *   . CPU partition is a virtual processing unit
 *
 *   . CPU partition can contain 1-N cores, or 1-N NUMA nodes;
 *     in other words, a CPU partition is a pool of processors.
 *
 * CPU Partition Table (CPT)
 *   . a set of CPU partitions
 *
 *   . There are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP
 *
 *   . The user can specify the total number of CPU partitions when
 *     creating a CPT; CPU partition IDs always start from 0.
 *
 *     Example: if there are 8 cores in the system, creating a CPT with
 *     cpu_npartitions=4 gives:
 *		core[0, 1] = partition[0], core[2, 3] = partition[1]
 *		core[4, 5] = partition[2], core[6, 7] = partition[3]
 *
 *	    while cpu_npartitions=1 gives:
 *		core[0, 1, ... 7] = partition[0]
 *
 *   . The user can also specify CPU partitions with a string pattern
 *
 *     Examples: cpu_partitions="0[0,1], 1[2,3]"
 *		 cpu_partitions="N 0[0-3], 1[4-8]"
 *
 *     The first character "N" means the following numbers are NUMA node IDs.
 *
 *   . NUMA allocators and CPU-affinity threads are built over CPU
 *     partitions, instead of HW CPUs or HW nodes.
 *
 *   . By default, Lustre modules should refer to the global cfs_cpt_table
 *     instead of accessing HW CPUs directly, so the concurrency of Lustre
 *     can be configured by cpu_npartitions of the global cfs_cpt_table.
 *
 *   . If cpu_npartitions=1 (all CPUs in one pool), Lustre works the same
 *     way as version 2.2 or earlier.
 *
 * Author: liang@whamcloud.com
 */
#ifndef __LIBCFS_CPU_H__
#define __LIBCFS_CPU_H__

/* any CPU partition */
#define CFS_CPT_ANY		(-1)
#ifdef CONFIG_SMP
/**
 * return cpumask of CPU partition \a cpt
 */
cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
/**
 * print string information of cpt-table
 */
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
#else /* !CONFIG_SMP */
struct cfs_cpt_table {
	/* # of CPU partitions */
	int		ctb_nparts;
	/* node mask */
	nodemask_t	ctb_nodemask;
};

static inline cpumask_t *
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
	return NULL;
}

static inline int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	return 0;
}
#endif /* CONFIG_SMP */
extern struct cfs_cpt_table	*cfs_cpt_table;

/**
 * destroy a CPU partition table
 */
void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
/**
 * create a cfs_cpt_table with \a ncpt number of partitions
 */
struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
/**
 * return total number of CPU partitions in \a cptab
 */
int
cfs_cpt_number(struct cfs_cpt_table *cptab);
/**
 * return number of HW cores or hyper-threads in CPU partition \a cpt
 */
int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
/**
 * is there any online CPU in CPU partition \a cpt
 */
int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
/**
 * return nodemask of CPU partition \a cpt
 */
nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
/**
 * map the current HW processor ID to a CPU-partition ID of \a cptab
 */
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
/**
 * map HW processor ID \a cpu to a CPU-partition ID by \a cptab
 */
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
/**
 * bind current thread on CPU partition \a cpt of \a cptab
 */
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
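/*
 * Usage sketch (illustrative only): bind the calling thread to the
 * partition covering the CPU it is currently running on:
 *
 *	int cpt = cfs_cpt_current(cfs_cpt_table, 1);
 *
 *	if (cpt >= 0)
 *		cfs_cpt_bind(cfs_cpt_table, cpt);
 */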
/**
 * add \a cpu to CPU partition \a cpt of \a cptab,
 * return 1 on success, otherwise return 0
 */
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * remove \a cpu from CPU partition \a cpt of \a cptab
 */
void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * add all cpus in \a mask to CPU partition \a cpt,
 * return 1 if all CPUs are successfully set, otherwise return 0
 */
int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
			int cpt, cpumask_t *mask);
/**
 * remove all cpus in \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
			   int cpt, cpumask_t *mask);
/**
 * add all cpus in NUMA node \a node to CPU partition \a cpt,
 * return 1 if all CPUs are successfully set, otherwise return 0
 */
int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * remove all cpus in NUMA node \a node from CPU partition \a cpt
 */
void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * add all cpus in node mask \a mask to CPU partition \a cpt,
 * return 1 if all CPUs are successfully set, otherwise return 0
 */
int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
			 int cpt, nodemask_t *mask);
/**
 * remove all cpus in node mask \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
			    int cpt, nodemask_t *mask);
/**
 * unset all cpus for CPU partition \a cpt
 */
void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);
/**
 * convert partition ID \a cpt to NUMA node ID; if there is more than one
 * node in this partition, it may return a different node ID each time.
 */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
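/*
 * Usage sketch (illustrative only): spread NUMA allocations over the
 * nodes of partition "cpt":
 *
 *	void *buf = kmalloc_node(size, GFP_KERNEL,
 *				 cfs_cpt_spread_node(cptab, cpt));
 */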
/**
 * return number of HTs in the same core as \a cpu
 */
int cfs_cpu_ht_nsiblings(int cpu);
/*
 * allocate per-cpu-partition data; the returned value is an array of
 * pointers that can be indexed by partition ID (see the usage sketch
 * after cfs_percpt_for_each() below):
 *	cptab != NULL: size of array is number of CPU partitions
 *	cptab == NULL: size of array is number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy per-cpu-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
#define cfs_percpt_for_each(var, i, vars)		\
	for (i = 0; i < cfs_percpt_number(vars) &&	\
		    ((var) = (vars)[i]) != NULL; i++)
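/*
 * Usage sketch (illustrative only; "struct my_counter" is a hypothetical
 * type): allocate one instance per CPU partition of the global table,
 * walk all instances, then release them:
 *
 *	struct my_counter *cnt;
 *	struct my_counter **cnts;
 *	int i;
 *
 *	cnts = cfs_percpt_alloc(cfs_cpt_table, sizeof(*cnt));
 *	cfs_percpt_for_each(cnt, i, cnts)
 *		cnt->mc_value = 0;
 *	cfs_percpt_free(cnts);
 */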
/*
 * percpu partition lock
 *
 * There are some use-cases like this in Lustre:
 * . each CPU partition has its own private data which is frequently
 *   changed, and mostly by the local CPU partition.
 * . all CPU partitions share some global data, which is rarely changed.
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . a change to private data just needs to take the private lock
 * . a read of shared data just needs to take _any_ of the private locks
 * . a change to shared data needs to take _all_ private locks,
 *   which is slow and should be really rare.
 */
enum {
	CFS_PERCPT_LOCK_EX	= -1, /* negative */
};
struct cfs_percpt_lock {
	/* cpu-partition-table for this lock */
	struct cfs_cpt_table	*pcl_cptab;
	/* exclusively locked */
	unsigned int		pcl_locked;
	/* private lock table */
	spinlock_t		**pcl_locks;
};
/* return number of private locks */
#define cfs_percpt_lock_num(pcl)	cfs_cpt_number(pcl->pcl_cptab)
/*
 * create a cpu-partition lock based on CPU partition table \a cptab;
 * each private lock is initialized with a lock class key from \a keys
 * for lockdep (\a keys may be NULL)
 */
struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
					       struct lock_class_key *keys);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);

/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);

/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
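/*
 * Usage sketch (illustrative only): take one private lock for frequent,
 * partition-local updates, and pass CFS_PERCPT_LOCK_EX to take all
 * private locks for the rare updates of shared data:
 *
 *	cfs_percpt_lock(pcl, cpt);
 *	... update data private to partition "cpt" ...
 *	cfs_percpt_unlock(pcl, cpt);
 *
 *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
 *	... update data shared by all partitions ...
 *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */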
#define CFS_PERCPT_LOCK_KEYS	256
/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
#define cfs_percpt_lock_alloc(cptab)					\
({									\
	static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS];	\
	struct cfs_percpt_lock *___lk;					\
									\
	if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS)		\
		___lk = cfs_percpt_lock_create(cptab, NULL);		\
	else								\
		___lk = cfs_percpt_lock_create(cptab, ___keys);		\
	___lk;								\
})
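/*
 * Usage sketch (illustrative only): the macro is a statement expression,
 * so it can be used as an rvalue; static lockdep keys are used as long as
 * the partition count fits within CFS_PERCPT_LOCK_KEYS:
 *
 *	struct cfs_percpt_lock *pcl = cfs_percpt_lock_alloc(cfs_cpt_table);
 */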
/**
 * iterate over all CPU partitions in \a cptab
 */
#define cfs_cpt_for_each(i, cptab)	\
	for (i = 0; i < cfs_cpt_number(cptab); i++)
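/*
 * Usage sketch (illustrative only): walk every partition in the global
 * table and print its CPU count:
 *
 *	int i;
 *
 *	cfs_cpt_for_each(i, cfs_cpt_table)
 *		pr_info("partition %d: %d CPUs\n",
 *			i, cfs_cpt_weight(cfs_cpt_table, i));
 */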
int  cfs_cpu_init(void);
void cfs_cpu_fini(void);

#endif /* __LIBCFS_CPU_H__ */