/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"
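
/*
 * Global affinity state: a mutex-protected list holding one
 * hfi1_affinity_node per NUMA node that hosts an HFI device, shared by
 * all devices in the system.
 */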
struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;
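
/*
 * A cpu_mask_set is a pool of CPUs: 'mask' is the pool itself, 'used'
 * tracks the CPUs currently handed out, and 'gen' counts how many times
 * the pool has been fully consumed and restarted (i.e. is being
 * overloaded).
 */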
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask.  Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
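	/*
	 * Example (hypothetical box): 8 online CPUs with 2 HT siblings per
	 * core, enumerated as 0-3 (first siblings) and 4-7 (second
	 * siblings): possible = 8, ht = 2, so step 1 keeps CPUs 0-3 and
	 * step 2 clears CPUs 4-7 from real_cpu_mask.
	 */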
	/*
	 * Step 1.  Skip over the first N HT siblings and use them as the
	 * "real" cores.  Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2.  Remove the remaining HT siblings.  Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
				cpumask_weight(topology_sibling_cpumask(
					cpumask_first(&node_affinity.proc.mask)
					));
	node_affinity.num_possible_nodes = num_possible_nodes();
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;
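
	/*
	 * Count the HFI devices on each NUMA node; e.g. two devices on
	 * node 0 make hfi1_per_node_cntr[0] == 2, which scales how many
	 * receive-interrupt CPUs each device reserves later in
	 * hfi1_dev_affinity_init().
	 */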
	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor,
					     ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				goto out;

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;

out:
	/*
	 * Invalid PCI NUMA node information found, note it, and populate
	 * our database 1:1.
	 */
	pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
	pr_err("HFI: System BIOS may need to be upgraded\n");
	for (node = 0; node < node_affinity.num_possible_nodes; node++)
		hfi1_per_node_cntr[node] = 1;

	return 0;
}

void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

/*
 * Interrupt affinity.
 *
 * Non-receive interrupts get a default mask that starts as the node's
 * possible CPUs with HT siblings removed and with each CPU claimed by a
 * receive interrupt removed.
 *
 * Receive interrupts get node-relative CPUs assigned in order, wrapping
 * back to the first node-relative CPU as necessary.
 */
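
/*
 * For example (hypothetical layout): a node whose "real" CPUs are 0-7,
 * with one device on the node and dd->n_krcv_queues == 3, ends up with
 * CPU 0 in general_intr_mask, CPUs 1-2 in rcv_intr.mask, and CPUs 3-7
 * left in def_intr.mask for SDMA engines.
 */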
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	/*
	 * If the BIOS does not have the NUMA node information set, select
	 * NUMA 0 so we get consistent performance.
	 */
	if (node < 0) {
		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
		node = 0;
	}
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			mutex_unlock(&node_affinity.lock);
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		node_affinity_add_tail(entry);
	}
	mutex_unlock(&node_affinity.lock);
	return 0;
}

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
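/*
 * For example (N being some SDMA IRQ number, as an illustration), a write
 * like "echo 3 > /proc/irq/N/smp_affinity_list" lands here through the
 * affinity notifier registered in hfi1_setup_sdma_notifier().
 */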
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
		   msix->irq, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	/*
	 * Set the new cpu in the hfi1_affinity_node and clean
	 * the old cpu if it is not used by any other IRQ
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}

static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	int cpu = cpumask_first(mask);
	struct hfi1_msix_entry *msix = container_of(notify,
						    struct hfi1_msix_entry,
						    notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}

static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	notify->irq = msix->irq;
	notify->notify = hfi1_irq_notifier_notify;
	notify->release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(notify->irq, notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	if (irq_set_affinity_notifier(notify->irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
			return -ENOMEM;

		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);

		free_cpumask_var(diff);
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
		    msix->irq, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	return 0;
}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);

	return ret;
}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
	}

	irq_set_affinity_hint(msix->irq, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}
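
/*
 * find_hw_thread_mask() below builds the cpumask of the hw_thread_no-th
 * HT sibling of every physical core, assuming CPUs are enumerated with
 * all first siblings before all second siblings (the same assumption
 * init_real_cpu_mask() makes).
 */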
/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}
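
		/*
		 * Worked example (hypothetical): 56 online CPUs, 2 siblings
		 * per core, 2 online nodes -> num_cores_per_socket = 14.
		 * For hw_thread_no == 1, the first-sibling mask (CPUs 0-27)
		 * is shifted left by 14 * 2 * 1 = 28 below, selecting the
		 * second sibling set (CPUs 28-55).
		 */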
		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}

int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = &current->cpus_allowed;
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (First set of HT
	 *     cores on all physical cores, then second set of HT cores,
	 *     and so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores from the bitmask.
	 */
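
	/*
	 * For instance (hypothetical): with the device on NUMA node 0 and
	 * node-0 CPU 0 servicing IRQs, new processes fill the free node-0
	 * CPUs other than CPU 0 first, then CPU 0, then node-1 CPUs, and
	 * only then the next set of HT siblings.
	 */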

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	/*
	 * If NUMA node has CPUs used by interrupt handlers, include them in the
	 * interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will always be not empty at least once in this
			 * loop as the used mask gets reset when
			 * (set->mask == set->used) before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}
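
/*
 * Release a CPU previously handed out by hfi1_get_proc_affinity(); a cpu
 * of -1 (no assignment was made) is a no-op.
 */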
void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpumask_clear_cpu(cpu, &set->used);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	mutex_unlock(&affinity->lock);
}