// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *	     (C) 2017 Anju T Sudhakar, IBM Corporation.
 *	     (C) 2017 Hemant K Shaw, IBM Corporation.
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>
#include <linux/spinlock.h>

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
static struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
static struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

/* Trace IMC data structures */

static DEFINE_PER_CPU(u64 *, trace_imc_mem);
static struct imc_pmu_ref *trace_imc_refc;
static int trace_imc_mem_size;

/*
 * Global data structure used to avoid races between thread,
 * core and trace-imc
 */
static struct imc_pmu_ref imc_global_refc = {
	.lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
	.id = 0,
	.refc = 0,
};

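/*
 * Example flow: a successful core_imc_event_init() sets
 * imc_global_refc.id to IMC_DOMAIN_CORE; any subsequent thread-imc or
 * trace-imc event_init() then fails with -EBUSY until the last
 * core-imc event is destroyed and reset_global_refc() drops the id
 * back to 0, since only one IMC mode can run at a time.
 */
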
static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static const struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};

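/*
 * The "format" group above is exported through sysfs, so userspace can
 * request an event by its raw config encoding, e.g. (the PMU name and
 * event encoding here are illustrative only):
 *
 *	perf stat -e 'nest_mcs01_imc/event=0x118/' -a sleep 1
 */
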
/* Format attribute for imc trace-mode */
PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
static struct attribute *trace_imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_cpmc_reserved.attr,
	&format_attr_cpmc_event.attr,
	&format_attr_cpmc_samplesel.attr,
	&format_attr_cpmc_load.attr,
	NULL,
};

static const struct attribute_group trace_imc_format_group = {
	.name = "format",
	.attrs = trace_imc_format_attrs,
};

/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};

/* device_str_attr_create : Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}

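/*
 * Attributes built here are published under
 * /sys/bus/event_source/devices/<pmu>/events/<name>; reading such a
 * file invokes perf_event_sysfs_show(), which prints the "str" value
 * (e.g. "event=0x118") stored above.
 */
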
static int imc_parse_event(struct device_node *np, const char *scale,
			   const char *unit, const char *prefix,
			   u32 base, struct imc_events *event)
{
	const char *s;
	u32 reg;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;
	/* Add the base_reg value to the "reg" */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return 0;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	return -EINVAL;
}

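/*
 * A hypothetical device tree child node that imc_parse_event() accepts
 * (the "scale" and "unit" properties are optional and fall back to the
 * parent's values passed in via the "scale"/"unit" arguments):
 *
 *	event@118 {
 *		event-name = "PM_XLINK_CYCLES";
 *		reg = <0x118>;
 *		scale = "1.2207e-4";
 *		unit = "MiB";
 *	};
 */
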
/*
 * imc_free_events: Function to cleanup the events list, having
 *		    "nr_entries".
 */
static void imc_free_events(struct imc_events *events, int nr_entries)
{
	int i;

	/* Nothing to clean, return */
	if (!events)
		return;
	for (i = 0; i < nr_entries; i++) {
		kfree(events[i].unit);
		kfree(events[i].scale);
		kfree(events[i].name);
	}

	kfree(events);
}

/*
 * update_events_in_group: Update the "events" information in an attr_group
 *                         and assign the attr_group to the pmu "pmu".
 */
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	u32 handle, base_reg;
	int i = 0, j = 0, ct, ret;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with a given phandle */
	if (!pmu_events)
		return 0;

	/* Get a count of number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix)) {
		of_node_put(pmu_events);
		return 0;
	}

	/* Get a global unit and scale data if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* "reg" property gives out the base offset of the counters data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events) {
		of_node_put(pmu_events);
		return -ENOMEM;
	}

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
		if (!ret)
			ct++;
	}

	of_node_put(pmu_events);

	/* Allocate memory for attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group) {
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for attributes.
	 * Since we have count of events for this pmu, we also allocate
	 * memory for the scale and unit attribute for now.
	 * "ct" has the total event structs added from the events-parent node.
	 * So allocate three times the "ct" (this includes event, event_scale and
	 * event_unit).
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i].scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i].unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	return 0;
}

/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given node */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	while (*pn) {
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
		pn++;
	}
}

static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Check whether nest_imc is registered. We could end up here if the
	 * cpuhotplug callback registration fails. i.e, callback invokes the
	 * offline path for all successfully registered nodes. At this stage,
	 * nest_imc pmu will not be registered and we should return here.
	 *
	 * We return with a zero since this is not an offline failure. And
	 * cpuhp_setup_state() returns the actual failure reason to the caller,
	 * which in turn will call the cleanup routine.
	 */
	if (!nest_pmus)
		return 0;

	/*
	 * Now that this cpu is one of the designated,
	 * find a next cpu a) which is online and b) in same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_last(l_cpumask);

	/*
	 * If this(target) is the last cpu in the cpumask for this chip,
	 * check for any possible online cpu in the chip.
	 */
	if (unlikely(target == cpu))
		target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip then, skip the reference
		 * count lock and make the reference count on this chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}

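/*
 * Note: cpuhp_setup_state() also invokes the online callback on every
 * CPU that is already up, so ppc_nest_imc_cpu_online() runs once per
 * node at registration time. If any of those invocations fail, the
 * offline callback is rolled back on the CPUs already processed, which
 * is why ppc_nest_imc_cpu_offline() tolerates a not-yet-registered pmu.
 */
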
static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * If no events are currently in use, then we have to take a
	 * lock to ensure that we don't race with another task doing
	 * enable or disable the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the lock for this node and then decrement the reference count */
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when perf session is
		 * started, followed by offlining of all cpus in a given node.
		 *
		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given node, and makes an OPAL
		 * call to disable the engine in that node.
		 */
		spin_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);
}

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
	 * Get the base memory address for this cpu.
	 */
	chip_id = cpu_to_chip_id(event->cpu);

	/* Return, if chip_id is not valid */
	if (chip_id < 0)
		return -ENODEV;

	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni->vbase);

	if (!flag)
		return -ENODEV;

	/*
	 * Add the event offset to the base address.
	 */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the lock and then increment the count of nest pmu events inited.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}

/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() and uses the returned address as an argument to
 * an opal call to configure the pdbar. The address sent as an argument is
 * converted to physical address before the opal call is made. This is the
 * base address at which the core imc counters are populated.
 */
static int core_imc_mem_init(int cpu, int size)
{
	int nid, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;
	struct page *page;

	/*
	 * alloc_pages_node() will allocate memory for core in the
	 * local node only.
	 */
	nid = cpu_to_node(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	page = alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size));
	if (!page)
		return -ENOMEM;
	mem_info->vbase = page_address(page);

	core_imc_refc[core_id].id = core_id;
	spin_lock_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then, don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* set the cpu in the mask */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int core_id;
	int ncpu;
	struct imc_pmu_ref *ref;

	/*
	 * clear this cpu out of the mask, if not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/*
	 * Check whether core_imc is registered. We could end up here
	 * if the cpuhotplug callback registration fails. i.e, callback
	 * invokes the offline path for all successfully registered cpus.
	 * At this stage, core_imc pmu will not be registered and we
	 * should return here.
	 *
	 * We return with a zero since this is not an offline failure.
	 * And cpuhp_setup_state() returns the actual failure reason
	 * to the caller, which in turn will call the cleanup routine.
	 */
	if (!core_imc_pmu->pmu.event_init)
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_last(cpu_sibling_mask(cpu));

	if (unlikely(ncpu == cpu))
		ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core then skip taking reference
		 * count lock for this core and directly zero "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
		/*
		 * Reduce the global reference count, if this is the
		 * last cpu in this core and a core-imc event is running
		 * on this cpu.
		 */
		spin_lock(&imc_global_refc.lock);
		if (imc_global_refc.id == IMC_DOMAIN_CORE)
			imc_global_refc.refc--;

		spin_unlock(&imc_global_refc.lock);
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}

static void reset_global_refc(struct perf_event *event)
{
	spin_lock(&imc_global_refc.lock);
	imc_global_refc.refc--;

	/*
	 * If no other thread is running any
	 * event for this domain(thread/core/trace),
	 * set the global id to zero.
	 */
	if (imc_global_refc.refc <= 0) {
		imc_global_refc.refc = 0;
		imc_global_refc.id = 0;
	}
	spin_unlock(&imc_global_refc.lock);
}

static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;
	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * lock to ensure that we don't race with another task doing
	 * enable or disable the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when perf session is
		 * started, followed by offlining of all cpus in a given core.
		 *
		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given core, and makes an OPAL
		 * call to disable the engine in that core.
		 */
		spin_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);

	reset_global_refc(event);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when it is used.
	 * See if this is triggered for the first time.
	 * If yes, take the lock and enable the core counters.
	 * If not, just increment the count in core_imc_refc struct.
	 */
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);

	/*
	 * Since the system can run either in accumulation or trace-mode
	 * of IMC at a time, core-imc events are allowed only if no other
	 * trace/thread imc events are enabled/monitored.
	 *
	 * Take the global lock, and check the refc.id
	 * to know whether any other trace/thread imc
	 * events are running.
	 */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
		/*
		 * No other trace/thread imc events are running in
		 * the system, so set the refc.id to core-imc.
		 */
		imc_global_refc.id = IMC_DOMAIN_CORE;
		imc_global_refc.refc++;
	} else {
		spin_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	spin_unlock(&imc_global_refc.lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}

/*
 * Allocates a page of memory for each of the online cpus, and loads
 * LDBAR with 0.
 * The physical base address of the page allocated for a cpu will be
 * written to the LDBAR for that cpu, when the thread-imc event
 * is added.
 *
 * LDBAR Register Layout:
 *
 *  0          4         8         12        16        20        24        28
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |       [   ]    [                   Counter Address [8:50]
 *   | * Mode    |
 *   |           * PB Scope
 *   * Enable/Disable
 *
 *  32        36        40        44        48        52        56        60
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *           Counter Address [8:50]        ]
 *
 */
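/*
 * A minimal sketch of composing an LDBAR value following the layout
 * above; thread_imc_event_add() below does exactly this using the
 * THREAD_IMC_LDBAR_MASK and THREAD_IMC_ENABLE macros from
 * <asm/imc-pmu.h>:
 *
 *	u64 ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) |
 *			  THREAD_IMC_ENABLE;
 *	mtspr(SPRN_LDBAR, ldbar_value);
 */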
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int nid = cpu_to_node(cpu_id);

	if (!local_mem) {
		struct page *page;
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in cpu offline path.
		 */
		page = alloc_pages_node(nid,
					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
					__GFP_NOWARN, get_order(size));
		if (!page)
			return -ENOMEM;
		local_mem = page_address(page);

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	/*
	 * Set the bit 0 of LDBAR to zero.
	 *
	 * If bit 0 of LDBAR is unset, it will stop posting
	 * the counter data to memory.
	 * For thread-imc, bit 0 of LDBAR will be set to 1 in the
	 * event_add function. So reset this bit here, to stop the updates
	 * to memory in the cpu_offline path.
	 */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

	/* Reduce the refc if a thread-imc event is running on this cpu */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
		imc_global_refc.refc--;
	spin_unlock(&imc_global_refc.lock);

	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}

static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	spin_lock(&imc_global_refc.lock);
	/*
	 * Check if any other trace/core imc events are running in the
	 * system, if not set the global id to thread-imc.
	 */
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
		imc_global_refc.id = IMC_DOMAIN_THREAD;
		imc_global_refc.refc++;
	} else {
		spin_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	spin_unlock(&imc_global_refc.lock);

	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static __be64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (__be64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (__be64 *)event->hw.event_base;
}

static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

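/*
 * For event groups the perf core brackets the ->add() calls of the
 * group members with ->start_txn() and ->commit_txn() (or
 * ->cancel_txn() on failure). Only the PERF_PMU_TXN_ADD transaction
 * type matters for thread-imc, so other transaction types are ignored
 * above.
 */
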
static u64 imc_read_counter(struct perf_event *event)
{
	__be64 *addr;
	u64 data;

	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at later stage to present the event counter
	 * value.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}

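/*
 * Worked example of the snapshot/delta scheme: if prev_count held
 * 0x1000 from the last start/read and the in-memory counter now reads
 * 0x1500, imc_event_update() adds 0x500 to event->count. Wrap-around
 * of the free-flowing 64-bit counter is handled naturally by the
 * unsigned subtraction.
 */
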
static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter offset in memory. To get event
	 * counter value, we snapshot the value here and we calculate
	 * delta at later point.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot and calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());

	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return -EINVAL;

	core_id = smp_processor_id() / threads_per_core;
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
	mtspr(SPRN_LDBAR, ldbar_value);

	/*
	 * imc pmus are enabled only when it is used.
	 * See if this is triggered for the first time.
	 * If yes, take the lock and enable the counters.
	 * If not, just increment the count in ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter for core %d\n",
			       core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);
	return 0;
}

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;

	core_id = smp_processor_id() / threads_per_core;
	ref = &core_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get event reference count\n");
		return;
	}

	spin_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters for core %d\n",
			       core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);

	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

	/*
	 * Take a snapshot and calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

/*
 * Allocate a page of memory for each cpu, and load LDBAR with 0.
 */
static int trace_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
	int phys_id = cpu_to_node(cpu_id), rc = 0;
	int core_id = (cpu_id / threads_per_core);

	if (!local_mem) {
		struct page *page;

		page = alloc_pages_node(phys_id,
					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
					__GFP_NOWARN, get_order(size));
		if (!page)
			return -ENOMEM;
		local_mem = page_address(page);
		per_cpu(trace_imc_mem, cpu_id) = local_mem;

		/* Initialise the counters for trace mode */
		rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
					    get_hard_smp_processor_id(cpu_id));
		if (rc) {
			pr_info("IMC: opal init failed for trace imc\n");
			return rc;
		}
	}

	trace_imc_refc[core_id].id = core_id;
	spin_lock_init(&trace_imc_refc[core_id].lock);

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_trace_imc_cpu_online(unsigned int cpu)
{
	return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
}

static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
	/*
	 * No need to set bit 0 of LDBAR to zero, as
	 * it is set to zero for imc trace-mode
	 *
	 * Reduce the refc if any trace-imc event is running
	 * on this cpu.
	 */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
		imc_global_refc.refc--;
	spin_unlock(&imc_global_refc.lock);

	return 0;
}

static int trace_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
				 "perf/powerpc/imc_trace:online",
				 ppc_trace_imc_cpu_online,
				 ppc_trace_imc_cpu_offline);
}

static u64 get_trace_imc_event_base_addr(void)
{
	return (u64)per_cpu(trace_imc_mem, smp_processor_id());
}

/*
 * Function to parse trace-imc data obtained
 * and to prepare the perf sample.
 */
static int trace_imc_prepare_sample(struct trace_imc_data *mem,
				    struct perf_sample_data *data,
				    u64 *prev_tb,
				    struct perf_event_header *header,
				    struct perf_event *event)
{
	/* Sanity checks for a valid record */
	if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
		*prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
	else
		return -EINVAL;

	if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
	    be64_to_cpu(READ_ONCE(mem->tb2)))
		return -EINVAL;

	/* Prepare perf sample */
	data->ip = be64_to_cpu(READ_ONCE(mem->ip));
	data->period = event->hw.last_period;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;
	header->misc = 0;

	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
		case 0: /* when MSR HV and PR not set in the trace-record */
			header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
			break;
		case 1: /* MSR HV is 0 and PR is 1 */
			header->misc |= PERF_RECORD_MISC_GUEST_USER;
			break;
		case 2: /* MSR HV is 1 and PR is 0 */
			header->misc |= PERF_RECORD_MISC_KERNEL;
			break;
		case 3: /* MSR HV is 1 and PR is 1 */
			header->misc |= PERF_RECORD_MISC_USER;
			break;
		default:
			pr_info("IMC: Unable to set the flag based on MSR bits\n");
			break;
		}
	} else {
		if (is_kernel_addr(data->ip))
			header->misc |= PERF_RECORD_MISC_KERNEL;
		else
			header->misc |= PERF_RECORD_MISC_USER;
	}
	perf_event_header__init_id(header, data, event);

	return 0;
}

static void dump_trace_imc_data(struct perf_event *event)
{
	struct trace_imc_data *mem;
	int i, ret;
	u64 prev_tb = 0;

	mem = (struct trace_imc_data *)get_trace_imc_event_base_addr();
	for (i = 0; i < (trace_imc_mem_size / sizeof(struct trace_imc_data));
		i++, mem++) {
		struct perf_sample_data data;
		struct perf_event_header header;

		ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event);
		if (ret) /* Exit, if not a valid record */
			break;
		else {
			/* If this is a valid record, create the sample */
			struct perf_output_handle handle;

			if (perf_output_begin(&handle, &data, event, header.size))
				return;

			perf_output_sample(&handle, &header, &data, event);
			perf_output_end(&handle);
		}
	}
}

static int trace_imc_event_add(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;
	u64 local_mem, ldbar_value;

	/* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */
	local_mem = get_trace_imc_event_base_addr();
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

	/* trace-imc reference count */
	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get the event reference count\n");
		return -EINVAL;
	}

	mtspr(SPRN_LDBAR, ldbar_value);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
				get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);
	return 0;
}

static void trace_imc_event_read(struct perf_event *event)
{
	return;
}

static void trace_imc_event_stop(struct perf_event *event, int flags)
{
	u64 local_mem = get_trace_imc_event_base_addr();
	dump_trace_imc_data(event);
	memset((void *)local_mem, 0, sizeof(u64));
}

static void trace_imc_event_start(struct perf_event *event, int flags)
{
	return;
}

static void trace_imc_event_del(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;

	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get event reference count\n");
		return;
	}

	spin_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
				get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);

	trace_imc_event_stop(event, flags);
}

static int trace_imc_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Return if this is a counting event */
	if (event->attr.sample_period == 0)
		return -ENOENT;

	/*
	 * Take the global lock, and make sure
	 * no other thread is running any core/thread imc
	 * events
	 */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
		/*
		 * No core/thread imc events are running in the
		 * system, so set the refc.id to trace-imc.
		 */
		imc_global_refc.id = IMC_DOMAIN_TRACE;
		imc_global_refc.refc++;
	} else {
		spin_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	spin_unlock(&imc_global_refc.lock);

	event->hw.idx = -1;

	/*
	 * There can only be a single PMU for perf_hw_context events which is assigned to
	 * core PMU. Hence use "perf_sw_context" for trace_imc.
	 */
	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
	return 0;
}

/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	case IMC_DOMAIN_TRACE:
		pmu->pmu.event_init = trace_imc_event_init;
		pmu->pmu.add = trace_imc_event_add;
		pmu->pmu.del = trace_imc_event_del;
		pmu->pmu.start = trace_imc_event_start;
		pmu->pmu.stop = trace_imc_event_stop;
		pmu->pmu.read = trace_imc_event_read;
		pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
		break;
	default:
		break;
	}

	return 0;
}

/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);
	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Take the lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		spin_lock_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" initialized to
		 * 0 and will be used as index to the array. "i" will not go off the
		 * end of the array since the "for_each_node" loops for "N_POSSIBLE"
		 * nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By setting 0th bit of LDBAR to zero, we disable thread-imc
	 * updates to memory.
	 */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
}

void thread_imc_disable(void)
{
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

static void cleanup_all_trace_imc_memory(void)
{
	int i, order = get_order(trace_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(trace_imc_mem, i))
			free_pages((u64)per_cpu(trace_imc_mem, i), order);
	}
	kfree(trace_imc_refc);
}

/* Function to free the attr_groups which are dynamically allocated */
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
}

/*
 * Common function to unregister cpu hotplug callback and
 * free the memory.
 * TODO: Need to handle pmu unregistering, which will be
 * done in followup series.
 */
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}

	if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
		cleanup_all_trace_imc_memory();
	}
}

/*
 * Function to unregister thread-imc if core-imc
 * is not registered.
 */
void unregister_thread_imc(void)
{
	imc_common_cpuhp_mem_free(thread_imc_pmu);
	imc_common_mem_free(thread_imc_pmu);
	perf_pmu_unregister(&thread_imc_pmu->pmu);
}

/*
 * imc_mem_init : Function to support memory allocation for core imc.
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
			int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res = -ENOMEM;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			goto err;

		/* Needed for hotplug/migration */
		if (!per_nest_pmu_arr) {
			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
						   sizeof(struct imc_pmu *),
						   GFP_KERNEL);
			if (!per_nest_pmu_arr)
				goto err;
		}
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);
		if (!pmu_ptr->mem_info)
			goto err;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					GFP_KERNEL);
		if (!core_imc_refc) {
			kfree(pmu_ptr->mem_info);
			goto err;
		}

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res) {
				cleanup_all_thread_imc_memory();
				goto err;
			}
		}

		thread_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_TRACE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		trace_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					 GFP_KERNEL);
		if (!trace_imc_refc)
			goto err;

		trace_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
			if (res) {
				cleanup_all_trace_imc_memory();
				goto err;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
err:
	return res;
}

/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:	Device tree unit node
 * @pmu_ptr:	memory allocated for this pmu
 * @pmu_idx:	Count of nest pmc registered
 *
 * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug callback.
 * Handles failure cases and accordingly frees memory.
 */
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free_mem;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * Nest imc pmu needs only one cpu per chip, so we initialize
		 * the cpumask for the first nest imc pmu and use the same for
		 * the rest. To handle the cpuhotplug callback unregister, we
		 * track the number of nest pmus in "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(nest_imc_refc);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_TRACE:
		ret = trace_imc_cpu_init();
		if (ret) {
			cleanup_all_trace_imc_memory();
			goto err_free_mem;
		}

		break;
	default:
		return -EINVAL;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free_cpuhp_mem;

	pr_debug("%s performance monitor hardware support registered\n",
		 pmu_ptr->pmu.name);

	return 0;

err_free_cpuhp_mem:
	imc_common_cpuhp_mem_free(pmu_ptr);
err_free_mem:
	imc_common_mem_free(pmu_ptr);
	return ret;
}
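
/*
 * Note: init_imc_pmu() is invoked from the powernv platform probe code
 * (opal-imc), once for each IMC device node discovered under the OPAL
 * IMC counters node in the device tree.
 */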