#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
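/*
 * Translate a PCI bus to the physical package (socket) id it belongs
 * to, using the pci2phy mapping set up by the platform specific code.
 */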
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}
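/* Release all entries of the pci bus to socket mapping. */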
static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}
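/*
 * Find the pci2phy map for a PCI segment (domain), allocating and
 * initializing a new entry if none exists yet. Must be called with
 * pci2phy_map_lock held; the lock is dropped around the allocation.
 */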
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}
ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}
/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for a fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * Also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
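/*
 * Record the counter index assigned to an event and cache the control
 * and counter register addresses for it in the hw_perf_event.
 */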
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use an hrtimer to periodically poll the
 * counters and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
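/*
 * Allocate a box together with its shared register array, initialize
 * the extra register locks, the hrtimer and the default hrtimer period.
 */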
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}
/*
 * Using the uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}
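/*
 * Collect the leader event and, if dogrp is true, its siblings that
 * belong to this box into the box's event list. Returns the new number
 * of collected events, or -EINVAL if the box would be over-committed.
 */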
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
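/*
 * Assign a counter to each collected event: first try to reuse the
 * counter an event was already on, then fall back to the generic
 * constraint solver (perf_assign_events) for the remaining events.
 */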
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}
static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
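/*
 * pmu::add callback: collect the new event into the box, (re)assign
 * counters to all collected events and start the new event if
 * PERF_EF_START is set.
 */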
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}
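/*
 * pmu::del callback: stop the event, drop its constraint and remove it
 * from the box's event list.
 */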
static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}
/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu.
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
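/*
 * Register one perf PMU per uncore PMU instance. The PMU is named
 * "uncore", "uncore_<type>" or "uncore_<type>_<index>" depending on
 * how many boxes the type has.
 */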
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
			.module = THIS_MODULE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}
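/*
 * Allocate the per-type pmu array and the per-package box pointer
 * arrays, set up the default constraint and build the "events"
 * attribute group from the type's event descriptions.
 */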
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			goto err;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should
	 * check the PCI slot and function to identify the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for a performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}
static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}
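/*
 * CPU hotplug callbacks for the MSR based uncore PMUs: the boxes of a
 * package are initialized by the first CPU that comes up in that
 * package and torn down when the last one goes away, tracked via
 * box->refcnt.
 */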
static int uncore_cpu_dying(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}

static int uncore_cpu_starting(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

static int uncore_cpu_prepare(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}
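/*
 * Update box->cpu and migrate the perf contexts of one package's boxes
 * when the cpu collecting its uncore events goes away or comes up.
 */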
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}
static int uncore_event_cpu_offline(unsigned int cpu)
{
	int target;

	/* Check if the exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return 0;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
	return 0;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}
static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}
#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
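/*
 * Module init: match the running CPU model, run the model specific
 * cpu/pci init routines and register the CPU hotplug callbacks.
 */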
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 *
	 * The first online cpu of each package allocates and takes
	 * the refcounts for all other online cpus in that package.
	 * If msrs are not enabled no allocation is required and
	 * uncore_cpu_prepare() is not called for each online cpu.
	 */
	if (!cret) {
		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
					"PERF_X86_UNCORE_PREP",
					uncore_cpu_prepare, NULL);
		if (ret)
			goto err;
	} else {
		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
					  "PERF_X86_UNCORE_PREP",
					  uncore_cpu_prepare, NULL);
	}

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
			  "AP_PERF_X86_UNCORE_STARTING",
			  uncore_cpu_starting, uncore_cpu_dying);

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
			  "AP_PERF_X86_UNCORE_ONLINE",
			  uncore_event_cpu_online, uncore_event_cpu_offline);
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);