2 * Copyright 2014, Michael Ellerman, IBM Corp.
3 * Licensed under GPLv2.
13 #define NUMBER_OF_EBBS 50
16 * Test that if we overflow the counter while in the EBB handler, we take
17 * another EBB on exiting from the handler.
19 * We do this by counting with a stupidly low sample period, causing us to
20 * overflow the PMU while we're still in the EBB handler, leading to another
23 * We get out of what would otherwise be an infinite loop by leaving the
24 * counter frozen once we've taken enough EBBs.
/*
 * EBB (Event-Based Branch) handler.
 *
 * Counts spurious EBBs, logs each real one, then re-arms PMC1 with the
 * deliberately tiny sample period so the counter overflows again while we
 * are still inside this handler — producing the back-to-back EBB the test
 * is checking for. Once NUMBER_OF_EBBS have been taken, the counters are
 * reset with the freeze left in place so the loop terminates.
 *
 * NOTE(review): this extract is missing lines from the original file
 * (braces, local declarations of `val`/`siar`, and the early-return on the
 * spurious path) — confirm against the full source before relying on it.
 */
27 static void ebb_callee(void)
/* A real EBB must have the PMU Event-Occurred bit set in BESCR. */
31 	val = mfspr(SPRN_BESCR);
32 	if (!(val & BESCR_PMEO)) {
33 		ebb_state.stats.spurious++;
37 	ebb_state.stats.ebb_count++;
38 	trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
/*
 * Re-arm PMC1 with the low sample period: it will overflow again before
 * we return from this handler, triggering the next (back-to-back) EBB.
 */
41 	count_pmc(1, sample_period);
/* After enough EBBs, clear PMAO but leave counters frozen to break the chain. */
44 	if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS)
45 		/* Reset but leave counters frozen */
46 		reset_ebb_with_clear_mask(MMCR0_PMAO);
51 	/* Do some stuff to chew some cycles and pop the counter */
52 	siar = mfspr(SPRN_SIAR);
53 	trace_log_reg(ebb_state.trace, SPRN_SIAR, siar);
55 	val = mfspr(SPRN_PMC1);
56 	trace_log_reg(ebb_state.trace, SPRN_PMC1, val);
58 	val = mfspr(SPRN_MMCR0);
59 	trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
/*
 * Test entry point: open an EBB-enabled "cycles" event restricted to user
 * mode, install ebb_callee as the EBB handler, arm PMC1 with a very small
 * sample period, and busy-loop until the handler has taken NUMBER_OF_EBBS
 * EBBs. Fails if the expected count is never reached.
 *
 * NOTE(review): this extract is missing lines from the original file
 * (declarations of `event`, braces, and the disable/cleanup/report calls
 * between the loop and the final check) — confirm against the full source.
 */
62 int back_to_back_ebbs(void)
66 	SKIP_IF(!ebb_is_supported());
68 	event_init_named(&event, 0x1001e, "cycles");
69 	event_leader_ebb_init(&event);
/* Count in user mode only, so only our busy loop pops the counter. */
71 	event.attr.exclude_kernel = 1;
72 	event.attr.exclude_hv = 1;
73 	event.attr.exclude_idle = 1;
75 	FAIL_IF(event_open(&event));
77 	setup_ebb_handler(ebb_callee);
79 	FAIL_IF(ebb_event_enable(&event));
/* Arm PMC1 so it overflows after `sample_period` cycles. */
84 	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
/* Burn cycles until the handler has frozen the counters at NUMBER_OF_EBBS. */
88 	while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS)
89 		FAIL_IF(core_busy_loop());
98 	FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS);
105 return test_harness(back_to_back_ebbs, "back_to_back_ebbs");