1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
/*
 * etm4_set_mode_exclude - program include/exclude filtering for the
 * address-range comparator pair currently selected by config->addr_idx.
 *
 * Pair n is controlled through TRCVIIECTLR (config->viiectlr): include
 * bit[n] and exclude bit[n + 16] are set/cleared as mutually exclusive.
 * Only acts when the selected comparator is an instruction address
 * comparator (TRCACATRn.TYPE == ETM_INSTR_ADDR) programmed as a RANGE pair.
 * NOTE(review): presumably called with drvdata->spinlock held — confirm
 * against callers (e.g. addr_range_store).
 */
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
16 struct etmv4_config *config = &drvdata->config;
18 idx = config->addr_idx;
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
37 if (exclude == true) {
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
/* sysfs RO: number of PE comparator inputs implemented (drvdata->nr_pe_cmp). */
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 static DEVICE_ATTR_RO(nr_pe_cmp);
/* sysfs RO: number of address comparator pairs (drvdata->nr_addr_cmp). */
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 static DEVICE_ATTR_RO(nr_addr_cmp);
/* sysfs RO: number of counters implemented (drvdata->nr_cntr). */
80 static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 static DEVICE_ATTR_RO(nr_cntr);
/* sysfs RO: number of external inputs implemented (drvdata->nr_ext_inp). */
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 static DEVICE_ATTR_RO(nr_ext_inp);
/* sysfs RO: number of Context ID comparators (drvdata->numcidc). */
104 static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 static DEVICE_ATTR_RO(numcidc);
/* sysfs RO: number of VMID comparators (drvdata->numvmidc). */
116 static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 static DEVICE_ATTR_RO(numvmidc);
/* sysfs RO: number of sequencer states implemented (drvdata->nrseqstate). */
128 static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 static DEVICE_ATTR_RO(nrseqstate);
/* sysfs RO: number of resource selector pairs (drvdata->nr_resource). */
140 static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 static DEVICE_ATTR_RO(nr_resource);
/* sysfs RO: number of single-shot comparator controls (drvdata->nr_ss_cmp). */
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 static DEVICE_ATTR_RO(nr_ss_cmp);
/*
 * sysfs WO "reset": restore the trace configuration to its default state.
 * Disables data/event/timestamp/stall tracing, re-enables ViewInst for
 * full-address-range tracing with start-stop logic in the started state,
 * and zeroes every comparator, counter, sequencer and resource selector
 * under drvdata->spinlock.
 */
164 static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
173 if (kstrtoul(buf, 16, &val))
176 spin_lock(&drvdata->spinlock);
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
199 /* Reset trace synchronization period to 2^8 = 256 bytes*/
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
208 config->vinst_ctrl = BIT(0);
209 if (drvdata->nr_addr_cmp > 0) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
220 config->vipcssctlr = 0x0;
222 /* Disable seq events */
223 for (i = 0; i < drvdata->nrseqstate-1; i++)
224 config->seq_ctrl[i] = 0x0;
225 config->seq_rst = 0x0;
226 config->seq_state = 0x0;
228 /* Disable external input events */
229 config->ext_inp = 0x0;
231 config->cntr_idx = 0x0;
232 for (i = 0; i < drvdata->nr_cntr; i++) {
233 config->cntrldvr[i] = 0x0;
234 config->cntr_ctrl[i] = 0x0;
235 config->cntr_val[i] = 0x0;
/* NOTE(review): loop starts at i = 2 — presumably resource selectors 0 and
 * 1 are architecturally reserved; confirm against the ETMv4 TRM. */
238 config->res_idx = 0x0;
239 for (i = 2; i < 2 * drvdata->nr_resource; i++)
240 config->res_ctrl[i] = 0x0;
242 config->ss_idx = 0x0;
243 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
244 config->ss_ctrl[i] = 0x0;
245 config->ss_pe_cmp[i] = 0x0;
/* Two comparators per address range, hence nr_addr_cmp * 2 entries. */
248 config->addr_idx = 0x0;
249 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
250 config->addr_val[i] = 0x0;
251 config->addr_acc[i] = 0x0;
252 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
255 config->ctxid_idx = 0x0;
256 for (i = 0; i < drvdata->numcidc; i++)
257 config->ctxid_pid[i] = 0x0;
259 config->ctxid_mask0 = 0x0;
260 config->ctxid_mask1 = 0x0;
262 config->vmid_idx = 0x0;
263 for (i = 0; i < drvdata->numvmidc; i++)
264 config->vmid_val[i] = 0x0;
265 config->vmid_mask0 = 0x0;
266 config->vmid_mask1 = 0x0;
/* Default trace ID: CPU number + 1 keeps ID 0 free. */
268 drvdata->trcid = drvdata->cpu + 1;
270 spin_unlock(&drvdata->spinlock);
274 static DEVICE_ATTR_WO(reset);
/*
 * sysfs "mode" read.
 * NOTE(review): the assignment of 'val' is not visible in this listing —
 * presumably val = config->mode; confirm against the full source.
 */
276 static ssize_t mode_show(struct device *dev,
277 struct device_attribute *attr,
281 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
282 struct etmv4_config *config = &drvdata->config;
285 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * sysfs "mode" write: translate the user-supplied ETM_MODE_* bitmask into
 * the hardware configuration registers (TRCCONFIGR via config->cfg,
 * TRCEVENTCTL1R, TRCSTALLCTLR, TRCVICTLR), gating each feature on the
 * corresponding capability flag in drvdata. Runs under drvdata->spinlock.
 */
288 static ssize_t mode_store(struct device *dev,
289 struct device_attribute *attr,
290 const char *buf, size_t size)
292 unsigned long val, mode;
293 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
294 struct etmv4_config *config = &drvdata->config;
296 if (kstrtoul(buf, 16, &val))
299 spin_lock(&drvdata->spinlock);
300 config->mode = val & ETMv4_MODE_ALL;
302 if (drvdata->instrp0 == true) {
303 /* start by clearing instruction P0 field */
304 config->cfg &= ~(BIT(1) | BIT(2));
305 if (config->mode & ETM_MODE_LOAD)
306 /* 0b01 Trace load instructions as P0 instructions */
307 config->cfg |= BIT(1);
308 if (config->mode & ETM_MODE_STORE)
309 /* 0b10 Trace store instructions as P0 instructions */
310 config->cfg |= BIT(2);
311 if (config->mode & ETM_MODE_LOAD_STORE)
313 * 0b11 Trace load and store instructions
316 config->cfg |= BIT(1) | BIT(2);
319 /* bit[3], Branch broadcast mode */
320 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
321 config->cfg |= BIT(3);
323 config->cfg &= ~BIT(3);
325 /* bit[4], Cycle counting instruction trace bit */
326 if ((config->mode & ETMv4_MODE_CYCACC) &&
327 (drvdata->trccci == true))
328 config->cfg |= BIT(4);
330 config->cfg &= ~BIT(4);
332 /* bit[6], Context ID tracing bit */
333 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
334 config->cfg |= BIT(6);
336 config->cfg &= ~BIT(6);
/* bit[7], VMID tracing bit */
338 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
339 config->cfg |= BIT(7);
341 config->cfg &= ~BIT(7);
343 /* bits[10:8], Conditional instruction tracing bit */
344 mode = ETM_MODE_COND(config->mode);
345 if (drvdata->trccond == true) {
346 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
347 config->cfg |= mode << 8;
350 /* bit[11], Global timestamp tracing bit */
351 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
352 config->cfg |= BIT(11);
354 config->cfg &= ~BIT(11);
356 /* bit[12], Return stack enable bit */
357 if ((config->mode & ETM_MODE_RETURNSTACK) &&
358 (drvdata->retstack == true))
359 config->cfg |= BIT(12);
361 config->cfg &= ~BIT(12);
363 /* bits[14:13], Q element enable field */
364 mode = ETM_MODE_QELEM(config->mode);
365 /* start by clearing QE bits */
366 config->cfg &= ~(BIT(13) | BIT(14));
368 * if supported, Q elements with instruction counts are enabled.
369 * Always set the low bit for any requested mode. Valid combos are
370 * 0b00, 0b01 and 0b11.
372 if (mode && drvdata->q_support)
373 config->cfg |= BIT(13);
375 * if supported, Q elements with and without instruction
378 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
379 config->cfg |= BIT(14);
381 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
382 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
383 (drvdata->atbtrig == true))
384 config->eventctrl1 |= BIT(11);
386 config->eventctrl1 &= ~BIT(11);
388 /* bit[12], Low-power state behavior override bit */
389 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
390 (drvdata->lpoverride == true))
391 config->eventctrl1 |= BIT(12);
393 config->eventctrl1 &= ~BIT(12);
395 /* bit[8], Instruction stall bit */
396 if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
397 config->stall_ctrl |= BIT(8);
399 config->stall_ctrl &= ~BIT(8);
401 /* bit[10], Prioritize instruction trace bit */
402 if (config->mode & ETM_MODE_INSTPRIO)
403 config->stall_ctrl |= BIT(10);
405 config->stall_ctrl &= ~BIT(10);
407 /* bit[13], Trace overflow prevention bit */
408 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
409 (drvdata->nooverflow == true))
410 config->stall_ctrl |= BIT(13);
412 config->stall_ctrl &= ~BIT(13);
414 /* bit[9] Start/stop logic control bit */
415 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
416 config->vinst_ctrl |= BIT(9);
418 config->vinst_ctrl &= ~BIT(9);
420 /* bit[10], Whether a trace unit must trace a Reset exception */
421 if (config->mode & ETM_MODE_TRACE_RESET)
422 config->vinst_ctrl |= BIT(10);
424 config->vinst_ctrl &= ~BIT(10);
426 /* bit[11], Whether a trace unit must trace a system error exception */
427 if ((config->mode & ETM_MODE_TRACE_ERR) &&
428 (drvdata->trc_error == true))
429 config->vinst_ctrl |= BIT(11);
431 config->vinst_ctrl &= ~BIT(11);
/* Apply kernel/user exclusion filtering last if requested. */
433 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
434 etm4_config_trace_mode(config);
436 spin_unlock(&drvdata->spinlock);
440 static DEVICE_ATTR_RW(mode);
/* sysfs RW "pe": PE (processing element) selector, config->pe_sel. */
442 static ssize_t pe_show(struct device *dev,
443 struct device_attribute *attr,
447 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
448 struct etmv4_config *config = &drvdata->config;
450 val = config->pe_sel;
451 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
454 static ssize_t pe_store(struct device *dev,
455 struct device_attribute *attr,
456 const char *buf, size_t size)
459 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
460 struct etmv4_config *config = &drvdata->config;
462 if (kstrtoul(buf, 16, &val))
465 spin_lock(&drvdata->spinlock);
/* NOTE(review): '>' permits val == nr_pe — verify whether the selector is
 * 0-based (which would make '>=' the right bound). */
466 if (val > drvdata->nr_pe) {
467 spin_unlock(&drvdata->spinlock);
471 config->pe_sel = val;
472 spin_unlock(&drvdata->spinlock);
475 static DEVICE_ATTR_RW(pe);
/*
 * sysfs RW "event": event control 0 register (config->eventctrl0).
 * On write, the value is masked to one byte per implemented event
 * (drvdata->nr_event selects how many EVENTn fields are kept).
 */
477 static ssize_t event_show(struct device *dev,
478 struct device_attribute *attr,
482 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
483 struct etmv4_config *config = &drvdata->config;
485 val = config->eventctrl0;
486 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
489 static ssize_t event_store(struct device *dev,
490 struct device_attribute *attr,
491 const char *buf, size_t size)
494 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
495 struct etmv4_config *config = &drvdata->config;
497 if (kstrtoul(buf, 16, &val))
500 spin_lock(&drvdata->spinlock);
501 switch (drvdata->nr_event) {
503 /* EVENT0, bits[7:0] */
504 config->eventctrl0 = val & 0xFF;
507 /* EVENT1, bits[15:8] */
508 config->eventctrl0 = val & 0xFFFF;
511 /* EVENT2, bits[23:16] */
512 config->eventctrl0 = val & 0xFFFFFF;
515 /* EVENT3, bits[31:24] */
516 config->eventctrl0 = val;
521 spin_unlock(&drvdata->spinlock);
524 static DEVICE_ATTR_RW(event);
/*
 * sysfs RW "event_instren": instruction event-enable bits[3:0] of
 * TRCEVENTCTL1R (config->eventctrl1), one bit per implemented event.
 */
526 static ssize_t event_instren_show(struct device *dev,
527 struct device_attribute *attr,
531 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
532 struct etmv4_config *config = &drvdata->config;
534 val = BMVAL(config->eventctrl1, 0, 3);
535 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
538 static ssize_t event_instren_store(struct device *dev,
539 struct device_attribute *attr,
540 const char *buf, size_t size)
543 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
544 struct etmv4_config *config = &drvdata->config;
546 if (kstrtoul(buf, 16, &val))
549 spin_lock(&drvdata->spinlock);
550 /* start by clearing all instruction event enable bits */
551 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
552 switch (drvdata->nr_event) {
/* NOTE(review): this case masks BIT(1) while the next case uses
 * BIT(0)|BIT(1) — looks inconsistent with "event 1"; confirm which bit
 * maps to the single-event case. */
554 /* generate Event element for event 1 */
555 config->eventctrl1 |= val & BIT(1);
558 /* generate Event element for event 1 and 2 */
559 config->eventctrl1 |= val & (BIT(0) | BIT(1));
562 /* generate Event element for event 1, 2 and 3 */
563 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
566 /* generate Event element for all 4 events */
567 config->eventctrl1 |= val & 0xF;
572 spin_unlock(&drvdata->spinlock);
575 static DEVICE_ATTR_RW(event_instren);
/*
 * sysfs RW "event_ts": timestamp event control (config->ts_ctrl).
 * Writes are rejected when the implementation has no global timestamping
 * (drvdata->ts_size == 0); the value is masked with ETMv4_EVENT_MASK.
 */
577 static ssize_t event_ts_show(struct device *dev,
578 struct device_attribute *attr,
582 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
583 struct etmv4_config *config = &drvdata->config;
585 val = config->ts_ctrl;
586 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
589 static ssize_t event_ts_store(struct device *dev,
590 struct device_attribute *attr,
591 const char *buf, size_t size)
594 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
595 struct etmv4_config *config = &drvdata->config;
597 if (kstrtoul(buf, 16, &val))
599 if (!drvdata->ts_size)
602 config->ts_ctrl = val & ETMv4_EVENT_MASK;
605 static DEVICE_ATTR_RW(event_ts);
/*
 * sysfs RW "syncfreq": trace synchronization period (config->syncfreq).
 * Read-only in practice when the period is fixed by hardware
 * (drvdata->syncpr == true); writes are masked with ETMv4_SYNC_MASK.
 */
607 static ssize_t syncfreq_show(struct device *dev,
608 struct device_attribute *attr,
612 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
613 struct etmv4_config *config = &drvdata->config;
615 val = config->syncfreq;
616 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
619 static ssize_t syncfreq_store(struct device *dev,
620 struct device_attribute *attr,
621 const char *buf, size_t size)
624 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
625 struct etmv4_config *config = &drvdata->config;
627 if (kstrtoul(buf, 16, &val))
629 if (drvdata->syncpr == true)
632 config->syncfreq = val & ETMv4_SYNC_MASK;
635 static DEVICE_ATTR_RW(syncfreq);
/*
 * sysfs RW "cyc_threshold": cycle-count threshold (TRCCCCTLR via
 * config->ccctlr). The written value is masked to the valid threshold
 * field and rejected when below the implementation minimum
 * (drvdata->ccitmin).
 */
637 static ssize_t cyc_threshold_show(struct device *dev,
638 struct device_attribute *attr,
642 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
643 struct etmv4_config *config = &drvdata->config;
645 val = config->ccctlr;
646 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
649 static ssize_t cyc_threshold_store(struct device *dev,
650 struct device_attribute *attr,
651 const char *buf, size_t size)
654 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
655 struct etmv4_config *config = &drvdata->config;
657 if (kstrtoul(buf, 16, &val))
660 /* mask off max threshold before checking min value */
661 val &= ETM_CYC_THRESHOLD_MASK;
662 if (val < drvdata->ccitmin)
665 config->ccctlr = val;
668 static DEVICE_ATTR_RW(cyc_threshold);
/*
 * sysfs RW "bb_ctrl": branch broadcast control (TRCBBCTLR via
 * config->bb_ctrl). Requires branch-broadcast support (drvdata->trcbb)
 * and at least one address comparator; value is restricted to bits[8:0].
 */
670 static ssize_t bb_ctrl_show(struct device *dev,
671 struct device_attribute *attr,
675 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
676 struct etmv4_config *config = &drvdata->config;
678 val = config->bb_ctrl;
679 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
682 static ssize_t bb_ctrl_store(struct device *dev,
683 struct device_attribute *attr,
684 const char *buf, size_t size)
687 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
688 struct etmv4_config *config = &drvdata->config;
690 if (kstrtoul(buf, 16, &val))
692 if (drvdata->trcbb == false)
694 if (!drvdata->nr_addr_cmp)
698 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
699 * individual range comparators. If include then at least 1
700 * range must be selected.
702 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
705 config->bb_ctrl = val & GENMASK(8, 0);
708 static DEVICE_ATTR_RW(bb_ctrl);
/*
 * sysfs RW "event_vinst": event selector field of the ViewInst control
 * register (config->vinst_ctrl & ETMv4_EVENT_MASK). The store replaces
 * only the event field, preserving the other TRCVICTLR bits.
 */
710 static ssize_t event_vinst_show(struct device *dev,
711 struct device_attribute *attr,
715 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
716 struct etmv4_config *config = &drvdata->config;
718 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
719 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
722 static ssize_t event_vinst_store(struct device *dev,
723 struct device_attribute *attr,
724 const char *buf, size_t size)
727 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
728 struct etmv4_config *config = &drvdata->config;
730 if (kstrtoul(buf, 16, &val))
733 spin_lock(&drvdata->spinlock);
734 val &= ETMv4_EVENT_MASK;
735 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
736 config->vinst_ctrl |= val;
737 spin_unlock(&drvdata->spinlock);
740 static DEVICE_ATTR_RW(event_vinst);
/*
 * sysfs RW "s_exlevel_vinst": Secure exception level filter bits
 * (EXLEVEL_S) of TRCVICTLR, exposed shifted down from bit 16. Writes are
 * masked with drvdata->s_ex_level so only implemented levels are enabled.
 */
742 static ssize_t s_exlevel_vinst_show(struct device *dev,
743 struct device_attribute *attr,
747 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
748 struct etmv4_config *config = &drvdata->config;
750 val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
751 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
754 static ssize_t s_exlevel_vinst_store(struct device *dev,
755 struct device_attribute *attr,
756 const char *buf, size_t size)
759 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
760 struct etmv4_config *config = &drvdata->config;
762 if (kstrtoul(buf, 16, &val))
765 spin_lock(&drvdata->spinlock);
766 /* clear all EXLEVEL_S bits */
767 config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
768 /* enable instruction tracing for corresponding exception level */
769 val &= drvdata->s_ex_level;
770 config->vinst_ctrl |= (val << 16);
771 spin_unlock(&drvdata->spinlock);
774 static DEVICE_ATTR_RW(s_exlevel_vinst);
/*
 * sysfs RW "ns_exlevel_vinst": Non-secure exception level filter bits
 * (EXLEVEL_NS, TRCVICTLR bits[23:20]), exposed shifted down from bit 20.
 * Writes are masked with drvdata->ns_ex_level.
 */
776 static ssize_t ns_exlevel_vinst_show(struct device *dev,
777 struct device_attribute *attr,
781 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
782 struct etmv4_config *config = &drvdata->config;
784 /* EXLEVEL_NS, bits[23:20] */
785 val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
786 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
789 static ssize_t ns_exlevel_vinst_store(struct device *dev,
790 struct device_attribute *attr,
791 const char *buf, size_t size)
794 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
795 struct etmv4_config *config = &drvdata->config;
797 if (kstrtoul(buf, 16, &val))
800 spin_lock(&drvdata->spinlock);
801 /* clear EXLEVEL_NS bits */
802 config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
803 /* enable instruction tracing for corresponding exception level */
804 val &= drvdata->ns_ex_level;
805 config->vinst_ctrl |= (val << 20);
806 spin_unlock(&drvdata->spinlock);
809 static DEVICE_ATTR_RW(ns_exlevel_vinst);
/*
 * sysfs RW "addr_idx": index of the address comparator that subsequent
 * addr_* accesses operate on. Bounded by nr_addr_cmp * 2 (two comparators
 * per implemented pair). The store takes the spinlock so the index cannot
 * change underneath other addr_* handlers that dereference it.
 */
811 static ssize_t addr_idx_show(struct device *dev,
812 struct device_attribute *attr,
816 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
817 struct etmv4_config *config = &drvdata->config;
819 val = config->addr_idx;
820 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
823 static ssize_t addr_idx_store(struct device *dev,
824 struct device_attribute *attr,
825 const char *buf, size_t size)
828 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
829 struct etmv4_config *config = &drvdata->config;
831 if (kstrtoul(buf, 16, &val))
833 if (val >= drvdata->nr_addr_cmp * 2)
837 * Use spinlock to ensure index doesn't change while it gets
838 * dereferenced multiple times within a spinlock block elsewhere.
840 spin_lock(&drvdata->spinlock);
841 config->addr_idx = val;
842 spin_unlock(&drvdata->spinlock);
845 static DEVICE_ATTR_RW(addr_idx);
/*
 * sysfs RW "addr_instdatatype": comparator type field TRCACATRn.TYPE
 * (bits[1:0] of config->addr_acc[idx]) for the selected comparator,
 * rendered as instr/data_load/data_store/data_load_store. Only "instr"
 * is handled on the write path in the visible code.
 */
847 static ssize_t addr_instdatatype_show(struct device *dev,
848 struct device_attribute *attr,
853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
854 struct etmv4_config *config = &drvdata->config;
856 spin_lock(&drvdata->spinlock);
857 idx = config->addr_idx;
858 val = BMVAL(config->addr_acc[idx], 0, 1);
859 len = scnprintf(buf, PAGE_SIZE, "%s\n",
860 val == ETM_INSTR_ADDR ? "instr" :
861 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
862 (val == ETM_DATA_STORE_ADDR ? "data_store" :
863 "data_load_store")));
864 spin_unlock(&drvdata->spinlock);
868 static ssize_t addr_instdatatype_store(struct device *dev,
869 struct device_attribute *attr,
870 const char *buf, size_t size)
874 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
875 struct etmv4_config *config = &drvdata->config;
/* Bound the token before sscanf %s copies it into the local buffer. */
877 if (strlen(buf) >= 20)
879 if (sscanf(buf, "%s", str) != 1)
882 spin_lock(&drvdata->spinlock);
883 idx = config->addr_idx;
884 if (!strcmp(str, "instr"))
885 /* TYPE, bits[1:0] */
886 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
888 spin_unlock(&drvdata->spinlock);
891 static DEVICE_ATTR_RW(addr_instdatatype);
893 static ssize_t addr_single_show(struct device *dev,
894 struct device_attribute *attr,
899 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
900 struct etmv4_config *config = &drvdata->config;
902 idx = config->addr_idx;
903 spin_lock(&drvdata->spinlock);
904 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
905 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
906 spin_unlock(&drvdata->spinlock);
909 val = (unsigned long)config->addr_val[idx];
910 spin_unlock(&drvdata->spinlock);
911 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
914 static ssize_t addr_single_store(struct device *dev,
915 struct device_attribute *attr,
916 const char *buf, size_t size)
920 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
921 struct etmv4_config *config = &drvdata->config;
923 if (kstrtoul(buf, 16, &val))
926 spin_lock(&drvdata->spinlock);
927 idx = config->addr_idx;
928 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
929 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
930 spin_unlock(&drvdata->spinlock);
934 config->addr_val[idx] = (u64)val;
935 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
936 spin_unlock(&drvdata->spinlock);
939 static DEVICE_ATTR_RW(addr_single);
/*
 * sysfs RW "addr_range": program the selected comparator pair
 * (addr_val[idx], addr_val[idx + 1]) as an address range. Both slots must
 * be NONE or already RANGE. The store accepts "start end [exclude]" and
 * then programs TRCVIIECTLR via etm4_set_mode_exclude(), defaulting the
 * include/exclude choice to ETM_MODE_EXCLUDE from config->mode.
 */
941 static ssize_t addr_range_show(struct device *dev,
942 struct device_attribute *attr,
946 unsigned long val1, val2;
947 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
948 struct etmv4_config *config = &drvdata->config;
950 spin_lock(&drvdata->spinlock);
951 idx = config->addr_idx;
953 spin_unlock(&drvdata->spinlock);
956 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
957 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
958 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
959 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
960 spin_unlock(&drvdata->spinlock);
964 val1 = (unsigned long)config->addr_val[idx];
965 val2 = (unsigned long)config->addr_val[idx + 1];
966 spin_unlock(&drvdata->spinlock);
967 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
970 static ssize_t addr_range_store(struct device *dev,
971 struct device_attribute *attr,
972 const char *buf, size_t size)
975 unsigned long val1, val2;
976 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
977 struct etmv4_config *config = &drvdata->config;
/* NOTE(review): 'exclude' is int but scanned with %x (expects unsigned
 * int *) — technically mismatched; consider unsigned. */
978 int elements, exclude;
980 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
982 /* exclude is optional, but need at least two parameter */
985 /* lower address comparator cannot have a higher address value */
989 spin_lock(&drvdata->spinlock);
990 idx = config->addr_idx;
992 spin_unlock(&drvdata->spinlock);
996 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
997 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
998 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
999 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1000 spin_unlock(&drvdata->spinlock);
1004 config->addr_val[idx] = (u64)val1;
1005 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1006 config->addr_val[idx + 1] = (u64)val2;
1007 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1009 * Program include or exclude control bits for vinst or vdata
1010 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1011 * use supplied value, or default to bit set in 'mode'
1014 exclude = config->mode & ETM_MODE_EXCLUDE;
1015 etm4_set_mode_exclude(drvdata, exclude ? true : false);
1017 spin_unlock(&drvdata->spinlock);
1020 static DEVICE_ATTR_RW(addr_range);
/*
 * sysfs RW "addr_start": program the selected comparator as a ViewInst
 * start address. Marks the slot ETM_ADDR_TYPE_START and sets the
 * corresponding start bit (BIT(idx)) in TRCVISSCTLR (config->vissctlr).
 */
1022 static ssize_t addr_start_show(struct device *dev,
1023 struct device_attribute *attr,
1028 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1029 struct etmv4_config *config = &drvdata->config;
1031 spin_lock(&drvdata->spinlock);
1032 idx = config->addr_idx;
1034 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1035 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1036 spin_unlock(&drvdata->spinlock);
1040 val = (unsigned long)config->addr_val[idx];
1041 spin_unlock(&drvdata->spinlock);
1042 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1045 static ssize_t addr_start_store(struct device *dev,
1046 struct device_attribute *attr,
1047 const char *buf, size_t size)
1051 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1052 struct etmv4_config *config = &drvdata->config;
1054 if (kstrtoul(buf, 16, &val))
1057 spin_lock(&drvdata->spinlock);
1058 idx = config->addr_idx;
1059 if (!drvdata->nr_addr_cmp) {
1060 spin_unlock(&drvdata->spinlock);
1063 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1064 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1065 spin_unlock(&drvdata->spinlock);
1069 config->addr_val[idx] = (u64)val;
1070 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1071 config->vissctlr |= BIT(idx);
1072 spin_unlock(&drvdata->spinlock);
1075 static DEVICE_ATTR_RW(addr_start);
/*
 * sysfs RW "addr_stop": program the selected comparator as a ViewInst
 * stop address. Marks the slot ETM_ADDR_TYPE_STOP and sets the stop bit
 * (BIT(idx + 16), upper half of TRCVISSCTLR) in config->vissctlr.
 */
1077 static ssize_t addr_stop_show(struct device *dev,
1078 struct device_attribute *attr,
1083 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1084 struct etmv4_config *config = &drvdata->config;
1086 spin_lock(&drvdata->spinlock);
1087 idx = config->addr_idx;
1089 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1090 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1091 spin_unlock(&drvdata->spinlock);
1095 val = (unsigned long)config->addr_val[idx];
1096 spin_unlock(&drvdata->spinlock);
1097 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1100 static ssize_t addr_stop_store(struct device *dev,
1101 struct device_attribute *attr,
1102 const char *buf, size_t size)
1106 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1107 struct etmv4_config *config = &drvdata->config;
1109 if (kstrtoul(buf, 16, &val))
1112 spin_lock(&drvdata->spinlock);
1113 idx = config->addr_idx;
1114 if (!drvdata->nr_addr_cmp) {
1115 spin_unlock(&drvdata->spinlock);
1118 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1119 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1120 spin_unlock(&drvdata->spinlock);
1124 config->addr_val[idx] = (u64)val;
1125 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1126 config->vissctlr |= BIT(idx + 16);
1127 spin_unlock(&drvdata->spinlock);
1130 static DEVICE_ATTR_RW(addr_stop);
/*
 * sysfs RW "addr_ctxtype": CONTEXTTYPE field (bits[3:2] of
 * config->addr_acc[idx]) for the selected comparator — none/ctxid/vmid/all.
 * ctxid and vmid selections are gated on the comparator counts
 * (drvdata->numcidc / drvdata->numvmidc).
 */
1132 static ssize_t addr_ctxtype_show(struct device *dev,
1133 struct device_attribute *attr,
1138 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1139 struct etmv4_config *config = &drvdata->config;
1141 spin_lock(&drvdata->spinlock);
1142 idx = config->addr_idx;
1143 /* CONTEXTTYPE, bits[3:2] */
1144 val = BMVAL(config->addr_acc[idx], 2, 3);
1145 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1146 (val == ETM_CTX_CTXID ? "ctxid" :
1147 (val == ETM_CTX_VMID ? "vmid" : "all")));
1148 spin_unlock(&drvdata->spinlock);
1152 static ssize_t addr_ctxtype_store(struct device *dev,
1153 struct device_attribute *attr,
1154 const char *buf, size_t size)
1158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1159 struct etmv4_config *config = &drvdata->config;
/* Bound the token before sscanf %s copies it into the local buffer. */
1161 if (strlen(buf) >= 10)
1163 if (sscanf(buf, "%s", str) != 1)
1166 spin_lock(&drvdata->spinlock);
1167 idx = config->addr_idx;
1168 if (!strcmp(str, "none"))
1169 /* start by clearing context type bits */
1170 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1171 else if (!strcmp(str, "ctxid")) {
1172 /* 0b01 The trace unit performs a Context ID */
1173 if (drvdata->numcidc) {
1174 config->addr_acc[idx] |= BIT(2);
1175 config->addr_acc[idx] &= ~BIT(3);
1177 } else if (!strcmp(str, "vmid")) {
1178 /* 0b10 The trace unit performs a VMID */
1179 if (drvdata->numvmidc) {
1180 config->addr_acc[idx] &= ~BIT(2);
1181 config->addr_acc[idx] |= BIT(3);
1183 } else if (!strcmp(str, "all")) {
1185 * 0b11 The trace unit performs a Context ID
1186 * comparison and a VMID
1188 if (drvdata->numcidc)
1189 config->addr_acc[idx] |= BIT(2);
1190 if (drvdata->numvmidc)
1191 config->addr_acc[idx] |= BIT(3);
1193 spin_unlock(&drvdata->spinlock);
1196 static DEVICE_ATTR_RW(addr_ctxtype);
/*
 * sysfs RW "addr_context": context ID comparator number, bits[6:4] of
 * config->addr_acc[idx]. Writes require more than one CID or VMID
 * comparator and bound the value by the larger of numcidc/numvmidc.
 */
1198 static ssize_t addr_context_show(struct device *dev,
1199 struct device_attribute *attr,
1204 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1205 struct etmv4_config *config = &drvdata->config;
1207 spin_lock(&drvdata->spinlock);
1208 idx = config->addr_idx;
1209 /* context ID comparator bits[6:4] */
1210 val = BMVAL(config->addr_acc[idx], 4, 6);
1211 spin_unlock(&drvdata->spinlock);
1212 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1215 static ssize_t addr_context_store(struct device *dev,
1216 struct device_attribute *attr,
1217 const char *buf, size_t size)
1221 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1222 struct etmv4_config *config = &drvdata->config;
1224 if (kstrtoul(buf, 16, &val))
1226 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1228 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1229 drvdata->numcidc : drvdata->numvmidc))
1232 spin_lock(&drvdata->spinlock);
1233 idx = config->addr_idx;
1234 /* clear context ID comparator bits[6:4] */
1235 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1236 config->addr_acc[idx] |= (val << 4);
1237 spin_unlock(&drvdata->spinlock);
1240 static DEVICE_ATTR_RW(addr_context);
/*
 * sysfs RW "addr_exlevel_s_ns": Secure/Non-secure exception level filter
 * field, bits[14:8] of config->addr_acc[idx], exposed shifted down to
 * bits[6:0]. Writes outside that 7-bit field are rejected.
 */
1242 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1243 struct device_attribute *attr,
1248 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249 struct etmv4_config *config = &drvdata->config;
1251 spin_lock(&drvdata->spinlock);
1252 idx = config->addr_idx;
1253 val = BMVAL(config->addr_acc[idx], 8, 14);
1254 spin_unlock(&drvdata->spinlock);
1255 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1258 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1259 struct device_attribute *attr,
1260 const char *buf, size_t size)
1264 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1265 struct etmv4_config *config = &drvdata->config;
/* Base 0: accepts decimal, 0x-hex or octal input. */
1267 if (kstrtoul(buf, 0, &val))
1270 if (val & ~((GENMASK(14, 8) >> 8)))
1273 spin_lock(&drvdata->spinlock);
1274 idx = config->addr_idx;
1275 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1276 config->addr_acc[idx] &= ~(GENMASK(14, 8));
1277 config->addr_acc[idx] |= (val << 8);
1278 spin_unlock(&drvdata->spinlock);
1281 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1283 static const char * const addr_type_names[] = {
1291 static ssize_t addr_cmp_view_show(struct device *dev,
1292 struct device_attribute *attr, char *buf)
1295 unsigned long addr_v, addr_v2, addr_ctrl;
1296 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1297 struct etmv4_config *config = &drvdata->config;
1299 bool exclude = false;
1301 spin_lock(&drvdata->spinlock);
1302 idx = config->addr_idx;
1303 addr_v = config->addr_val[idx];
1304 addr_ctrl = config->addr_acc[idx];
1305 addr_type = config->addr_type[idx];
1306 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1310 addr_v = config->addr_val[idx];
1312 addr_v2 = config->addr_val[idx + 1];
1314 exclude = config->viiectlr & BIT(idx / 2 + 16);
1316 spin_unlock(&drvdata->spinlock);
1318 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1319 addr_type_names[addr_type], addr_v);
1320 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1321 size += scnprintf(buf + size, PAGE_SIZE - size,
1322 " %#lx %s", addr_v2,
1323 exclude ? "exclude" : "include");
1325 size += scnprintf(buf + size, PAGE_SIZE - size,
1326 " ctrl(%#lx)\n", addr_ctrl);
1328 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1332 static DEVICE_ATTR_RO(addr_cmp_view);
1334 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1335 struct device_attribute *attr,
1339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1340 struct etmv4_config *config = &drvdata->config;
1342 if (!drvdata->nr_pe_cmp)
1344 val = config->vipcssctlr;
1345 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1347 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1348 struct device_attribute *attr,
1349 const char *buf, size_t size)
1352 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1353 struct etmv4_config *config = &drvdata->config;
1355 if (kstrtoul(buf, 16, &val))
1357 if (!drvdata->nr_pe_cmp)
1360 spin_lock(&drvdata->spinlock);
1361 config->vipcssctlr = val;
1362 spin_unlock(&drvdata->spinlock);
1365 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1367 static ssize_t seq_idx_show(struct device *dev,
1368 struct device_attribute *attr,
1372 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1373 struct etmv4_config *config = &drvdata->config;
1375 val = config->seq_idx;
1376 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1379 static ssize_t seq_idx_store(struct device *dev,
1380 struct device_attribute *attr,
1381 const char *buf, size_t size)
1384 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1385 struct etmv4_config *config = &drvdata->config;
1387 if (kstrtoul(buf, 16, &val))
1389 if (val >= drvdata->nrseqstate - 1)
1393 * Use spinlock to ensure index doesn't change while it gets
1394 * dereferenced multiple times within a spinlock block elsewhere.
1396 spin_lock(&drvdata->spinlock);
1397 config->seq_idx = val;
1398 spin_unlock(&drvdata->spinlock);
1401 static DEVICE_ATTR_RW(seq_idx);
1403 static ssize_t seq_state_show(struct device *dev,
1404 struct device_attribute *attr,
1408 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1409 struct etmv4_config *config = &drvdata->config;
1411 val = config->seq_state;
1412 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1415 static ssize_t seq_state_store(struct device *dev,
1416 struct device_attribute *attr,
1417 const char *buf, size_t size)
1420 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1421 struct etmv4_config *config = &drvdata->config;
1423 if (kstrtoul(buf, 16, &val))
1425 if (val >= drvdata->nrseqstate)
1428 config->seq_state = val;
1431 static DEVICE_ATTR_RW(seq_state);
1433 static ssize_t seq_event_show(struct device *dev,
1434 struct device_attribute *attr,
1439 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1440 struct etmv4_config *config = &drvdata->config;
1442 spin_lock(&drvdata->spinlock);
1443 idx = config->seq_idx;
1444 val = config->seq_ctrl[idx];
1445 spin_unlock(&drvdata->spinlock);
1446 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1449 static ssize_t seq_event_store(struct device *dev,
1450 struct device_attribute *attr,
1451 const char *buf, size_t size)
1455 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1456 struct etmv4_config *config = &drvdata->config;
1458 if (kstrtoul(buf, 16, &val))
1461 spin_lock(&drvdata->spinlock);
1462 idx = config->seq_idx;
1463 /* Seq control has two masks B[15:8] F[7:0] */
1464 config->seq_ctrl[idx] = val & 0xFFFF;
1465 spin_unlock(&drvdata->spinlock);
1468 static DEVICE_ATTR_RW(seq_event);
1470 static ssize_t seq_reset_event_show(struct device *dev,
1471 struct device_attribute *attr,
1475 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1476 struct etmv4_config *config = &drvdata->config;
1478 val = config->seq_rst;
1479 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1482 static ssize_t seq_reset_event_store(struct device *dev,
1483 struct device_attribute *attr,
1484 const char *buf, size_t size)
1487 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1488 struct etmv4_config *config = &drvdata->config;
1490 if (kstrtoul(buf, 16, &val))
1492 if (!(drvdata->nrseqstate))
1495 config->seq_rst = val & ETMv4_EVENT_MASK;
1498 static DEVICE_ATTR_RW(seq_reset_event);
1500 static ssize_t cntr_idx_show(struct device *dev,
1501 struct device_attribute *attr,
1505 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1506 struct etmv4_config *config = &drvdata->config;
1508 val = config->cntr_idx;
1509 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1512 static ssize_t cntr_idx_store(struct device *dev,
1513 struct device_attribute *attr,
1514 const char *buf, size_t size)
1517 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518 struct etmv4_config *config = &drvdata->config;
1520 if (kstrtoul(buf, 16, &val))
1522 if (val >= drvdata->nr_cntr)
1526 * Use spinlock to ensure index doesn't change while it gets
1527 * dereferenced multiple times within a spinlock block elsewhere.
1529 spin_lock(&drvdata->spinlock);
1530 config->cntr_idx = val;
1531 spin_unlock(&drvdata->spinlock);
1534 static DEVICE_ATTR_RW(cntr_idx);
1536 static ssize_t cntrldvr_show(struct device *dev,
1537 struct device_attribute *attr,
1542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1543 struct etmv4_config *config = &drvdata->config;
1545 spin_lock(&drvdata->spinlock);
1546 idx = config->cntr_idx;
1547 val = config->cntrldvr[idx];
1548 spin_unlock(&drvdata->spinlock);
1549 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1552 static ssize_t cntrldvr_store(struct device *dev,
1553 struct device_attribute *attr,
1554 const char *buf, size_t size)
1558 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1559 struct etmv4_config *config = &drvdata->config;
1561 if (kstrtoul(buf, 16, &val))
1563 if (val > ETM_CNTR_MAX_VAL)
1566 spin_lock(&drvdata->spinlock);
1567 idx = config->cntr_idx;
1568 config->cntrldvr[idx] = val;
1569 spin_unlock(&drvdata->spinlock);
1572 static DEVICE_ATTR_RW(cntrldvr);
1574 static ssize_t cntr_val_show(struct device *dev,
1575 struct device_attribute *attr,
1580 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1581 struct etmv4_config *config = &drvdata->config;
1583 spin_lock(&drvdata->spinlock);
1584 idx = config->cntr_idx;
1585 val = config->cntr_val[idx];
1586 spin_unlock(&drvdata->spinlock);
1587 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1590 static ssize_t cntr_val_store(struct device *dev,
1591 struct device_attribute *attr,
1592 const char *buf, size_t size)
1596 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1597 struct etmv4_config *config = &drvdata->config;
1599 if (kstrtoul(buf, 16, &val))
1601 if (val > ETM_CNTR_MAX_VAL)
1604 spin_lock(&drvdata->spinlock);
1605 idx = config->cntr_idx;
1606 config->cntr_val[idx] = val;
1607 spin_unlock(&drvdata->spinlock);
1610 static DEVICE_ATTR_RW(cntr_val);
1612 static ssize_t cntr_ctrl_show(struct device *dev,
1613 struct device_attribute *attr,
1618 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1619 struct etmv4_config *config = &drvdata->config;
1621 spin_lock(&drvdata->spinlock);
1622 idx = config->cntr_idx;
1623 val = config->cntr_ctrl[idx];
1624 spin_unlock(&drvdata->spinlock);
1625 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1628 static ssize_t cntr_ctrl_store(struct device *dev,
1629 struct device_attribute *attr,
1630 const char *buf, size_t size)
1634 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1635 struct etmv4_config *config = &drvdata->config;
1637 if (kstrtoul(buf, 16, &val))
1640 spin_lock(&drvdata->spinlock);
1641 idx = config->cntr_idx;
1642 config->cntr_ctrl[idx] = val;
1643 spin_unlock(&drvdata->spinlock);
1646 static DEVICE_ATTR_RW(cntr_ctrl);
1648 static ssize_t res_idx_show(struct device *dev,
1649 struct device_attribute *attr,
1653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1654 struct etmv4_config *config = &drvdata->config;
1656 val = config->res_idx;
1657 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1660 static ssize_t res_idx_store(struct device *dev,
1661 struct device_attribute *attr,
1662 const char *buf, size_t size)
1665 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1666 struct etmv4_config *config = &drvdata->config;
1668 if (kstrtoul(buf, 16, &val))
1671 * Resource selector pair 0 is always implemented and reserved,
1672 * namely an idx with 0 and 1 is illegal.
1674 if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1678 * Use spinlock to ensure index doesn't change while it gets
1679 * dereferenced multiple times within a spinlock block elsewhere.
1681 spin_lock(&drvdata->spinlock);
1682 config->res_idx = val;
1683 spin_unlock(&drvdata->spinlock);
1686 static DEVICE_ATTR_RW(res_idx);
1688 static ssize_t res_ctrl_show(struct device *dev,
1689 struct device_attribute *attr,
1694 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1695 struct etmv4_config *config = &drvdata->config;
1697 spin_lock(&drvdata->spinlock);
1698 idx = config->res_idx;
1699 val = config->res_ctrl[idx];
1700 spin_unlock(&drvdata->spinlock);
1701 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1704 static ssize_t res_ctrl_store(struct device *dev,
1705 struct device_attribute *attr,
1706 const char *buf, size_t size)
1710 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1711 struct etmv4_config *config = &drvdata->config;
1713 if (kstrtoul(buf, 16, &val))
1716 spin_lock(&drvdata->spinlock);
1717 idx = config->res_idx;
1718 /* For odd idx pair inversal bit is RES0 */
1720 /* PAIRINV, bit[21] */
1722 config->res_ctrl[idx] = val & GENMASK(21, 0);
1723 spin_unlock(&drvdata->spinlock);
1726 static DEVICE_ATTR_RW(res_ctrl);
1728 static ssize_t sshot_idx_show(struct device *dev,
1729 struct device_attribute *attr, char *buf)
1732 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1733 struct etmv4_config *config = &drvdata->config;
1735 val = config->ss_idx;
1736 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1739 static ssize_t sshot_idx_store(struct device *dev,
1740 struct device_attribute *attr,
1741 const char *buf, size_t size)
1744 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1745 struct etmv4_config *config = &drvdata->config;
1747 if (kstrtoul(buf, 16, &val))
1749 if (val >= drvdata->nr_ss_cmp)
1752 spin_lock(&drvdata->spinlock);
1753 config->ss_idx = val;
1754 spin_unlock(&drvdata->spinlock);
1757 static DEVICE_ATTR_RW(sshot_idx);
1759 static ssize_t sshot_ctrl_show(struct device *dev,
1760 struct device_attribute *attr,
1764 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1765 struct etmv4_config *config = &drvdata->config;
1767 spin_lock(&drvdata->spinlock);
1768 val = config->ss_ctrl[config->ss_idx];
1769 spin_unlock(&drvdata->spinlock);
1770 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1773 static ssize_t sshot_ctrl_store(struct device *dev,
1774 struct device_attribute *attr,
1775 const char *buf, size_t size)
1779 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1780 struct etmv4_config *config = &drvdata->config;
1782 if (kstrtoul(buf, 16, &val))
1785 spin_lock(&drvdata->spinlock);
1786 idx = config->ss_idx;
1787 config->ss_ctrl[idx] = val & GENMASK(24, 0);
1788 /* must clear bit 31 in related status register on programming */
1789 config->ss_status[idx] &= ~BIT(31);
1790 spin_unlock(&drvdata->spinlock);
1793 static DEVICE_ATTR_RW(sshot_ctrl);
1795 static ssize_t sshot_status_show(struct device *dev,
1796 struct device_attribute *attr, char *buf)
1799 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1800 struct etmv4_config *config = &drvdata->config;
1802 spin_lock(&drvdata->spinlock);
1803 val = config->ss_status[config->ss_idx];
1804 spin_unlock(&drvdata->spinlock);
1805 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1807 static DEVICE_ATTR_RO(sshot_status);
1809 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1810 struct device_attribute *attr,
1814 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1815 struct etmv4_config *config = &drvdata->config;
1817 spin_lock(&drvdata->spinlock);
1818 val = config->ss_pe_cmp[config->ss_idx];
1819 spin_unlock(&drvdata->spinlock);
1820 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1823 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1824 struct device_attribute *attr,
1825 const char *buf, size_t size)
1829 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1830 struct etmv4_config *config = &drvdata->config;
1832 if (kstrtoul(buf, 16, &val))
1835 spin_lock(&drvdata->spinlock);
1836 idx = config->ss_idx;
1837 config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
1838 /* must clear bit 31 in related status register on programming */
1839 config->ss_status[idx] &= ~BIT(31);
1840 spin_unlock(&drvdata->spinlock);
1843 static DEVICE_ATTR_RW(sshot_pe_ctrl);
1845 static ssize_t ctxid_idx_show(struct device *dev,
1846 struct device_attribute *attr,
1850 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1851 struct etmv4_config *config = &drvdata->config;
1853 val = config->ctxid_idx;
1854 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1857 static ssize_t ctxid_idx_store(struct device *dev,
1858 struct device_attribute *attr,
1859 const char *buf, size_t size)
1862 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1863 struct etmv4_config *config = &drvdata->config;
1865 if (kstrtoul(buf, 16, &val))
1867 if (val >= drvdata->numcidc)
1871 * Use spinlock to ensure index doesn't change while it gets
1872 * dereferenced multiple times within a spinlock block elsewhere.
1874 spin_lock(&drvdata->spinlock);
1875 config->ctxid_idx = val;
1876 spin_unlock(&drvdata->spinlock);
1879 static DEVICE_ATTR_RW(ctxid_idx);
1881 static ssize_t ctxid_pid_show(struct device *dev,
1882 struct device_attribute *attr,
1887 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1888 struct etmv4_config *config = &drvdata->config;
1891 * Don't use contextID tracing if coming from a PID namespace. See
1892 * comment in ctxid_pid_store().
1894 if (task_active_pid_ns(current) != &init_pid_ns)
1897 spin_lock(&drvdata->spinlock);
1898 idx = config->ctxid_idx;
1899 val = (unsigned long)config->ctxid_pid[idx];
1900 spin_unlock(&drvdata->spinlock);
1901 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1904 static ssize_t ctxid_pid_store(struct device *dev,
1905 struct device_attribute *attr,
1906 const char *buf, size_t size)
1910 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1911 struct etmv4_config *config = &drvdata->config;
1914 * When contextID tracing is enabled the tracers will insert the
1915 * value found in the contextID register in the trace stream. But if
1916 * a process is in a namespace the PID of that process as seen from the
1917 * namespace won't be what the kernel sees, something that makes the
1918 * feature confusing and can potentially leak kernel only information.
1919 * As such refuse to use the feature if @current is not in the initial
1922 if (task_active_pid_ns(current) != &init_pid_ns)
1926 * only implemented when ctxid tracing is enabled, i.e. at least one
1927 * ctxid comparator is implemented and ctxid is greater than 0 bits
1930 if (!drvdata->ctxid_size || !drvdata->numcidc)
1932 if (kstrtoul(buf, 16, &pid))
1935 spin_lock(&drvdata->spinlock);
1936 idx = config->ctxid_idx;
1937 config->ctxid_pid[idx] = (u64)pid;
1938 spin_unlock(&drvdata->spinlock);
1941 static DEVICE_ATTR_RW(ctxid_pid);
1943 static ssize_t ctxid_masks_show(struct device *dev,
1944 struct device_attribute *attr,
1947 unsigned long val1, val2;
1948 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1949 struct etmv4_config *config = &drvdata->config;
1952 * Don't use contextID tracing if coming from a PID namespace. See
1953 * comment in ctxid_pid_store().
1955 if (task_active_pid_ns(current) != &init_pid_ns)
1958 spin_lock(&drvdata->spinlock);
1959 val1 = config->ctxid_mask0;
1960 val2 = config->ctxid_mask1;
1961 spin_unlock(&drvdata->spinlock);
1962 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1965 static ssize_t ctxid_masks_store(struct device *dev,
1966 struct device_attribute *attr,
1967 const char *buf, size_t size)
1970 unsigned long val1, val2, mask;
1971 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1972 struct etmv4_config *config = &drvdata->config;
1976 * Don't use contextID tracing if coming from a PID namespace. See
1977 * comment in ctxid_pid_store().
1979 if (task_active_pid_ns(current) != &init_pid_ns)
1983 * only implemented when ctxid tracing is enabled, i.e. at least one
1984 * ctxid comparator is implemented and ctxid is greater than 0 bits
1987 if (!drvdata->ctxid_size || !drvdata->numcidc)
1989 /* one mask if <= 4 comparators, two for up to 8 */
1990 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
1991 if ((drvdata->numcidc > 4) && (nr_inputs != 2))
1994 spin_lock(&drvdata->spinlock);
1996 * each byte[0..3] controls mask value applied to ctxid
1999 switch (drvdata->numcidc) {
2001 /* COMP0, bits[7:0] */
2002 config->ctxid_mask0 = val1 & 0xFF;
2005 /* COMP1, bits[15:8] */
2006 config->ctxid_mask0 = val1 & 0xFFFF;
2009 /* COMP2, bits[23:16] */
2010 config->ctxid_mask0 = val1 & 0xFFFFFF;
2013 /* COMP3, bits[31:24] */
2014 config->ctxid_mask0 = val1;
2017 /* COMP4, bits[7:0] */
2018 config->ctxid_mask0 = val1;
2019 config->ctxid_mask1 = val2 & 0xFF;
2022 /* COMP5, bits[15:8] */
2023 config->ctxid_mask0 = val1;
2024 config->ctxid_mask1 = val2 & 0xFFFF;
2027 /* COMP6, bits[23:16] */
2028 config->ctxid_mask0 = val1;
2029 config->ctxid_mask1 = val2 & 0xFFFFFF;
2032 /* COMP7, bits[31:24] */
2033 config->ctxid_mask0 = val1;
2034 config->ctxid_mask1 = val2;
2040 * If software sets a mask bit to 1, it must program relevant byte
2041 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2042 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2043 * of ctxid comparator0 value (corresponding to byte 0) register.
2045 mask = config->ctxid_mask0;
2046 for (i = 0; i < drvdata->numcidc; i++) {
2047 /* mask value of corresponding ctxid comparator */
2048 maskbyte = mask & ETMv4_EVENT_MASK;
2050 * each bit corresponds to a byte of respective ctxid comparator
2053 for (j = 0; j < 8; j++) {
2055 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2058 /* Select the next ctxid comparator mask value */
2060 /* ctxid comparators[4-7] */
2061 mask = config->ctxid_mask1;
2066 spin_unlock(&drvdata->spinlock);
2069 static DEVICE_ATTR_RW(ctxid_masks);
2071 static ssize_t vmid_idx_show(struct device *dev,
2072 struct device_attribute *attr,
2076 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2077 struct etmv4_config *config = &drvdata->config;
2079 val = config->vmid_idx;
2080 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2083 static ssize_t vmid_idx_store(struct device *dev,
2084 struct device_attribute *attr,
2085 const char *buf, size_t size)
2088 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2089 struct etmv4_config *config = &drvdata->config;
2091 if (kstrtoul(buf, 16, &val))
2093 if (val >= drvdata->numvmidc)
2097 * Use spinlock to ensure index doesn't change while it gets
2098 * dereferenced multiple times within a spinlock block elsewhere.
2100 spin_lock(&drvdata->spinlock);
2101 config->vmid_idx = val;
2102 spin_unlock(&drvdata->spinlock);
2105 static DEVICE_ATTR_RW(vmid_idx);
2107 static ssize_t vmid_val_show(struct device *dev,
2108 struct device_attribute *attr,
2112 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2113 struct etmv4_config *config = &drvdata->config;
2115 val = (unsigned long)config->vmid_val[config->vmid_idx];
2116 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2119 static ssize_t vmid_val_store(struct device *dev,
2120 struct device_attribute *attr,
2121 const char *buf, size_t size)
2124 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2125 struct etmv4_config *config = &drvdata->config;
2128 * only implemented when vmid tracing is enabled, i.e. at least one
2129 * vmid comparator is implemented and at least 8 bit vmid size
2131 if (!drvdata->vmid_size || !drvdata->numvmidc)
2133 if (kstrtoul(buf, 16, &val))
2136 spin_lock(&drvdata->spinlock);
2137 config->vmid_val[config->vmid_idx] = (u64)val;
2138 spin_unlock(&drvdata->spinlock);
2141 static DEVICE_ATTR_RW(vmid_val);
2143 static ssize_t vmid_masks_show(struct device *dev,
2144 struct device_attribute *attr, char *buf)
2146 unsigned long val1, val2;
2147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2148 struct etmv4_config *config = &drvdata->config;
2150 spin_lock(&drvdata->spinlock);
2151 val1 = config->vmid_mask0;
2152 val2 = config->vmid_mask1;
2153 spin_unlock(&drvdata->spinlock);
2154 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2157 static ssize_t vmid_masks_store(struct device *dev,
2158 struct device_attribute *attr,
2159 const char *buf, size_t size)
2162 unsigned long val1, val2, mask;
2163 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2164 struct etmv4_config *config = &drvdata->config;
2168 * only implemented when vmid tracing is enabled, i.e. at least one
2169 * vmid comparator is implemented and at least 8 bit vmid size
2171 if (!drvdata->vmid_size || !drvdata->numvmidc)
2173 /* one mask if <= 4 comparators, two for up to 8 */
2174 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2175 if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2178 spin_lock(&drvdata->spinlock);
2181 * each byte[0..3] controls mask value applied to vmid
2184 switch (drvdata->numvmidc) {
2186 /* COMP0, bits[7:0] */
2187 config->vmid_mask0 = val1 & 0xFF;
2190 /* COMP1, bits[15:8] */
2191 config->vmid_mask0 = val1 & 0xFFFF;
2194 /* COMP2, bits[23:16] */
2195 config->vmid_mask0 = val1 & 0xFFFFFF;
2198 /* COMP3, bits[31:24] */
2199 config->vmid_mask0 = val1;
2202 /* COMP4, bits[7:0] */
2203 config->vmid_mask0 = val1;
2204 config->vmid_mask1 = val2 & 0xFF;
2207 /* COMP5, bits[15:8] */
2208 config->vmid_mask0 = val1;
2209 config->vmid_mask1 = val2 & 0xFFFF;
2212 /* COMP6, bits[23:16] */
2213 config->vmid_mask0 = val1;
2214 config->vmid_mask1 = val2 & 0xFFFFFF;
2217 /* COMP7, bits[31:24] */
2218 config->vmid_mask0 = val1;
2219 config->vmid_mask1 = val2;
2226 * If software sets a mask bit to 1, it must program relevant byte
2227 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2228 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2229 * of vmid comparator0 value (corresponding to byte 0) register.
2231 mask = config->vmid_mask0;
2232 for (i = 0; i < drvdata->numvmidc; i++) {
2233 /* mask value of corresponding vmid comparator */
2234 maskbyte = mask & ETMv4_EVENT_MASK;
2236 * each bit corresponds to a byte of respective vmid comparator
2239 for (j = 0; j < 8; j++) {
2241 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2244 /* Select the next vmid comparator mask value */
2246 /* vmid comparators[4-7] */
2247 mask = config->vmid_mask1;
2251 spin_unlock(&drvdata->spinlock);
2254 static DEVICE_ATTR_RW(vmid_masks);
2256 static ssize_t cpu_show(struct device *dev,
2257 struct device_attribute *attr, char *buf)
2260 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2263 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2266 static DEVICE_ATTR_RO(cpu);
2268 static struct attribute *coresight_etmv4_attrs[] = {
2269 &dev_attr_nr_pe_cmp.attr,
2270 &dev_attr_nr_addr_cmp.attr,
2271 &dev_attr_nr_cntr.attr,
2272 &dev_attr_nr_ext_inp.attr,
2273 &dev_attr_numcidc.attr,
2274 &dev_attr_numvmidc.attr,
2275 &dev_attr_nrseqstate.attr,
2276 &dev_attr_nr_resource.attr,
2277 &dev_attr_nr_ss_cmp.attr,
2278 &dev_attr_reset.attr,
2279 &dev_attr_mode.attr,
2281 &dev_attr_event.attr,
2282 &dev_attr_event_instren.attr,
2283 &dev_attr_event_ts.attr,
2284 &dev_attr_syncfreq.attr,
2285 &dev_attr_cyc_threshold.attr,
2286 &dev_attr_bb_ctrl.attr,
2287 &dev_attr_event_vinst.attr,
2288 &dev_attr_s_exlevel_vinst.attr,
2289 &dev_attr_ns_exlevel_vinst.attr,
2290 &dev_attr_addr_idx.attr,
2291 &dev_attr_addr_instdatatype.attr,
2292 &dev_attr_addr_single.attr,
2293 &dev_attr_addr_range.attr,
2294 &dev_attr_addr_start.attr,
2295 &dev_attr_addr_stop.attr,
2296 &dev_attr_addr_ctxtype.attr,
2297 &dev_attr_addr_context.attr,
2298 &dev_attr_addr_exlevel_s_ns.attr,
2299 &dev_attr_addr_cmp_view.attr,
2300 &dev_attr_vinst_pe_cmp_start_stop.attr,
2301 &dev_attr_sshot_idx.attr,
2302 &dev_attr_sshot_ctrl.attr,
2303 &dev_attr_sshot_pe_ctrl.attr,
2304 &dev_attr_sshot_status.attr,
2305 &dev_attr_seq_idx.attr,
2306 &dev_attr_seq_state.attr,
2307 &dev_attr_seq_event.attr,
2308 &dev_attr_seq_reset_event.attr,
2309 &dev_attr_cntr_idx.attr,
2310 &dev_attr_cntrldvr.attr,
2311 &dev_attr_cntr_val.attr,
2312 &dev_attr_cntr_ctrl.attr,
2313 &dev_attr_res_idx.attr,
2314 &dev_attr_res_ctrl.attr,
2315 &dev_attr_ctxid_idx.attr,
2316 &dev_attr_ctxid_pid.attr,
2317 &dev_attr_ctxid_masks.attr,
2318 &dev_attr_vmid_idx.attr,
2319 &dev_attr_vmid_val.attr,
2320 &dev_attr_vmid_masks.attr,
2330 static void do_smp_cross_read(void *data)
2332 struct etmv4_reg *reg = data;
2334 reg->data = readl_relaxed(reg->addr);
2337 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2340 struct etmv4_reg reg;
2342 reg.addr = drvdata->base + offset;
2344 * smp cross call ensures the CPU will be powered up before
2345 * accessing the ETMv4 trace core registers
2347 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
/* Declare a sysfs attribute that reads a register directly. */
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

/* Declare a sysfs attribute that reads a register via an SMP cross call. */
#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)
/* Management registers safe to read directly (always-on power domain). */
coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
/* Trace-core registers must be read on the tracer's CPU. */
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2372 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2373 &dev_attr_trcoslsr.attr,
2374 &dev_attr_trcpdcr.attr,
2375 &dev_attr_trcpdsr.attr,
2376 &dev_attr_trclsr.attr,
2377 &dev_attr_trcconfig.attr,
2378 &dev_attr_trctraceid.attr,
2379 &dev_attr_trcauthstatus.attr,
2380 &dev_attr_trcdevid.attr,
2381 &dev_attr_trcdevtype.attr,
2382 &dev_attr_trcpidr0.attr,
2383 &dev_attr_trcpidr1.attr,
2384 &dev_attr_trcpidr2.attr,
2385 &dev_attr_trcpidr3.attr,
/* ID registers, read via SMP cross call on the tracer's CPU. */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2403 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2404 &dev_attr_trcidr0.attr,
2405 &dev_attr_trcidr1.attr,
2406 &dev_attr_trcidr2.attr,
2407 &dev_attr_trcidr3.attr,
2408 &dev_attr_trcidr4.attr,
2409 &dev_attr_trcidr5.attr,
2410 /* trcidr[6,7] are reserved */
2411 &dev_attr_trcidr8.attr,
2412 &dev_attr_trcidr9.attr,
2413 &dev_attr_trcidr10.attr,
2414 &dev_attr_trcidr11.attr,
2415 &dev_attr_trcidr12.attr,
2416 &dev_attr_trcidr13.attr,
2420 static const struct attribute_group coresight_etmv4_group = {
2421 .attrs = coresight_etmv4_attrs,
2424 static const struct attribute_group coresight_etmv4_mgmt_group = {
2425 .attrs = coresight_etmv4_mgmt_attrs,
2429 static const struct attribute_group coresight_etmv4_trcidr_group = {
2430 .attrs = coresight_etmv4_trcidr_attrs,
2434 const struct attribute_group *coresight_etmv4_groups[] = {
2435 &coresight_etmv4_group,
2436 &coresight_etmv4_mgmt_group,
2437 &coresight_etmv4_trcidr_group,