// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
16 struct etmv4_config *config = &drvdata->config;
18 idx = config->addr_idx;
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
37 if (exclude == true) {
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 static DEVICE_ATTR_RO(nr_pe_cmp);
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 static DEVICE_ATTR_RO(nr_addr_cmp);
80 static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 static DEVICE_ATTR_RO(nr_cntr);
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 static DEVICE_ATTR_RO(nr_ext_inp);
104 static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 static DEVICE_ATTR_RO(numcidc);
116 static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 static DEVICE_ATTR_RO(numvmidc);
128 static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 static DEVICE_ATTR_RO(nrseqstate);
140 static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 static DEVICE_ATTR_RO(nr_resource);
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 static DEVICE_ATTR_RO(nr_ss_cmp);
164 static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
173 if (kstrtoul(buf, 16, &val))
176 spin_lock(&drvdata->spinlock);
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
199 /* Reset trace synchronization period to 2^8 = 256 bytes*/
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
208 config->vinst_ctrl |= BIT(0);
209 if (drvdata->nr_addr_cmp == true) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
221 /* Disable seq events */
222 for (i = 0; i < drvdata->nrseqstate-1; i++)
223 config->seq_ctrl[i] = 0x0;
224 config->seq_rst = 0x0;
225 config->seq_state = 0x0;
227 /* Disable external input events */
228 config->ext_inp = 0x0;
230 config->cntr_idx = 0x0;
231 for (i = 0; i < drvdata->nr_cntr; i++) {
232 config->cntrldvr[i] = 0x0;
233 config->cntr_ctrl[i] = 0x0;
234 config->cntr_val[i] = 0x0;
237 config->res_idx = 0x0;
238 for (i = 0; i < drvdata->nr_resource; i++)
239 config->res_ctrl[i] = 0x0;
241 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
242 config->ss_ctrl[i] = 0x0;
243 config->ss_pe_cmp[i] = 0x0;
246 config->addr_idx = 0x0;
247 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
248 config->addr_val[i] = 0x0;
249 config->addr_acc[i] = 0x0;
250 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
253 config->ctxid_idx = 0x0;
254 for (i = 0; i < drvdata->numcidc; i++)
255 config->ctxid_pid[i] = 0x0;
257 config->ctxid_mask0 = 0x0;
258 config->ctxid_mask1 = 0x0;
260 config->vmid_idx = 0x0;
261 for (i = 0; i < drvdata->numvmidc; i++)
262 config->vmid_val[i] = 0x0;
263 config->vmid_mask0 = 0x0;
264 config->vmid_mask1 = 0x0;
266 drvdata->trcid = drvdata->cpu + 1;
268 spin_unlock(&drvdata->spinlock);
272 static DEVICE_ATTR_WO(reset);
274 static ssize_t mode_show(struct device *dev,
275 struct device_attribute *attr,
279 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
280 struct etmv4_config *config = &drvdata->config;
283 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
286 static ssize_t mode_store(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf, size_t size)
290 unsigned long val, mode;
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
294 if (kstrtoul(buf, 16, &val))
297 spin_lock(&drvdata->spinlock);
298 config->mode = val & ETMv4_MODE_ALL;
300 if (config->mode & ETM_MODE_EXCLUDE)
301 etm4_set_mode_exclude(drvdata, true);
303 etm4_set_mode_exclude(drvdata, false);
305 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */
307 config->cfg &= ~(BIT(1) | BIT(2));
308 if (config->mode & ETM_MODE_LOAD)
309 /* 0b01 Trace load instructions as P0 instructions */
310 config->cfg |= BIT(1);
311 if (config->mode & ETM_MODE_STORE)
312 /* 0b10 Trace store instructions as P0 instructions */
313 config->cfg |= BIT(2);
314 if (config->mode & ETM_MODE_LOAD_STORE)
316 * 0b11 Trace load and store instructions
319 config->cfg |= BIT(1) | BIT(2);
322 /* bit[3], Branch broadcast mode */
323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 config->cfg |= BIT(3);
326 config->cfg &= ~BIT(3);
328 /* bit[4], Cycle counting instruction trace bit */
329 if ((config->mode & ETMv4_MODE_CYCACC) &&
330 (drvdata->trccci == true))
331 config->cfg |= BIT(4);
333 config->cfg &= ~BIT(4);
335 /* bit[6], Context ID tracing bit */
336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 config->cfg |= BIT(6);
339 config->cfg &= ~BIT(6);
341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 config->cfg |= BIT(7);
344 config->cfg &= ~BIT(7);
346 /* bits[10:8], Conditional instruction tracing bit */
347 mode = ETM_MODE_COND(config->mode);
348 if (drvdata->trccond == true) {
349 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
350 config->cfg |= mode << 8;
353 /* bit[11], Global timestamp tracing bit */
354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 config->cfg |= BIT(11);
357 config->cfg &= ~BIT(11);
359 /* bit[12], Return stack enable bit */
360 if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 (drvdata->retstack == true))
362 config->cfg |= BIT(12);
364 config->cfg &= ~BIT(12);
366 /* bits[14:13], Q element enable field */
367 mode = ETM_MODE_QELEM(config->mode);
368 /* start by clearing QE bits */
369 config->cfg &= ~(BIT(13) | BIT(14));
371 * if supported, Q elements with instruction counts are enabled.
372 * Always set the low bit for any requested mode. Valid combos are
373 * 0b00, 0b01 and 0b11.
375 if (mode && drvdata->q_support)
376 config->cfg |= BIT(13);
378 * if supported, Q elements with and without instruction
381 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
382 config->cfg |= BIT(14);
384 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
385 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
386 (drvdata->atbtrig == true))
387 config->eventctrl1 |= BIT(11);
389 config->eventctrl1 &= ~BIT(11);
391 /* bit[12], Low-power state behavior override bit */
392 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
393 (drvdata->lpoverride == true))
394 config->eventctrl1 |= BIT(12);
396 config->eventctrl1 &= ~BIT(12);
398 /* bit[8], Instruction stall bit */
399 if (config->mode & ETM_MODE_ISTALL_EN)
400 config->stall_ctrl |= BIT(8);
402 config->stall_ctrl &= ~BIT(8);
404 /* bit[10], Prioritize instruction trace bit */
405 if (config->mode & ETM_MODE_INSTPRIO)
406 config->stall_ctrl |= BIT(10);
408 config->stall_ctrl &= ~BIT(10);
410 /* bit[13], Trace overflow prevention bit */
411 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
412 (drvdata->nooverflow == true))
413 config->stall_ctrl |= BIT(13);
415 config->stall_ctrl &= ~BIT(13);
417 /* bit[9] Start/stop logic control bit */
418 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
419 config->vinst_ctrl |= BIT(9);
421 config->vinst_ctrl &= ~BIT(9);
423 /* bit[10], Whether a trace unit must trace a Reset exception */
424 if (config->mode & ETM_MODE_TRACE_RESET)
425 config->vinst_ctrl |= BIT(10);
427 config->vinst_ctrl &= ~BIT(10);
429 /* bit[11], Whether a trace unit must trace a system error exception */
430 if ((config->mode & ETM_MODE_TRACE_ERR) &&
431 (drvdata->trc_error == true))
432 config->vinst_ctrl |= BIT(11);
434 config->vinst_ctrl &= ~BIT(11);
436 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
437 etm4_config_trace_mode(config);
439 spin_unlock(&drvdata->spinlock);
443 static DEVICE_ATTR_RW(mode);
445 static ssize_t pe_show(struct device *dev,
446 struct device_attribute *attr,
450 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
451 struct etmv4_config *config = &drvdata->config;
453 val = config->pe_sel;
454 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
457 static ssize_t pe_store(struct device *dev,
458 struct device_attribute *attr,
459 const char *buf, size_t size)
462 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
463 struct etmv4_config *config = &drvdata->config;
465 if (kstrtoul(buf, 16, &val))
468 spin_lock(&drvdata->spinlock);
469 if (val > drvdata->nr_pe) {
470 spin_unlock(&drvdata->spinlock);
474 config->pe_sel = val;
475 spin_unlock(&drvdata->spinlock);
478 static DEVICE_ATTR_RW(pe);
480 static ssize_t event_show(struct device *dev,
481 struct device_attribute *attr,
485 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
486 struct etmv4_config *config = &drvdata->config;
488 val = config->eventctrl0;
489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
492 static ssize_t event_store(struct device *dev,
493 struct device_attribute *attr,
494 const char *buf, size_t size)
497 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
498 struct etmv4_config *config = &drvdata->config;
500 if (kstrtoul(buf, 16, &val))
503 spin_lock(&drvdata->spinlock);
504 switch (drvdata->nr_event) {
506 /* EVENT0, bits[7:0] */
507 config->eventctrl0 = val & 0xFF;
510 /* EVENT1, bits[15:8] */
511 config->eventctrl0 = val & 0xFFFF;
514 /* EVENT2, bits[23:16] */
515 config->eventctrl0 = val & 0xFFFFFF;
518 /* EVENT3, bits[31:24] */
519 config->eventctrl0 = val;
524 spin_unlock(&drvdata->spinlock);
527 static DEVICE_ATTR_RW(event);
529 static ssize_t event_instren_show(struct device *dev,
530 struct device_attribute *attr,
534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
535 struct etmv4_config *config = &drvdata->config;
537 val = BMVAL(config->eventctrl1, 0, 3);
538 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
541 static ssize_t event_instren_store(struct device *dev,
542 struct device_attribute *attr,
543 const char *buf, size_t size)
546 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
547 struct etmv4_config *config = &drvdata->config;
549 if (kstrtoul(buf, 16, &val))
552 spin_lock(&drvdata->spinlock);
553 /* start by clearing all instruction event enable bits */
554 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
555 switch (drvdata->nr_event) {
557 /* generate Event element for event 1 */
558 config->eventctrl1 |= val & BIT(1);
561 /* generate Event element for event 1 and 2 */
562 config->eventctrl1 |= val & (BIT(0) | BIT(1));
565 /* generate Event element for event 1, 2 and 3 */
566 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
569 /* generate Event element for all 4 events */
570 config->eventctrl1 |= val & 0xF;
575 spin_unlock(&drvdata->spinlock);
578 static DEVICE_ATTR_RW(event_instren);
580 static ssize_t event_ts_show(struct device *dev,
581 struct device_attribute *attr,
585 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
586 struct etmv4_config *config = &drvdata->config;
588 val = config->ts_ctrl;
589 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
592 static ssize_t event_ts_store(struct device *dev,
593 struct device_attribute *attr,
594 const char *buf, size_t size)
597 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
598 struct etmv4_config *config = &drvdata->config;
600 if (kstrtoul(buf, 16, &val))
602 if (!drvdata->ts_size)
605 config->ts_ctrl = val & ETMv4_EVENT_MASK;
608 static DEVICE_ATTR_RW(event_ts);
610 static ssize_t syncfreq_show(struct device *dev,
611 struct device_attribute *attr,
615 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
616 struct etmv4_config *config = &drvdata->config;
618 val = config->syncfreq;
619 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
622 static ssize_t syncfreq_store(struct device *dev,
623 struct device_attribute *attr,
624 const char *buf, size_t size)
627 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
628 struct etmv4_config *config = &drvdata->config;
630 if (kstrtoul(buf, 16, &val))
632 if (drvdata->syncpr == true)
635 config->syncfreq = val & ETMv4_SYNC_MASK;
638 static DEVICE_ATTR_RW(syncfreq);
640 static ssize_t cyc_threshold_show(struct device *dev,
641 struct device_attribute *attr,
645 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
646 struct etmv4_config *config = &drvdata->config;
648 val = config->ccctlr;
649 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
652 static ssize_t cyc_threshold_store(struct device *dev,
653 struct device_attribute *attr,
654 const char *buf, size_t size)
657 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
658 struct etmv4_config *config = &drvdata->config;
660 if (kstrtoul(buf, 16, &val))
663 /* mask off max threshold before checking min value */
664 val &= ETM_CYC_THRESHOLD_MASK;
665 if (val < drvdata->ccitmin)
668 config->ccctlr = val;
671 static DEVICE_ATTR_RW(cyc_threshold);
673 static ssize_t bb_ctrl_show(struct device *dev,
674 struct device_attribute *attr,
678 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
679 struct etmv4_config *config = &drvdata->config;
681 val = config->bb_ctrl;
682 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
685 static ssize_t bb_ctrl_store(struct device *dev,
686 struct device_attribute *attr,
687 const char *buf, size_t size)
690 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
691 struct etmv4_config *config = &drvdata->config;
693 if (kstrtoul(buf, 16, &val))
695 if (drvdata->trcbb == false)
697 if (!drvdata->nr_addr_cmp)
701 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
702 * individual range comparators. If include then at least 1
703 * range must be selected.
705 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
708 config->bb_ctrl = val & GENMASK(8, 0);
711 static DEVICE_ATTR_RW(bb_ctrl);
713 static ssize_t event_vinst_show(struct device *dev,
714 struct device_attribute *attr,
718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
719 struct etmv4_config *config = &drvdata->config;
721 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
722 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
725 static ssize_t event_vinst_store(struct device *dev,
726 struct device_attribute *attr,
727 const char *buf, size_t size)
730 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
731 struct etmv4_config *config = &drvdata->config;
733 if (kstrtoul(buf, 16, &val))
736 spin_lock(&drvdata->spinlock);
737 val &= ETMv4_EVENT_MASK;
738 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
739 config->vinst_ctrl |= val;
740 spin_unlock(&drvdata->spinlock);
743 static DEVICE_ATTR_RW(event_vinst);
745 static ssize_t s_exlevel_vinst_show(struct device *dev,
746 struct device_attribute *attr,
750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
751 struct etmv4_config *config = &drvdata->config;
753 val = BMVAL(config->vinst_ctrl, 16, 19);
754 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
757 static ssize_t s_exlevel_vinst_store(struct device *dev,
758 struct device_attribute *attr,
759 const char *buf, size_t size)
762 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
763 struct etmv4_config *config = &drvdata->config;
765 if (kstrtoul(buf, 16, &val))
768 spin_lock(&drvdata->spinlock);
769 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
770 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
771 /* enable instruction tracing for corresponding exception level */
772 val &= drvdata->s_ex_level;
773 config->vinst_ctrl |= (val << 16);
774 spin_unlock(&drvdata->spinlock);
777 static DEVICE_ATTR_RW(s_exlevel_vinst);
779 static ssize_t ns_exlevel_vinst_show(struct device *dev,
780 struct device_attribute *attr,
784 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
785 struct etmv4_config *config = &drvdata->config;
787 /* EXLEVEL_NS, bits[23:20] */
788 val = BMVAL(config->vinst_ctrl, 20, 23);
789 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
792 static ssize_t ns_exlevel_vinst_store(struct device *dev,
793 struct device_attribute *attr,
794 const char *buf, size_t size)
797 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
798 struct etmv4_config *config = &drvdata->config;
800 if (kstrtoul(buf, 16, &val))
803 spin_lock(&drvdata->spinlock);
804 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
805 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
806 /* enable instruction tracing for corresponding exception level */
807 val &= drvdata->ns_ex_level;
808 config->vinst_ctrl |= (val << 20);
809 spin_unlock(&drvdata->spinlock);
812 static DEVICE_ATTR_RW(ns_exlevel_vinst);
814 static ssize_t addr_idx_show(struct device *dev,
815 struct device_attribute *attr,
819 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
820 struct etmv4_config *config = &drvdata->config;
822 val = config->addr_idx;
823 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
826 static ssize_t addr_idx_store(struct device *dev,
827 struct device_attribute *attr,
828 const char *buf, size_t size)
831 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
832 struct etmv4_config *config = &drvdata->config;
834 if (kstrtoul(buf, 16, &val))
836 if (val >= drvdata->nr_addr_cmp * 2)
840 * Use spinlock to ensure index doesn't change while it gets
841 * dereferenced multiple times within a spinlock block elsewhere.
843 spin_lock(&drvdata->spinlock);
844 config->addr_idx = val;
845 spin_unlock(&drvdata->spinlock);
848 static DEVICE_ATTR_RW(addr_idx);
850 static ssize_t addr_instdatatype_show(struct device *dev,
851 struct device_attribute *attr,
856 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
857 struct etmv4_config *config = &drvdata->config;
859 spin_lock(&drvdata->spinlock);
860 idx = config->addr_idx;
861 val = BMVAL(config->addr_acc[idx], 0, 1);
862 len = scnprintf(buf, PAGE_SIZE, "%s\n",
863 val == ETM_INSTR_ADDR ? "instr" :
864 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
865 (val == ETM_DATA_STORE_ADDR ? "data_store" :
866 "data_load_store")));
867 spin_unlock(&drvdata->spinlock);
871 static ssize_t addr_instdatatype_store(struct device *dev,
872 struct device_attribute *attr,
873 const char *buf, size_t size)
877 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
878 struct etmv4_config *config = &drvdata->config;
880 if (strlen(buf) >= 20)
882 if (sscanf(buf, "%s", str) != 1)
885 spin_lock(&drvdata->spinlock);
886 idx = config->addr_idx;
887 if (!strcmp(str, "instr"))
888 /* TYPE, bits[1:0] */
889 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
891 spin_unlock(&drvdata->spinlock);
894 static DEVICE_ATTR_RW(addr_instdatatype);
896 static ssize_t addr_single_show(struct device *dev,
897 struct device_attribute *attr,
902 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
903 struct etmv4_config *config = &drvdata->config;
905 idx = config->addr_idx;
906 spin_lock(&drvdata->spinlock);
907 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
908 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
909 spin_unlock(&drvdata->spinlock);
912 val = (unsigned long)config->addr_val[idx];
913 spin_unlock(&drvdata->spinlock);
914 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
917 static ssize_t addr_single_store(struct device *dev,
918 struct device_attribute *attr,
919 const char *buf, size_t size)
923 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
924 struct etmv4_config *config = &drvdata->config;
926 if (kstrtoul(buf, 16, &val))
929 spin_lock(&drvdata->spinlock);
930 idx = config->addr_idx;
931 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
932 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
933 spin_unlock(&drvdata->spinlock);
937 config->addr_val[idx] = (u64)val;
938 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
939 spin_unlock(&drvdata->spinlock);
942 static DEVICE_ATTR_RW(addr_single);
944 static ssize_t addr_range_show(struct device *dev,
945 struct device_attribute *attr,
949 unsigned long val1, val2;
950 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
951 struct etmv4_config *config = &drvdata->config;
953 spin_lock(&drvdata->spinlock);
954 idx = config->addr_idx;
956 spin_unlock(&drvdata->spinlock);
959 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
960 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
961 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
962 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
963 spin_unlock(&drvdata->spinlock);
967 val1 = (unsigned long)config->addr_val[idx];
968 val2 = (unsigned long)config->addr_val[idx + 1];
969 spin_unlock(&drvdata->spinlock);
970 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
973 static ssize_t addr_range_store(struct device *dev,
974 struct device_attribute *attr,
975 const char *buf, size_t size)
978 unsigned long val1, val2;
979 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
980 struct etmv4_config *config = &drvdata->config;
982 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
984 /* lower address comparator cannot have a higher address value */
988 spin_lock(&drvdata->spinlock);
989 idx = config->addr_idx;
991 spin_unlock(&drvdata->spinlock);
995 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
996 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
997 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
998 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
999 spin_unlock(&drvdata->spinlock);
1003 config->addr_val[idx] = (u64)val1;
1004 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1005 config->addr_val[idx + 1] = (u64)val2;
1006 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1008 * Program include or exclude control bits for vinst or vdata
1009 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1011 if (config->mode & ETM_MODE_EXCLUDE)
1012 etm4_set_mode_exclude(drvdata, true);
1014 etm4_set_mode_exclude(drvdata, false);
1016 spin_unlock(&drvdata->spinlock);
1019 static DEVICE_ATTR_RW(addr_range);
1021 static ssize_t addr_start_show(struct device *dev,
1022 struct device_attribute *attr,
1027 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1028 struct etmv4_config *config = &drvdata->config;
1030 spin_lock(&drvdata->spinlock);
1031 idx = config->addr_idx;
1033 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1034 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1035 spin_unlock(&drvdata->spinlock);
1039 val = (unsigned long)config->addr_val[idx];
1040 spin_unlock(&drvdata->spinlock);
1041 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1044 static ssize_t addr_start_store(struct device *dev,
1045 struct device_attribute *attr,
1046 const char *buf, size_t size)
1050 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1051 struct etmv4_config *config = &drvdata->config;
1053 if (kstrtoul(buf, 16, &val))
1056 spin_lock(&drvdata->spinlock);
1057 idx = config->addr_idx;
1058 if (!drvdata->nr_addr_cmp) {
1059 spin_unlock(&drvdata->spinlock);
1062 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1063 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1064 spin_unlock(&drvdata->spinlock);
1068 config->addr_val[idx] = (u64)val;
1069 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1070 config->vissctlr |= BIT(idx);
1071 /* SSSTATUS, bit[9] - turn on start/stop logic */
1072 config->vinst_ctrl |= BIT(9);
1073 spin_unlock(&drvdata->spinlock);
1076 static DEVICE_ATTR_RW(addr_start);
1078 static ssize_t addr_stop_show(struct device *dev,
1079 struct device_attribute *attr,
1084 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1085 struct etmv4_config *config = &drvdata->config;
1087 spin_lock(&drvdata->spinlock);
1088 idx = config->addr_idx;
1090 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1091 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1092 spin_unlock(&drvdata->spinlock);
1096 val = (unsigned long)config->addr_val[idx];
1097 spin_unlock(&drvdata->spinlock);
1098 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1101 static ssize_t addr_stop_store(struct device *dev,
1102 struct device_attribute *attr,
1103 const char *buf, size_t size)
1107 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1108 struct etmv4_config *config = &drvdata->config;
1110 if (kstrtoul(buf, 16, &val))
1113 spin_lock(&drvdata->spinlock);
1114 idx = config->addr_idx;
1115 if (!drvdata->nr_addr_cmp) {
1116 spin_unlock(&drvdata->spinlock);
1119 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1120 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1121 spin_unlock(&drvdata->spinlock);
1125 config->addr_val[idx] = (u64)val;
1126 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1127 config->vissctlr |= BIT(idx + 16);
1128 /* SSSTATUS, bit[9] - turn on start/stop logic */
1129 config->vinst_ctrl |= BIT(9);
1130 spin_unlock(&drvdata->spinlock);
1133 static DEVICE_ATTR_RW(addr_stop);
1135 static ssize_t addr_ctxtype_show(struct device *dev,
1136 struct device_attribute *attr,
1141 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1142 struct etmv4_config *config = &drvdata->config;
1144 spin_lock(&drvdata->spinlock);
1145 idx = config->addr_idx;
1146 /* CONTEXTTYPE, bits[3:2] */
1147 val = BMVAL(config->addr_acc[idx], 2, 3);
1148 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1149 (val == ETM_CTX_CTXID ? "ctxid" :
1150 (val == ETM_CTX_VMID ? "vmid" : "all")));
1151 spin_unlock(&drvdata->spinlock);
1155 static ssize_t addr_ctxtype_store(struct device *dev,
1156 struct device_attribute *attr,
1157 const char *buf, size_t size)
1161 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1162 struct etmv4_config *config = &drvdata->config;
1164 if (strlen(buf) >= 10)
1166 if (sscanf(buf, "%s", str) != 1)
1169 spin_lock(&drvdata->spinlock);
1170 idx = config->addr_idx;
1171 if (!strcmp(str, "none"))
1172 /* start by clearing context type bits */
1173 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1174 else if (!strcmp(str, "ctxid")) {
1175 /* 0b01 The trace unit performs a Context ID */
1176 if (drvdata->numcidc) {
1177 config->addr_acc[idx] |= BIT(2);
1178 config->addr_acc[idx] &= ~BIT(3);
1180 } else if (!strcmp(str, "vmid")) {
1181 /* 0b10 The trace unit performs a VMID */
1182 if (drvdata->numvmidc) {
1183 config->addr_acc[idx] &= ~BIT(2);
1184 config->addr_acc[idx] |= BIT(3);
1186 } else if (!strcmp(str, "all")) {
1188 * 0b11 The trace unit performs a Context ID
1189 * comparison and a VMID
1191 if (drvdata->numcidc)
1192 config->addr_acc[idx] |= BIT(2);
1193 if (drvdata->numvmidc)
1194 config->addr_acc[idx] |= BIT(3);
1196 spin_unlock(&drvdata->spinlock);
1199 static DEVICE_ATTR_RW(addr_ctxtype);
1201 static ssize_t addr_context_show(struct device *dev,
1202 struct device_attribute *attr,
1207 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1208 struct etmv4_config *config = &drvdata->config;
1210 spin_lock(&drvdata->spinlock);
1211 idx = config->addr_idx;
1212 /* context ID comparator bits[6:4] */
1213 val = BMVAL(config->addr_acc[idx], 4, 6);
1214 spin_unlock(&drvdata->spinlock);
1215 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1218 static ssize_t addr_context_store(struct device *dev,
1219 struct device_attribute *attr,
1220 const char *buf, size_t size)
1224 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1225 struct etmv4_config *config = &drvdata->config;
1227 if (kstrtoul(buf, 16, &val))
1229 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1231 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1232 drvdata->numcidc : drvdata->numvmidc))
1235 spin_lock(&drvdata->spinlock);
1236 idx = config->addr_idx;
1237 /* clear context ID comparator bits[6:4] */
1238 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1239 config->addr_acc[idx] |= (val << 4);
1240 spin_unlock(&drvdata->spinlock);
1243 static DEVICE_ATTR_RW(addr_context);
1245 static ssize_t seq_idx_show(struct device *dev,
1246 struct device_attribute *attr,
1250 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1251 struct etmv4_config *config = &drvdata->config;
1253 val = config->seq_idx;
1254 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1257 static ssize_t seq_idx_store(struct device *dev,
1258 struct device_attribute *attr,
1259 const char *buf, size_t size)
1262 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1263 struct etmv4_config *config = &drvdata->config;
1265 if (kstrtoul(buf, 16, &val))
1267 if (val >= drvdata->nrseqstate - 1)
1271 * Use spinlock to ensure index doesn't change while it gets
1272 * dereferenced multiple times within a spinlock block elsewhere.
1274 spin_lock(&drvdata->spinlock);
1275 config->seq_idx = val;
1276 spin_unlock(&drvdata->spinlock);
1279 static DEVICE_ATTR_RW(seq_idx);
1281 static ssize_t seq_state_show(struct device *dev,
1282 struct device_attribute *attr,
1286 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1287 struct etmv4_config *config = &drvdata->config;
1289 val = config->seq_state;
1290 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1293 static ssize_t seq_state_store(struct device *dev,
1294 struct device_attribute *attr,
1295 const char *buf, size_t size)
1298 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1299 struct etmv4_config *config = &drvdata->config;
1301 if (kstrtoul(buf, 16, &val))
1303 if (val >= drvdata->nrseqstate)
1306 config->seq_state = val;
1309 static DEVICE_ATTR_RW(seq_state);
1311 static ssize_t seq_event_show(struct device *dev,
1312 struct device_attribute *attr,
1317 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1318 struct etmv4_config *config = &drvdata->config;
1320 spin_lock(&drvdata->spinlock);
1321 idx = config->seq_idx;
1322 val = config->seq_ctrl[idx];
1323 spin_unlock(&drvdata->spinlock);
1324 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1327 static ssize_t seq_event_store(struct device *dev,
1328 struct device_attribute *attr,
1329 const char *buf, size_t size)
1333 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1334 struct etmv4_config *config = &drvdata->config;
1336 if (kstrtoul(buf, 16, &val))
1339 spin_lock(&drvdata->spinlock);
1340 idx = config->seq_idx;
1341 /* Seq control has two masks B[15:8] F[7:0] */
1342 config->seq_ctrl[idx] = val & 0xFFFF;
1343 spin_unlock(&drvdata->spinlock);
1346 static DEVICE_ATTR_RW(seq_event);
1348 static ssize_t seq_reset_event_show(struct device *dev,
1349 struct device_attribute *attr,
1353 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1354 struct etmv4_config *config = &drvdata->config;
1356 val = config->seq_rst;
1357 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1360 static ssize_t seq_reset_event_store(struct device *dev,
1361 struct device_attribute *attr,
1362 const char *buf, size_t size)
1365 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1366 struct etmv4_config *config = &drvdata->config;
1368 if (kstrtoul(buf, 16, &val))
1370 if (!(drvdata->nrseqstate))
1373 config->seq_rst = val & ETMv4_EVENT_MASK;
1376 static DEVICE_ATTR_RW(seq_reset_event);
1378 static ssize_t cntr_idx_show(struct device *dev,
1379 struct device_attribute *attr,
1383 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1384 struct etmv4_config *config = &drvdata->config;
1386 val = config->cntr_idx;
1387 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1390 static ssize_t cntr_idx_store(struct device *dev,
1391 struct device_attribute *attr,
1392 const char *buf, size_t size)
1395 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1396 struct etmv4_config *config = &drvdata->config;
1398 if (kstrtoul(buf, 16, &val))
1400 if (val >= drvdata->nr_cntr)
1404 * Use spinlock to ensure index doesn't change while it gets
1405 * dereferenced multiple times within a spinlock block elsewhere.
1407 spin_lock(&drvdata->spinlock);
1408 config->cntr_idx = val;
1409 spin_unlock(&drvdata->spinlock);
1412 static DEVICE_ATTR_RW(cntr_idx);
1414 static ssize_t cntrldvr_show(struct device *dev,
1415 struct device_attribute *attr,
1420 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1421 struct etmv4_config *config = &drvdata->config;
1423 spin_lock(&drvdata->spinlock);
1424 idx = config->cntr_idx;
1425 val = config->cntrldvr[idx];
1426 spin_unlock(&drvdata->spinlock);
1427 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1430 static ssize_t cntrldvr_store(struct device *dev,
1431 struct device_attribute *attr,
1432 const char *buf, size_t size)
1436 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1437 struct etmv4_config *config = &drvdata->config;
1439 if (kstrtoul(buf, 16, &val))
1441 if (val > ETM_CNTR_MAX_VAL)
1444 spin_lock(&drvdata->spinlock);
1445 idx = config->cntr_idx;
1446 config->cntrldvr[idx] = val;
1447 spin_unlock(&drvdata->spinlock);
1450 static DEVICE_ATTR_RW(cntrldvr);
1452 static ssize_t cntr_val_show(struct device *dev,
1453 struct device_attribute *attr,
1458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1459 struct etmv4_config *config = &drvdata->config;
1461 spin_lock(&drvdata->spinlock);
1462 idx = config->cntr_idx;
1463 val = config->cntr_val[idx];
1464 spin_unlock(&drvdata->spinlock);
1465 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1468 static ssize_t cntr_val_store(struct device *dev,
1469 struct device_attribute *attr,
1470 const char *buf, size_t size)
1474 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1475 struct etmv4_config *config = &drvdata->config;
1477 if (kstrtoul(buf, 16, &val))
1479 if (val > ETM_CNTR_MAX_VAL)
1482 spin_lock(&drvdata->spinlock);
1483 idx = config->cntr_idx;
1484 config->cntr_val[idx] = val;
1485 spin_unlock(&drvdata->spinlock);
1488 static DEVICE_ATTR_RW(cntr_val);
1490 static ssize_t cntr_ctrl_show(struct device *dev,
1491 struct device_attribute *attr,
1496 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1497 struct etmv4_config *config = &drvdata->config;
1499 spin_lock(&drvdata->spinlock);
1500 idx = config->cntr_idx;
1501 val = config->cntr_ctrl[idx];
1502 spin_unlock(&drvdata->spinlock);
1503 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1506 static ssize_t cntr_ctrl_store(struct device *dev,
1507 struct device_attribute *attr,
1508 const char *buf, size_t size)
1512 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1513 struct etmv4_config *config = &drvdata->config;
1515 if (kstrtoul(buf, 16, &val))
1518 spin_lock(&drvdata->spinlock);
1519 idx = config->cntr_idx;
1520 config->cntr_ctrl[idx] = val;
1521 spin_unlock(&drvdata->spinlock);
1524 static DEVICE_ATTR_RW(cntr_ctrl);
1526 static ssize_t res_idx_show(struct device *dev,
1527 struct device_attribute *attr,
1531 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1532 struct etmv4_config *config = &drvdata->config;
1534 val = config->res_idx;
1535 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1538 static ssize_t res_idx_store(struct device *dev,
1539 struct device_attribute *attr,
1540 const char *buf, size_t size)
1543 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1544 struct etmv4_config *config = &drvdata->config;
1546 if (kstrtoul(buf, 16, &val))
1548 /* Resource selector pair 0 is always implemented and reserved */
1549 if ((val == 0) || (val >= drvdata->nr_resource))
1553 * Use spinlock to ensure index doesn't change while it gets
1554 * dereferenced multiple times within a spinlock block elsewhere.
1556 spin_lock(&drvdata->spinlock);
1557 config->res_idx = val;
1558 spin_unlock(&drvdata->spinlock);
1561 static DEVICE_ATTR_RW(res_idx);
1563 static ssize_t res_ctrl_show(struct device *dev,
1564 struct device_attribute *attr,
1569 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1570 struct etmv4_config *config = &drvdata->config;
1572 spin_lock(&drvdata->spinlock);
1573 idx = config->res_idx;
1574 val = config->res_ctrl[idx];
1575 spin_unlock(&drvdata->spinlock);
1576 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1579 static ssize_t res_ctrl_store(struct device *dev,
1580 struct device_attribute *attr,
1581 const char *buf, size_t size)
1585 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1586 struct etmv4_config *config = &drvdata->config;
1588 if (kstrtoul(buf, 16, &val))
1591 spin_lock(&drvdata->spinlock);
1592 idx = config->res_idx;
1593 /* For odd idx pair inversal bit is RES0 */
1595 /* PAIRINV, bit[21] */
1597 config->res_ctrl[idx] = val & GENMASK(21, 0);
1598 spin_unlock(&drvdata->spinlock);
1601 static DEVICE_ATTR_RW(res_ctrl);
1603 static ssize_t ctxid_idx_show(struct device *dev,
1604 struct device_attribute *attr,
1608 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1609 struct etmv4_config *config = &drvdata->config;
1611 val = config->ctxid_idx;
1612 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1615 static ssize_t ctxid_idx_store(struct device *dev,
1616 struct device_attribute *attr,
1617 const char *buf, size_t size)
1620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1621 struct etmv4_config *config = &drvdata->config;
1623 if (kstrtoul(buf, 16, &val))
1625 if (val >= drvdata->numcidc)
1629 * Use spinlock to ensure index doesn't change while it gets
1630 * dereferenced multiple times within a spinlock block elsewhere.
1632 spin_lock(&drvdata->spinlock);
1633 config->ctxid_idx = val;
1634 spin_unlock(&drvdata->spinlock);
1637 static DEVICE_ATTR_RW(ctxid_idx);
1639 static ssize_t ctxid_pid_show(struct device *dev,
1640 struct device_attribute *attr,
1645 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1646 struct etmv4_config *config = &drvdata->config;
1649 * Don't use contextID tracing if coming from a PID namespace. See
1650 * comment in ctxid_pid_store().
1652 if (task_active_pid_ns(current) != &init_pid_ns)
1655 spin_lock(&drvdata->spinlock);
1656 idx = config->ctxid_idx;
1657 val = (unsigned long)config->ctxid_pid[idx];
1658 spin_unlock(&drvdata->spinlock);
1659 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1662 static ssize_t ctxid_pid_store(struct device *dev,
1663 struct device_attribute *attr,
1664 const char *buf, size_t size)
1668 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1669 struct etmv4_config *config = &drvdata->config;
1672 * When contextID tracing is enabled the tracers will insert the
1673 * value found in the contextID register in the trace stream. But if
1674 * a process is in a namespace the PID of that process as seen from the
1675 * namespace won't be what the kernel sees, something that makes the
1676 * feature confusing and can potentially leak kernel only information.
1677 * As such refuse to use the feature if @current is not in the initial
1680 if (task_active_pid_ns(current) != &init_pid_ns)
1684 * only implemented when ctxid tracing is enabled, i.e. at least one
1685 * ctxid comparator is implemented and ctxid is greater than 0 bits
1688 if (!drvdata->ctxid_size || !drvdata->numcidc)
1690 if (kstrtoul(buf, 16, &pid))
1693 spin_lock(&drvdata->spinlock);
1694 idx = config->ctxid_idx;
1695 config->ctxid_pid[idx] = (u64)pid;
1696 spin_unlock(&drvdata->spinlock);
1699 static DEVICE_ATTR_RW(ctxid_pid);
1701 static ssize_t ctxid_masks_show(struct device *dev,
1702 struct device_attribute *attr,
1705 unsigned long val1, val2;
1706 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1707 struct etmv4_config *config = &drvdata->config;
1710 * Don't use contextID tracing if coming from a PID namespace. See
1711 * comment in ctxid_pid_store().
1713 if (task_active_pid_ns(current) != &init_pid_ns)
1716 spin_lock(&drvdata->spinlock);
1717 val1 = config->ctxid_mask0;
1718 val2 = config->ctxid_mask1;
1719 spin_unlock(&drvdata->spinlock);
1720 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1723 static ssize_t ctxid_masks_store(struct device *dev,
1724 struct device_attribute *attr,
1725 const char *buf, size_t size)
1728 unsigned long val1, val2, mask;
1729 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1730 struct etmv4_config *config = &drvdata->config;
1733 * Don't use contextID tracing if coming from a PID namespace. See
1734 * comment in ctxid_pid_store().
1736 if (task_active_pid_ns(current) != &init_pid_ns)
1740 * only implemented when ctxid tracing is enabled, i.e. at least one
1741 * ctxid comparator is implemented and ctxid is greater than 0 bits
1744 if (!drvdata->ctxid_size || !drvdata->numcidc)
1746 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1749 spin_lock(&drvdata->spinlock);
1751 * each byte[0..3] controls mask value applied to ctxid
1754 switch (drvdata->numcidc) {
1756 /* COMP0, bits[7:0] */
1757 config->ctxid_mask0 = val1 & 0xFF;
1760 /* COMP1, bits[15:8] */
1761 config->ctxid_mask0 = val1 & 0xFFFF;
1764 /* COMP2, bits[23:16] */
1765 config->ctxid_mask0 = val1 & 0xFFFFFF;
1768 /* COMP3, bits[31:24] */
1769 config->ctxid_mask0 = val1;
1772 /* COMP4, bits[7:0] */
1773 config->ctxid_mask0 = val1;
1774 config->ctxid_mask1 = val2 & 0xFF;
1777 /* COMP5, bits[15:8] */
1778 config->ctxid_mask0 = val1;
1779 config->ctxid_mask1 = val2 & 0xFFFF;
1782 /* COMP6, bits[23:16] */
1783 config->ctxid_mask0 = val1;
1784 config->ctxid_mask1 = val2 & 0xFFFFFF;
1787 /* COMP7, bits[31:24] */
1788 config->ctxid_mask0 = val1;
1789 config->ctxid_mask1 = val2;
1795 * If software sets a mask bit to 1, it must program relevant byte
1796 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1797 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1798 * of ctxid comparator0 value (corresponding to byte 0) register.
1800 mask = config->ctxid_mask0;
1801 for (i = 0; i < drvdata->numcidc; i++) {
1802 /* mask value of corresponding ctxid comparator */
1803 maskbyte = mask & ETMv4_EVENT_MASK;
1805 * each bit corresponds to a byte of respective ctxid comparator
1808 for (j = 0; j < 8; j++) {
1810 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
1813 /* Select the next ctxid comparator mask value */
1815 /* ctxid comparators[4-7] */
1816 mask = config->ctxid_mask1;
1821 spin_unlock(&drvdata->spinlock);
1824 static DEVICE_ATTR_RW(ctxid_masks);
1826 static ssize_t vmid_idx_show(struct device *dev,
1827 struct device_attribute *attr,
1831 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1832 struct etmv4_config *config = &drvdata->config;
1834 val = config->vmid_idx;
1835 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1838 static ssize_t vmid_idx_store(struct device *dev,
1839 struct device_attribute *attr,
1840 const char *buf, size_t size)
1843 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1844 struct etmv4_config *config = &drvdata->config;
1846 if (kstrtoul(buf, 16, &val))
1848 if (val >= drvdata->numvmidc)
1852 * Use spinlock to ensure index doesn't change while it gets
1853 * dereferenced multiple times within a spinlock block elsewhere.
1855 spin_lock(&drvdata->spinlock);
1856 config->vmid_idx = val;
1857 spin_unlock(&drvdata->spinlock);
1860 static DEVICE_ATTR_RW(vmid_idx);
1862 static ssize_t vmid_val_show(struct device *dev,
1863 struct device_attribute *attr,
1867 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1868 struct etmv4_config *config = &drvdata->config;
1870 val = (unsigned long)config->vmid_val[config->vmid_idx];
1871 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1874 static ssize_t vmid_val_store(struct device *dev,
1875 struct device_attribute *attr,
1876 const char *buf, size_t size)
1879 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1880 struct etmv4_config *config = &drvdata->config;
1883 * only implemented when vmid tracing is enabled, i.e. at least one
1884 * vmid comparator is implemented and at least 8 bit vmid size
1886 if (!drvdata->vmid_size || !drvdata->numvmidc)
1888 if (kstrtoul(buf, 16, &val))
1891 spin_lock(&drvdata->spinlock);
1892 config->vmid_val[config->vmid_idx] = (u64)val;
1893 spin_unlock(&drvdata->spinlock);
1896 static DEVICE_ATTR_RW(vmid_val);
1898 static ssize_t vmid_masks_show(struct device *dev,
1899 struct device_attribute *attr, char *buf)
1901 unsigned long val1, val2;
1902 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1903 struct etmv4_config *config = &drvdata->config;
1905 spin_lock(&drvdata->spinlock);
1906 val1 = config->vmid_mask0;
1907 val2 = config->vmid_mask1;
1908 spin_unlock(&drvdata->spinlock);
1909 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1912 static ssize_t vmid_masks_store(struct device *dev,
1913 struct device_attribute *attr,
1914 const char *buf, size_t size)
1917 unsigned long val1, val2, mask;
1918 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1919 struct etmv4_config *config = &drvdata->config;
1922 * only implemented when vmid tracing is enabled, i.e. at least one
1923 * vmid comparator is implemented and at least 8 bit vmid size
1925 if (!drvdata->vmid_size || !drvdata->numvmidc)
1927 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1930 spin_lock(&drvdata->spinlock);
1933 * each byte[0..3] controls mask value applied to vmid
1936 switch (drvdata->numvmidc) {
1938 /* COMP0, bits[7:0] */
1939 config->vmid_mask0 = val1 & 0xFF;
1942 /* COMP1, bits[15:8] */
1943 config->vmid_mask0 = val1 & 0xFFFF;
1946 /* COMP2, bits[23:16] */
1947 config->vmid_mask0 = val1 & 0xFFFFFF;
1950 /* COMP3, bits[31:24] */
1951 config->vmid_mask0 = val1;
1954 /* COMP4, bits[7:0] */
1955 config->vmid_mask0 = val1;
1956 config->vmid_mask1 = val2 & 0xFF;
1959 /* COMP5, bits[15:8] */
1960 config->vmid_mask0 = val1;
1961 config->vmid_mask1 = val2 & 0xFFFF;
1964 /* COMP6, bits[23:16] */
1965 config->vmid_mask0 = val1;
1966 config->vmid_mask1 = val2 & 0xFFFFFF;
1969 /* COMP7, bits[31:24] */
1970 config->vmid_mask0 = val1;
1971 config->vmid_mask1 = val2;
1978 * If software sets a mask bit to 1, it must program relevant byte
1979 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
1980 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
1981 * of vmid comparator0 value (corresponding to byte 0) register.
1983 mask = config->vmid_mask0;
1984 for (i = 0; i < drvdata->numvmidc; i++) {
1985 /* mask value of corresponding vmid comparator */
1986 maskbyte = mask & ETMv4_EVENT_MASK;
1988 * each bit corresponds to a byte of respective vmid comparator
1991 for (j = 0; j < 8; j++) {
1993 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
1996 /* Select the next vmid comparator mask value */
1998 /* vmid comparators[4-7] */
1999 mask = config->vmid_mask1;
2003 spin_unlock(&drvdata->spinlock);
2006 static DEVICE_ATTR_RW(vmid_masks);
2008 static ssize_t cpu_show(struct device *dev,
2009 struct device_attribute *attr, char *buf)
2012 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2015 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2018 static DEVICE_ATTR_RO(cpu);
2020 static struct attribute *coresight_etmv4_attrs[] = {
2021 &dev_attr_nr_pe_cmp.attr,
2022 &dev_attr_nr_addr_cmp.attr,
2023 &dev_attr_nr_cntr.attr,
2024 &dev_attr_nr_ext_inp.attr,
2025 &dev_attr_numcidc.attr,
2026 &dev_attr_numvmidc.attr,
2027 &dev_attr_nrseqstate.attr,
2028 &dev_attr_nr_resource.attr,
2029 &dev_attr_nr_ss_cmp.attr,
2030 &dev_attr_reset.attr,
2031 &dev_attr_mode.attr,
2033 &dev_attr_event.attr,
2034 &dev_attr_event_instren.attr,
2035 &dev_attr_event_ts.attr,
2036 &dev_attr_syncfreq.attr,
2037 &dev_attr_cyc_threshold.attr,
2038 &dev_attr_bb_ctrl.attr,
2039 &dev_attr_event_vinst.attr,
2040 &dev_attr_s_exlevel_vinst.attr,
2041 &dev_attr_ns_exlevel_vinst.attr,
2042 &dev_attr_addr_idx.attr,
2043 &dev_attr_addr_instdatatype.attr,
2044 &dev_attr_addr_single.attr,
2045 &dev_attr_addr_range.attr,
2046 &dev_attr_addr_start.attr,
2047 &dev_attr_addr_stop.attr,
2048 &dev_attr_addr_ctxtype.attr,
2049 &dev_attr_addr_context.attr,
2050 &dev_attr_seq_idx.attr,
2051 &dev_attr_seq_state.attr,
2052 &dev_attr_seq_event.attr,
2053 &dev_attr_seq_reset_event.attr,
2054 &dev_attr_cntr_idx.attr,
2055 &dev_attr_cntrldvr.attr,
2056 &dev_attr_cntr_val.attr,
2057 &dev_attr_cntr_ctrl.attr,
2058 &dev_attr_res_idx.attr,
2059 &dev_attr_res_ctrl.attr,
2060 &dev_attr_ctxid_idx.attr,
2061 &dev_attr_ctxid_pid.attr,
2062 &dev_attr_ctxid_masks.attr,
2063 &dev_attr_vmid_idx.attr,
2064 &dev_attr_vmid_val.attr,
2065 &dev_attr_vmid_masks.attr,
2075 static void do_smp_cross_read(void *data)
2077 struct etmv4_reg *reg = data;
2079 reg->data = readl_relaxed(reg->addr);
2082 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2084 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2085 struct etmv4_reg reg;
2087 reg.addr = drvdata->base + offset;
2089 * smp cross call ensures the CPU will be powered up before
2090 * accessing the ETMv4 trace core registers
2092 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2096 #define coresight_etm4x_reg(name, offset) \
2097 coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2099 #define coresight_etm4x_cross_read(name, offset) \
2100 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
2103 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2104 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2105 coresight_etm4x_reg(trclsr, TRCLSR);
2106 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2107 coresight_etm4x_reg(trcdevid, TRCDEVID);
2108 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2109 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2110 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2111 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2112 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2113 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2114 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2115 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2117 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2118 &dev_attr_trcoslsr.attr,
2119 &dev_attr_trcpdcr.attr,
2120 &dev_attr_trcpdsr.attr,
2121 &dev_attr_trclsr.attr,
2122 &dev_attr_trcconfig.attr,
2123 &dev_attr_trctraceid.attr,
2124 &dev_attr_trcauthstatus.attr,
2125 &dev_attr_trcdevid.attr,
2126 &dev_attr_trcdevtype.attr,
2127 &dev_attr_trcpidr0.attr,
2128 &dev_attr_trcpidr1.attr,
2129 &dev_attr_trcpidr2.attr,
2130 &dev_attr_trcpidr3.attr,
2134 coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2135 coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2136 coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2137 coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2138 coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2139 coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2140 /* trcidr[6,7] are reserved */
2141 coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2142 coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2143 coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2144 coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2145 coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2146 coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2148 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2149 &dev_attr_trcidr0.attr,
2150 &dev_attr_trcidr1.attr,
2151 &dev_attr_trcidr2.attr,
2152 &dev_attr_trcidr3.attr,
2153 &dev_attr_trcidr4.attr,
2154 &dev_attr_trcidr5.attr,
2155 /* trcidr[6,7] are reserved */
2156 &dev_attr_trcidr8.attr,
2157 &dev_attr_trcidr9.attr,
2158 &dev_attr_trcidr10.attr,
2159 &dev_attr_trcidr11.attr,
2160 &dev_attr_trcidr12.attr,
2161 &dev_attr_trcidr13.attr,
2165 static const struct attribute_group coresight_etmv4_group = {
2166 .attrs = coresight_etmv4_attrs,
2169 static const struct attribute_group coresight_etmv4_mgmt_group = {
2170 .attrs = coresight_etmv4_mgmt_attrs,
2174 static const struct attribute_group coresight_etmv4_trcidr_group = {
2175 .attrs = coresight_etmv4_trcidr_attrs,
2179 const struct attribute_group *coresight_etmv4_groups[] = {
2180 &coresight_etmv4_group,
2181 &coresight_etmv4_mgmt_group,
2182 &coresight_etmv4_trcidr_group,