/*
 * Copyright (C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
18 #include <linux/pm_runtime.h>
19 #include <linux/sysfs.h>
20 #include "coresight-etm4x.h"
21 #include "coresight-priv.h"
23 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
26 struct etmv4_config *config = &drvdata->config;
28 idx = config->addr_idx;
31 * TRCACATRn.TYPE bit[1:0]: type of comparison
32 * the trace unit performs
34 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
39 * We are performing instruction address comparison. Set the
40 * relevant bit of ViewInst Include/Exclude Control register
41 * for corresponding address comparator pair.
43 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
44 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
47 if (exclude == true) {
49 * Set exclude bit and unset the include bit
50 * corresponding to comparator pair
52 config->viiectlr |= BIT(idx / 2 + 16);
53 config->viiectlr &= ~BIT(idx / 2);
56 * Set include bit and unset exclude bit
57 * corresponding to comparator pair
59 config->viiectlr |= BIT(idx / 2);
60 config->viiectlr &= ~BIT(idx / 2 + 16);
66 static ssize_t nr_pe_cmp_show(struct device *dev,
67 struct device_attribute *attr,
71 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
73 val = drvdata->nr_pe_cmp;
74 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
76 static DEVICE_ATTR_RO(nr_pe_cmp);
78 static ssize_t nr_addr_cmp_show(struct device *dev,
79 struct device_attribute *attr,
83 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
85 val = drvdata->nr_addr_cmp;
86 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
88 static DEVICE_ATTR_RO(nr_addr_cmp);
90 static ssize_t nr_cntr_show(struct device *dev,
91 struct device_attribute *attr,
95 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
97 val = drvdata->nr_cntr;
98 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
100 static DEVICE_ATTR_RO(nr_cntr);
102 static ssize_t nr_ext_inp_show(struct device *dev,
103 struct device_attribute *attr,
107 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
109 val = drvdata->nr_ext_inp;
110 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
112 static DEVICE_ATTR_RO(nr_ext_inp);
114 static ssize_t numcidc_show(struct device *dev,
115 struct device_attribute *attr,
119 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
121 val = drvdata->numcidc;
122 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
124 static DEVICE_ATTR_RO(numcidc);
126 static ssize_t numvmidc_show(struct device *dev,
127 struct device_attribute *attr,
131 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
133 val = drvdata->numvmidc;
134 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
136 static DEVICE_ATTR_RO(numvmidc);
138 static ssize_t nrseqstate_show(struct device *dev,
139 struct device_attribute *attr,
143 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
145 val = drvdata->nrseqstate;
146 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
148 static DEVICE_ATTR_RO(nrseqstate);
150 static ssize_t nr_resource_show(struct device *dev,
151 struct device_attribute *attr,
155 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
157 val = drvdata->nr_resource;
158 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
160 static DEVICE_ATTR_RO(nr_resource);
162 static ssize_t nr_ss_cmp_show(struct device *dev,
163 struct device_attribute *attr,
167 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
169 val = drvdata->nr_ss_cmp;
170 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
172 static DEVICE_ATTR_RO(nr_ss_cmp);
174 static ssize_t reset_store(struct device *dev,
175 struct device_attribute *attr,
176 const char *buf, size_t size)
180 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
181 struct etmv4_config *config = &drvdata->config;
183 if (kstrtoul(buf, 16, &val))
186 spin_lock(&drvdata->spinlock);
190 /* Disable data tracing: do not trace load and store data transfers */
191 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
192 config->cfg &= ~(BIT(1) | BIT(2));
194 /* Disable data value and data address tracing */
195 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
196 ETM_MODE_DATA_TRACE_VAL);
197 config->cfg &= ~(BIT(16) | BIT(17));
199 /* Disable all events tracing */
200 config->eventctrl0 = 0x0;
201 config->eventctrl1 = 0x0;
203 /* Disable timestamp event */
204 config->ts_ctrl = 0x0;
206 /* Disable stalling */
207 config->stall_ctrl = 0x0;
209 /* Reset trace synchronization period to 2^8 = 256 bytes*/
210 if (drvdata->syncpr == false)
211 config->syncfreq = 0x8;
214 * Enable ViewInst to trace everything with start-stop logic in
215 * started state. ARM recommends start-stop logic is set before
218 config->vinst_ctrl |= BIT(0);
219 if (drvdata->nr_addr_cmp == true) {
220 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
221 /* SSSTATUS, bit[9] */
222 config->vinst_ctrl |= BIT(9);
225 /* No address range filtering for ViewInst */
226 config->viiectlr = 0x0;
228 /* No start-stop filtering for ViewInst */
229 config->vissctlr = 0x0;
231 /* Disable seq events */
232 for (i = 0; i < drvdata->nrseqstate-1; i++)
233 config->seq_ctrl[i] = 0x0;
234 config->seq_rst = 0x0;
235 config->seq_state = 0x0;
237 /* Disable external input events */
238 config->ext_inp = 0x0;
240 config->cntr_idx = 0x0;
241 for (i = 0; i < drvdata->nr_cntr; i++) {
242 config->cntrldvr[i] = 0x0;
243 config->cntr_ctrl[i] = 0x0;
244 config->cntr_val[i] = 0x0;
247 config->res_idx = 0x0;
248 for (i = 0; i < drvdata->nr_resource; i++)
249 config->res_ctrl[i] = 0x0;
251 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
252 config->ss_ctrl[i] = 0x0;
253 config->ss_pe_cmp[i] = 0x0;
256 config->addr_idx = 0x0;
257 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
258 config->addr_val[i] = 0x0;
259 config->addr_acc[i] = 0x0;
260 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
263 config->ctxid_idx = 0x0;
264 for (i = 0; i < drvdata->numcidc; i++) {
265 config->ctxid_pid[i] = 0x0;
266 config->ctxid_vpid[i] = 0x0;
269 config->ctxid_mask0 = 0x0;
270 config->ctxid_mask1 = 0x0;
272 config->vmid_idx = 0x0;
273 for (i = 0; i < drvdata->numvmidc; i++)
274 config->vmid_val[i] = 0x0;
275 config->vmid_mask0 = 0x0;
276 config->vmid_mask1 = 0x0;
278 drvdata->trcid = drvdata->cpu + 1;
280 spin_unlock(&drvdata->spinlock);
284 static DEVICE_ATTR_WO(reset);
286 static ssize_t mode_show(struct device *dev,
287 struct device_attribute *attr,
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
295 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
298 static ssize_t mode_store(struct device *dev,
299 struct device_attribute *attr,
300 const char *buf, size_t size)
302 unsigned long val, mode;
303 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
304 struct etmv4_config *config = &drvdata->config;
306 if (kstrtoul(buf, 16, &val))
309 spin_lock(&drvdata->spinlock);
310 config->mode = val & ETMv4_MODE_ALL;
312 if (config->mode & ETM_MODE_EXCLUDE)
313 etm4_set_mode_exclude(drvdata, true);
315 etm4_set_mode_exclude(drvdata, false);
317 if (drvdata->instrp0 == true) {
318 /* start by clearing instruction P0 field */
319 config->cfg &= ~(BIT(1) | BIT(2));
320 if (config->mode & ETM_MODE_LOAD)
321 /* 0b01 Trace load instructions as P0 instructions */
322 config->cfg |= BIT(1);
323 if (config->mode & ETM_MODE_STORE)
324 /* 0b10 Trace store instructions as P0 instructions */
325 config->cfg |= BIT(2);
326 if (config->mode & ETM_MODE_LOAD_STORE)
328 * 0b11 Trace load and store instructions
331 config->cfg |= BIT(1) | BIT(2);
334 /* bit[3], Branch broadcast mode */
335 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
336 config->cfg |= BIT(3);
338 config->cfg &= ~BIT(3);
340 /* bit[4], Cycle counting instruction trace bit */
341 if ((config->mode & ETMv4_MODE_CYCACC) &&
342 (drvdata->trccci == true))
343 config->cfg |= BIT(4);
345 config->cfg &= ~BIT(4);
347 /* bit[6], Context ID tracing bit */
348 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
349 config->cfg |= BIT(6);
351 config->cfg &= ~BIT(6);
353 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
354 config->cfg |= BIT(7);
356 config->cfg &= ~BIT(7);
358 /* bits[10:8], Conditional instruction tracing bit */
359 mode = ETM_MODE_COND(config->mode);
360 if (drvdata->trccond == true) {
361 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
362 config->cfg |= mode << 8;
365 /* bit[11], Global timestamp tracing bit */
366 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
367 config->cfg |= BIT(11);
369 config->cfg &= ~BIT(11);
371 /* bit[12], Return stack enable bit */
372 if ((config->mode & ETM_MODE_RETURNSTACK) &&
373 (drvdata->retstack == true))
374 config->cfg |= BIT(12);
376 config->cfg &= ~BIT(12);
378 /* bits[14:13], Q element enable field */
379 mode = ETM_MODE_QELEM(config->mode);
380 /* start by clearing QE bits */
381 config->cfg &= ~(BIT(13) | BIT(14));
383 * if supported, Q elements with instruction counts are enabled.
384 * Always set the low bit for any requested mode. Valid combos are
385 * 0b00, 0b01 and 0b11.
387 if (mode && drvdata->q_support)
388 config->cfg |= BIT(13);
390 * if supported, Q elements with and without instruction
393 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
394 config->cfg |= BIT(14);
396 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
397 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
398 (drvdata->atbtrig == true))
399 config->eventctrl1 |= BIT(11);
401 config->eventctrl1 &= ~BIT(11);
403 /* bit[12], Low-power state behavior override bit */
404 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
405 (drvdata->lpoverride == true))
406 config->eventctrl1 |= BIT(12);
408 config->eventctrl1 &= ~BIT(12);
410 /* bit[8], Instruction stall bit */
411 if (config->mode & ETM_MODE_ISTALL_EN)
412 config->stall_ctrl |= BIT(8);
414 config->stall_ctrl &= ~BIT(8);
416 /* bit[10], Prioritize instruction trace bit */
417 if (config->mode & ETM_MODE_INSTPRIO)
418 config->stall_ctrl |= BIT(10);
420 config->stall_ctrl &= ~BIT(10);
422 /* bit[13], Trace overflow prevention bit */
423 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
424 (drvdata->nooverflow == true))
425 config->stall_ctrl |= BIT(13);
427 config->stall_ctrl &= ~BIT(13);
429 /* bit[9] Start/stop logic control bit */
430 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
431 config->vinst_ctrl |= BIT(9);
433 config->vinst_ctrl &= ~BIT(9);
435 /* bit[10], Whether a trace unit must trace a Reset exception */
436 if (config->mode & ETM_MODE_TRACE_RESET)
437 config->vinst_ctrl |= BIT(10);
439 config->vinst_ctrl &= ~BIT(10);
441 /* bit[11], Whether a trace unit must trace a system error exception */
442 if ((config->mode & ETM_MODE_TRACE_ERR) &&
443 (drvdata->trc_error == true))
444 config->vinst_ctrl |= BIT(11);
446 config->vinst_ctrl &= ~BIT(11);
448 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
449 etm4_config_trace_mode(config);
451 spin_unlock(&drvdata->spinlock);
455 static DEVICE_ATTR_RW(mode);
457 static ssize_t pe_show(struct device *dev,
458 struct device_attribute *attr,
462 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
463 struct etmv4_config *config = &drvdata->config;
465 val = config->pe_sel;
466 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
469 static ssize_t pe_store(struct device *dev,
470 struct device_attribute *attr,
471 const char *buf, size_t size)
474 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
475 struct etmv4_config *config = &drvdata->config;
477 if (kstrtoul(buf, 16, &val))
480 spin_lock(&drvdata->spinlock);
481 if (val > drvdata->nr_pe) {
482 spin_unlock(&drvdata->spinlock);
486 config->pe_sel = val;
487 spin_unlock(&drvdata->spinlock);
490 static DEVICE_ATTR_RW(pe);
492 static ssize_t event_show(struct device *dev,
493 struct device_attribute *attr,
497 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
498 struct etmv4_config *config = &drvdata->config;
500 val = config->eventctrl0;
501 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
504 static ssize_t event_store(struct device *dev,
505 struct device_attribute *attr,
506 const char *buf, size_t size)
509 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
510 struct etmv4_config *config = &drvdata->config;
512 if (kstrtoul(buf, 16, &val))
515 spin_lock(&drvdata->spinlock);
516 switch (drvdata->nr_event) {
518 /* EVENT0, bits[7:0] */
519 config->eventctrl0 = val & 0xFF;
522 /* EVENT1, bits[15:8] */
523 config->eventctrl0 = val & 0xFFFF;
526 /* EVENT2, bits[23:16] */
527 config->eventctrl0 = val & 0xFFFFFF;
530 /* EVENT3, bits[31:24] */
531 config->eventctrl0 = val;
536 spin_unlock(&drvdata->spinlock);
539 static DEVICE_ATTR_RW(event);
541 static ssize_t event_instren_show(struct device *dev,
542 struct device_attribute *attr,
546 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
547 struct etmv4_config *config = &drvdata->config;
549 val = BMVAL(config->eventctrl1, 0, 3);
550 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
553 static ssize_t event_instren_store(struct device *dev,
554 struct device_attribute *attr,
555 const char *buf, size_t size)
558 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
559 struct etmv4_config *config = &drvdata->config;
561 if (kstrtoul(buf, 16, &val))
564 spin_lock(&drvdata->spinlock);
565 /* start by clearing all instruction event enable bits */
566 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
567 switch (drvdata->nr_event) {
569 /* generate Event element for event 1 */
570 config->eventctrl1 |= val & BIT(1);
573 /* generate Event element for event 1 and 2 */
574 config->eventctrl1 |= val & (BIT(0) | BIT(1));
577 /* generate Event element for event 1, 2 and 3 */
578 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
581 /* generate Event element for all 4 events */
582 config->eventctrl1 |= val & 0xF;
587 spin_unlock(&drvdata->spinlock);
590 static DEVICE_ATTR_RW(event_instren);
592 static ssize_t event_ts_show(struct device *dev,
593 struct device_attribute *attr,
597 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
598 struct etmv4_config *config = &drvdata->config;
600 val = config->ts_ctrl;
601 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
604 static ssize_t event_ts_store(struct device *dev,
605 struct device_attribute *attr,
606 const char *buf, size_t size)
609 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
610 struct etmv4_config *config = &drvdata->config;
612 if (kstrtoul(buf, 16, &val))
614 if (!drvdata->ts_size)
617 config->ts_ctrl = val & ETMv4_EVENT_MASK;
620 static DEVICE_ATTR_RW(event_ts);
622 static ssize_t syncfreq_show(struct device *dev,
623 struct device_attribute *attr,
627 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
628 struct etmv4_config *config = &drvdata->config;
630 val = config->syncfreq;
631 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
634 static ssize_t syncfreq_store(struct device *dev,
635 struct device_attribute *attr,
636 const char *buf, size_t size)
639 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
640 struct etmv4_config *config = &drvdata->config;
642 if (kstrtoul(buf, 16, &val))
644 if (drvdata->syncpr == true)
647 config->syncfreq = val & ETMv4_SYNC_MASK;
650 static DEVICE_ATTR_RW(syncfreq);
652 static ssize_t cyc_threshold_show(struct device *dev,
653 struct device_attribute *attr,
657 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
658 struct etmv4_config *config = &drvdata->config;
660 val = config->ccctlr;
661 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
664 static ssize_t cyc_threshold_store(struct device *dev,
665 struct device_attribute *attr,
666 const char *buf, size_t size)
669 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
670 struct etmv4_config *config = &drvdata->config;
672 if (kstrtoul(buf, 16, &val))
675 /* mask off max threshold before checking min value */
676 val &= ETM_CYC_THRESHOLD_MASK;
677 if (val < drvdata->ccitmin)
680 config->ccctlr = val;
683 static DEVICE_ATTR_RW(cyc_threshold);
685 static ssize_t bb_ctrl_show(struct device *dev,
686 struct device_attribute *attr,
690 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
691 struct etmv4_config *config = &drvdata->config;
693 val = config->bb_ctrl;
694 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
697 static ssize_t bb_ctrl_store(struct device *dev,
698 struct device_attribute *attr,
699 const char *buf, size_t size)
702 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
703 struct etmv4_config *config = &drvdata->config;
705 if (kstrtoul(buf, 16, &val))
707 if (drvdata->trcbb == false)
709 if (!drvdata->nr_addr_cmp)
713 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
714 * individual range comparators. If include then at least 1
715 * range must be selected.
717 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
720 config->bb_ctrl = val & GENMASK(8, 0);
723 static DEVICE_ATTR_RW(bb_ctrl);
725 static ssize_t event_vinst_show(struct device *dev,
726 struct device_attribute *attr,
730 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
731 struct etmv4_config *config = &drvdata->config;
733 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
734 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
737 static ssize_t event_vinst_store(struct device *dev,
738 struct device_attribute *attr,
739 const char *buf, size_t size)
742 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
743 struct etmv4_config *config = &drvdata->config;
745 if (kstrtoul(buf, 16, &val))
748 spin_lock(&drvdata->spinlock);
749 val &= ETMv4_EVENT_MASK;
750 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
751 config->vinst_ctrl |= val;
752 spin_unlock(&drvdata->spinlock);
755 static DEVICE_ATTR_RW(event_vinst);
757 static ssize_t s_exlevel_vinst_show(struct device *dev,
758 struct device_attribute *attr,
762 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
763 struct etmv4_config *config = &drvdata->config;
765 val = BMVAL(config->vinst_ctrl, 16, 19);
766 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
769 static ssize_t s_exlevel_vinst_store(struct device *dev,
770 struct device_attribute *attr,
771 const char *buf, size_t size)
774 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
775 struct etmv4_config *config = &drvdata->config;
777 if (kstrtoul(buf, 16, &val))
780 spin_lock(&drvdata->spinlock);
781 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
782 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
783 /* enable instruction tracing for corresponding exception level */
784 val &= drvdata->s_ex_level;
785 config->vinst_ctrl |= (val << 16);
786 spin_unlock(&drvdata->spinlock);
789 static DEVICE_ATTR_RW(s_exlevel_vinst);
791 static ssize_t ns_exlevel_vinst_show(struct device *dev,
792 struct device_attribute *attr,
796 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
797 struct etmv4_config *config = &drvdata->config;
799 /* EXLEVEL_NS, bits[23:20] */
800 val = BMVAL(config->vinst_ctrl, 20, 23);
801 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
804 static ssize_t ns_exlevel_vinst_store(struct device *dev,
805 struct device_attribute *attr,
806 const char *buf, size_t size)
809 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
810 struct etmv4_config *config = &drvdata->config;
812 if (kstrtoul(buf, 16, &val))
815 spin_lock(&drvdata->spinlock);
816 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
817 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
818 /* enable instruction tracing for corresponding exception level */
819 val &= drvdata->ns_ex_level;
820 config->vinst_ctrl |= (val << 20);
821 spin_unlock(&drvdata->spinlock);
824 static DEVICE_ATTR_RW(ns_exlevel_vinst);
826 static ssize_t addr_idx_show(struct device *dev,
827 struct device_attribute *attr,
831 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
832 struct etmv4_config *config = &drvdata->config;
834 val = config->addr_idx;
835 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
838 static ssize_t addr_idx_store(struct device *dev,
839 struct device_attribute *attr,
840 const char *buf, size_t size)
843 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
844 struct etmv4_config *config = &drvdata->config;
846 if (kstrtoul(buf, 16, &val))
848 if (val >= drvdata->nr_addr_cmp * 2)
852 * Use spinlock to ensure index doesn't change while it gets
853 * dereferenced multiple times within a spinlock block elsewhere.
855 spin_lock(&drvdata->spinlock);
856 config->addr_idx = val;
857 spin_unlock(&drvdata->spinlock);
860 static DEVICE_ATTR_RW(addr_idx);
862 static ssize_t addr_instdatatype_show(struct device *dev,
863 struct device_attribute *attr,
868 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
869 struct etmv4_config *config = &drvdata->config;
871 spin_lock(&drvdata->spinlock);
872 idx = config->addr_idx;
873 val = BMVAL(config->addr_acc[idx], 0, 1);
874 len = scnprintf(buf, PAGE_SIZE, "%s\n",
875 val == ETM_INSTR_ADDR ? "instr" :
876 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
877 (val == ETM_DATA_STORE_ADDR ? "data_store" :
878 "data_load_store")));
879 spin_unlock(&drvdata->spinlock);
883 static ssize_t addr_instdatatype_store(struct device *dev,
884 struct device_attribute *attr,
885 const char *buf, size_t size)
889 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
890 struct etmv4_config *config = &drvdata->config;
892 if (strlen(buf) >= 20)
894 if (sscanf(buf, "%s", str) != 1)
897 spin_lock(&drvdata->spinlock);
898 idx = config->addr_idx;
899 if (!strcmp(str, "instr"))
900 /* TYPE, bits[1:0] */
901 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
903 spin_unlock(&drvdata->spinlock);
906 static DEVICE_ATTR_RW(addr_instdatatype);
908 static ssize_t addr_single_show(struct device *dev,
909 struct device_attribute *attr,
914 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
915 struct etmv4_config *config = &drvdata->config;
917 idx = config->addr_idx;
918 spin_lock(&drvdata->spinlock);
919 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
920 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
921 spin_unlock(&drvdata->spinlock);
924 val = (unsigned long)config->addr_val[idx];
925 spin_unlock(&drvdata->spinlock);
926 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
929 static ssize_t addr_single_store(struct device *dev,
930 struct device_attribute *attr,
931 const char *buf, size_t size)
935 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
936 struct etmv4_config *config = &drvdata->config;
938 if (kstrtoul(buf, 16, &val))
941 spin_lock(&drvdata->spinlock);
942 idx = config->addr_idx;
943 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
944 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
945 spin_unlock(&drvdata->spinlock);
949 config->addr_val[idx] = (u64)val;
950 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
951 spin_unlock(&drvdata->spinlock);
954 static DEVICE_ATTR_RW(addr_single);
956 static ssize_t addr_range_show(struct device *dev,
957 struct device_attribute *attr,
961 unsigned long val1, val2;
962 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
963 struct etmv4_config *config = &drvdata->config;
965 spin_lock(&drvdata->spinlock);
966 idx = config->addr_idx;
968 spin_unlock(&drvdata->spinlock);
971 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
972 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
973 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
974 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
975 spin_unlock(&drvdata->spinlock);
979 val1 = (unsigned long)config->addr_val[idx];
980 val2 = (unsigned long)config->addr_val[idx + 1];
981 spin_unlock(&drvdata->spinlock);
982 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
985 static ssize_t addr_range_store(struct device *dev,
986 struct device_attribute *attr,
987 const char *buf, size_t size)
990 unsigned long val1, val2;
991 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
992 struct etmv4_config *config = &drvdata->config;
994 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
996 /* lower address comparator cannot have a higher address value */
1000 spin_lock(&drvdata->spinlock);
1001 idx = config->addr_idx;
1003 spin_unlock(&drvdata->spinlock);
1007 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1008 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1009 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1010 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1011 spin_unlock(&drvdata->spinlock);
1015 config->addr_val[idx] = (u64)val1;
1016 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1017 config->addr_val[idx + 1] = (u64)val2;
1018 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1020 * Program include or exclude control bits for vinst or vdata
1021 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1023 if (config->mode & ETM_MODE_EXCLUDE)
1024 etm4_set_mode_exclude(drvdata, true);
1026 etm4_set_mode_exclude(drvdata, false);
1028 spin_unlock(&drvdata->spinlock);
1031 static DEVICE_ATTR_RW(addr_range);
1033 static ssize_t addr_start_show(struct device *dev,
1034 struct device_attribute *attr,
1039 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1040 struct etmv4_config *config = &drvdata->config;
1042 spin_lock(&drvdata->spinlock);
1043 idx = config->addr_idx;
1045 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1046 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1047 spin_unlock(&drvdata->spinlock);
1051 val = (unsigned long)config->addr_val[idx];
1052 spin_unlock(&drvdata->spinlock);
1053 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1056 static ssize_t addr_start_store(struct device *dev,
1057 struct device_attribute *attr,
1058 const char *buf, size_t size)
1062 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1063 struct etmv4_config *config = &drvdata->config;
1065 if (kstrtoul(buf, 16, &val))
1068 spin_lock(&drvdata->spinlock);
1069 idx = config->addr_idx;
1070 if (!drvdata->nr_addr_cmp) {
1071 spin_unlock(&drvdata->spinlock);
1074 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1075 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1076 spin_unlock(&drvdata->spinlock);
1080 config->addr_val[idx] = (u64)val;
1081 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1082 config->vissctlr |= BIT(idx);
1083 /* SSSTATUS, bit[9] - turn on start/stop logic */
1084 config->vinst_ctrl |= BIT(9);
1085 spin_unlock(&drvdata->spinlock);
1088 static DEVICE_ATTR_RW(addr_start);
1090 static ssize_t addr_stop_show(struct device *dev,
1091 struct device_attribute *attr,
1096 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1097 struct etmv4_config *config = &drvdata->config;
1099 spin_lock(&drvdata->spinlock);
1100 idx = config->addr_idx;
1102 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1103 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1104 spin_unlock(&drvdata->spinlock);
1108 val = (unsigned long)config->addr_val[idx];
1109 spin_unlock(&drvdata->spinlock);
1110 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1113 static ssize_t addr_stop_store(struct device *dev,
1114 struct device_attribute *attr,
1115 const char *buf, size_t size)
1119 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1120 struct etmv4_config *config = &drvdata->config;
1122 if (kstrtoul(buf, 16, &val))
1125 spin_lock(&drvdata->spinlock);
1126 idx = config->addr_idx;
1127 if (!drvdata->nr_addr_cmp) {
1128 spin_unlock(&drvdata->spinlock);
1131 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1132 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1133 spin_unlock(&drvdata->spinlock);
1137 config->addr_val[idx] = (u64)val;
1138 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1139 config->vissctlr |= BIT(idx + 16);
1140 /* SSSTATUS, bit[9] - turn on start/stop logic */
1141 config->vinst_ctrl |= BIT(9);
1142 spin_unlock(&drvdata->spinlock);
1145 static DEVICE_ATTR_RW(addr_stop);
1147 static ssize_t addr_ctxtype_show(struct device *dev,
1148 struct device_attribute *attr,
1153 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1154 struct etmv4_config *config = &drvdata->config;
1156 spin_lock(&drvdata->spinlock);
1157 idx = config->addr_idx;
1158 /* CONTEXTTYPE, bits[3:2] */
1159 val = BMVAL(config->addr_acc[idx], 2, 3);
1160 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1161 (val == ETM_CTX_CTXID ? "ctxid" :
1162 (val == ETM_CTX_VMID ? "vmid" : "all")));
1163 spin_unlock(&drvdata->spinlock);
1167 static ssize_t addr_ctxtype_store(struct device *dev,
1168 struct device_attribute *attr,
1169 const char *buf, size_t size)
1173 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1174 struct etmv4_config *config = &drvdata->config;
1176 if (strlen(buf) >= 10)
1178 if (sscanf(buf, "%s", str) != 1)
1181 spin_lock(&drvdata->spinlock);
1182 idx = config->addr_idx;
1183 if (!strcmp(str, "none"))
1184 /* start by clearing context type bits */
1185 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1186 else if (!strcmp(str, "ctxid")) {
1187 /* 0b01 The trace unit performs a Context ID */
1188 if (drvdata->numcidc) {
1189 config->addr_acc[idx] |= BIT(2);
1190 config->addr_acc[idx] &= ~BIT(3);
1192 } else if (!strcmp(str, "vmid")) {
1193 /* 0b10 The trace unit performs a VMID */
1194 if (drvdata->numvmidc) {
1195 config->addr_acc[idx] &= ~BIT(2);
1196 config->addr_acc[idx] |= BIT(3);
1198 } else if (!strcmp(str, "all")) {
1200 * 0b11 The trace unit performs a Context ID
1201 * comparison and a VMID
1203 if (drvdata->numcidc)
1204 config->addr_acc[idx] |= BIT(2);
1205 if (drvdata->numvmidc)
1206 config->addr_acc[idx] |= BIT(3);
1208 spin_unlock(&drvdata->spinlock);
1211 static DEVICE_ATTR_RW(addr_ctxtype);
1213 static ssize_t addr_context_show(struct device *dev,
1214 struct device_attribute *attr,
1219 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1220 struct etmv4_config *config = &drvdata->config;
1222 spin_lock(&drvdata->spinlock);
1223 idx = config->addr_idx;
1224 /* context ID comparator bits[6:4] */
1225 val = BMVAL(config->addr_acc[idx], 4, 6);
1226 spin_unlock(&drvdata->spinlock);
1227 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1230 static ssize_t addr_context_store(struct device *dev,
1231 struct device_attribute *attr,
1232 const char *buf, size_t size)
1236 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1237 struct etmv4_config *config = &drvdata->config;
1239 if (kstrtoul(buf, 16, &val))
1241 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1243 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1244 drvdata->numcidc : drvdata->numvmidc))
1247 spin_lock(&drvdata->spinlock);
1248 idx = config->addr_idx;
1249 /* clear context ID comparator bits[6:4] */
1250 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1251 config->addr_acc[idx] |= (val << 4);
1252 spin_unlock(&drvdata->spinlock);
1255 static DEVICE_ATTR_RW(addr_context);
1257 static ssize_t seq_idx_show(struct device *dev,
1258 struct device_attribute *attr,
1262 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1263 struct etmv4_config *config = &drvdata->config;
1265 val = config->seq_idx;
1266 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1269 static ssize_t seq_idx_store(struct device *dev,
1270 struct device_attribute *attr,
1271 const char *buf, size_t size)
1274 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1275 struct etmv4_config *config = &drvdata->config;
1277 if (kstrtoul(buf, 16, &val))
1279 if (val >= drvdata->nrseqstate - 1)
1283 * Use spinlock to ensure index doesn't change while it gets
1284 * dereferenced multiple times within a spinlock block elsewhere.
1286 spin_lock(&drvdata->spinlock);
1287 config->seq_idx = val;
1288 spin_unlock(&drvdata->spinlock);
1291 static DEVICE_ATTR_RW(seq_idx);
1293 static ssize_t seq_state_show(struct device *dev,
1294 struct device_attribute *attr,
1298 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1299 struct etmv4_config *config = &drvdata->config;
1301 val = config->seq_state;
1302 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1305 static ssize_t seq_state_store(struct device *dev,
1306 struct device_attribute *attr,
1307 const char *buf, size_t size)
1310 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1311 struct etmv4_config *config = &drvdata->config;
1313 if (kstrtoul(buf, 16, &val))
1315 if (val >= drvdata->nrseqstate)
1318 config->seq_state = val;
1321 static DEVICE_ATTR_RW(seq_state);
1323 static ssize_t seq_event_show(struct device *dev,
1324 struct device_attribute *attr,
1329 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1330 struct etmv4_config *config = &drvdata->config;
1332 spin_lock(&drvdata->spinlock);
1333 idx = config->seq_idx;
1334 val = config->seq_ctrl[idx];
1335 spin_unlock(&drvdata->spinlock);
1336 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1339 static ssize_t seq_event_store(struct device *dev,
1340 struct device_attribute *attr,
1341 const char *buf, size_t size)
1345 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1346 struct etmv4_config *config = &drvdata->config;
1348 if (kstrtoul(buf, 16, &val))
1351 spin_lock(&drvdata->spinlock);
1352 idx = config->seq_idx;
1353 /* Seq control has two masks B[15:8] F[7:0] */
1354 config->seq_ctrl[idx] = val & 0xFFFF;
1355 spin_unlock(&drvdata->spinlock);
1358 static DEVICE_ATTR_RW(seq_event);
1360 static ssize_t seq_reset_event_show(struct device *dev,
1361 struct device_attribute *attr,
1365 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1366 struct etmv4_config *config = &drvdata->config;
1368 val = config->seq_rst;
1369 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1372 static ssize_t seq_reset_event_store(struct device *dev,
1373 struct device_attribute *attr,
1374 const char *buf, size_t size)
1377 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1378 struct etmv4_config *config = &drvdata->config;
1380 if (kstrtoul(buf, 16, &val))
1382 if (!(drvdata->nrseqstate))
1385 config->seq_rst = val & ETMv4_EVENT_MASK;
1388 static DEVICE_ATTR_RW(seq_reset_event);
1390 static ssize_t cntr_idx_show(struct device *dev,
1391 struct device_attribute *attr,
1395 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1396 struct etmv4_config *config = &drvdata->config;
1398 val = config->cntr_idx;
1399 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1402 static ssize_t cntr_idx_store(struct device *dev,
1403 struct device_attribute *attr,
1404 const char *buf, size_t size)
1407 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1408 struct etmv4_config *config = &drvdata->config;
1410 if (kstrtoul(buf, 16, &val))
1412 if (val >= drvdata->nr_cntr)
1416 * Use spinlock to ensure index doesn't change while it gets
1417 * dereferenced multiple times within a spinlock block elsewhere.
1419 spin_lock(&drvdata->spinlock);
1420 config->cntr_idx = val;
1421 spin_unlock(&drvdata->spinlock);
1424 static DEVICE_ATTR_RW(cntr_idx);
1426 static ssize_t cntrldvr_show(struct device *dev,
1427 struct device_attribute *attr,
1432 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1433 struct etmv4_config *config = &drvdata->config;
1435 spin_lock(&drvdata->spinlock);
1436 idx = config->cntr_idx;
1437 val = config->cntrldvr[idx];
1438 spin_unlock(&drvdata->spinlock);
1439 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1442 static ssize_t cntrldvr_store(struct device *dev,
1443 struct device_attribute *attr,
1444 const char *buf, size_t size)
1448 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1449 struct etmv4_config *config = &drvdata->config;
1451 if (kstrtoul(buf, 16, &val))
1453 if (val > ETM_CNTR_MAX_VAL)
1456 spin_lock(&drvdata->spinlock);
1457 idx = config->cntr_idx;
1458 config->cntrldvr[idx] = val;
1459 spin_unlock(&drvdata->spinlock);
1462 static DEVICE_ATTR_RW(cntrldvr);
1464 static ssize_t cntr_val_show(struct device *dev,
1465 struct device_attribute *attr,
1470 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1471 struct etmv4_config *config = &drvdata->config;
1473 spin_lock(&drvdata->spinlock);
1474 idx = config->cntr_idx;
1475 val = config->cntr_val[idx];
1476 spin_unlock(&drvdata->spinlock);
1477 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1480 static ssize_t cntr_val_store(struct device *dev,
1481 struct device_attribute *attr,
1482 const char *buf, size_t size)
1486 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1487 struct etmv4_config *config = &drvdata->config;
1489 if (kstrtoul(buf, 16, &val))
1491 if (val > ETM_CNTR_MAX_VAL)
1494 spin_lock(&drvdata->spinlock);
1495 idx = config->cntr_idx;
1496 config->cntr_val[idx] = val;
1497 spin_unlock(&drvdata->spinlock);
1500 static DEVICE_ATTR_RW(cntr_val);
1502 static ssize_t cntr_ctrl_show(struct device *dev,
1503 struct device_attribute *attr,
1508 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1509 struct etmv4_config *config = &drvdata->config;
1511 spin_lock(&drvdata->spinlock);
1512 idx = config->cntr_idx;
1513 val = config->cntr_ctrl[idx];
1514 spin_unlock(&drvdata->spinlock);
1515 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1518 static ssize_t cntr_ctrl_store(struct device *dev,
1519 struct device_attribute *attr,
1520 const char *buf, size_t size)
1524 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1525 struct etmv4_config *config = &drvdata->config;
1527 if (kstrtoul(buf, 16, &val))
1530 spin_lock(&drvdata->spinlock);
1531 idx = config->cntr_idx;
1532 config->cntr_ctrl[idx] = val;
1533 spin_unlock(&drvdata->spinlock);
1536 static DEVICE_ATTR_RW(cntr_ctrl);
1538 static ssize_t res_idx_show(struct device *dev,
1539 struct device_attribute *attr,
1543 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1544 struct etmv4_config *config = &drvdata->config;
1546 val = config->res_idx;
1547 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1550 static ssize_t res_idx_store(struct device *dev,
1551 struct device_attribute *attr,
1552 const char *buf, size_t size)
1555 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1556 struct etmv4_config *config = &drvdata->config;
1558 if (kstrtoul(buf, 16, &val))
1560 /* Resource selector pair 0 is always implemented and reserved */
1561 if ((val == 0) || (val >= drvdata->nr_resource))
1565 * Use spinlock to ensure index doesn't change while it gets
1566 * dereferenced multiple times within a spinlock block elsewhere.
1568 spin_lock(&drvdata->spinlock);
1569 config->res_idx = val;
1570 spin_unlock(&drvdata->spinlock);
1573 static DEVICE_ATTR_RW(res_idx);
1575 static ssize_t res_ctrl_show(struct device *dev,
1576 struct device_attribute *attr,
1581 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1582 struct etmv4_config *config = &drvdata->config;
1584 spin_lock(&drvdata->spinlock);
1585 idx = config->res_idx;
1586 val = config->res_ctrl[idx];
1587 spin_unlock(&drvdata->spinlock);
1588 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1591 static ssize_t res_ctrl_store(struct device *dev,
1592 struct device_attribute *attr,
1593 const char *buf, size_t size)
1597 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1598 struct etmv4_config *config = &drvdata->config;
1600 if (kstrtoul(buf, 16, &val))
1603 spin_lock(&drvdata->spinlock);
1604 idx = config->res_idx;
1605 /* For odd idx pair inversal bit is RES0 */
1607 /* PAIRINV, bit[21] */
1609 config->res_ctrl[idx] = val & GENMASK(21, 0);
1610 spin_unlock(&drvdata->spinlock);
1613 static DEVICE_ATTR_RW(res_ctrl);
1615 static ssize_t ctxid_idx_show(struct device *dev,
1616 struct device_attribute *attr,
1620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1621 struct etmv4_config *config = &drvdata->config;
1623 val = config->ctxid_idx;
1624 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1627 static ssize_t ctxid_idx_store(struct device *dev,
1628 struct device_attribute *attr,
1629 const char *buf, size_t size)
1632 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1633 struct etmv4_config *config = &drvdata->config;
1635 if (kstrtoul(buf, 16, &val))
1637 if (val >= drvdata->numcidc)
1641 * Use spinlock to ensure index doesn't change while it gets
1642 * dereferenced multiple times within a spinlock block elsewhere.
1644 spin_lock(&drvdata->spinlock);
1645 config->ctxid_idx = val;
1646 spin_unlock(&drvdata->spinlock);
1649 static DEVICE_ATTR_RW(ctxid_idx);
1651 static ssize_t ctxid_pid_show(struct device *dev,
1652 struct device_attribute *attr,
1657 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1658 struct etmv4_config *config = &drvdata->config;
1660 spin_lock(&drvdata->spinlock);
1661 idx = config->ctxid_idx;
1662 val = (unsigned long)config->ctxid_vpid[idx];
1663 spin_unlock(&drvdata->spinlock);
1664 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1667 static ssize_t ctxid_pid_store(struct device *dev,
1668 struct device_attribute *attr,
1669 const char *buf, size_t size)
1672 unsigned long vpid, pid;
1673 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1674 struct etmv4_config *config = &drvdata->config;
1677 * only implemented when ctxid tracing is enabled, i.e. at least one
1678 * ctxid comparator is implemented and ctxid is greater than 0 bits
1681 if (!drvdata->ctxid_size || !drvdata->numcidc)
1683 if (kstrtoul(buf, 16, &vpid))
1686 pid = coresight_vpid_to_pid(vpid);
1688 spin_lock(&drvdata->spinlock);
1689 idx = config->ctxid_idx;
1690 config->ctxid_pid[idx] = (u64)pid;
1691 config->ctxid_vpid[idx] = (u64)vpid;
1692 spin_unlock(&drvdata->spinlock);
1695 static DEVICE_ATTR_RW(ctxid_pid);
1697 static ssize_t ctxid_masks_show(struct device *dev,
1698 struct device_attribute *attr,
1701 unsigned long val1, val2;
1702 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1703 struct etmv4_config *config = &drvdata->config;
1705 spin_lock(&drvdata->spinlock);
1706 val1 = config->ctxid_mask0;
1707 val2 = config->ctxid_mask1;
1708 spin_unlock(&drvdata->spinlock);
1709 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1712 static ssize_t ctxid_masks_store(struct device *dev,
1713 struct device_attribute *attr,
1714 const char *buf, size_t size)
1717 unsigned long val1, val2, mask;
1718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1719 struct etmv4_config *config = &drvdata->config;
1722 * only implemented when ctxid tracing is enabled, i.e. at least one
1723 * ctxid comparator is implemented and ctxid is greater than 0 bits
1726 if (!drvdata->ctxid_size || !drvdata->numcidc)
1728 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1731 spin_lock(&drvdata->spinlock);
1733 * each byte[0..3] controls mask value applied to ctxid
1736 switch (drvdata->numcidc) {
1738 /* COMP0, bits[7:0] */
1739 config->ctxid_mask0 = val1 & 0xFF;
1742 /* COMP1, bits[15:8] */
1743 config->ctxid_mask0 = val1 & 0xFFFF;
1746 /* COMP2, bits[23:16] */
1747 config->ctxid_mask0 = val1 & 0xFFFFFF;
1750 /* COMP3, bits[31:24] */
1751 config->ctxid_mask0 = val1;
1754 /* COMP4, bits[7:0] */
1755 config->ctxid_mask0 = val1;
1756 config->ctxid_mask1 = val2 & 0xFF;
1759 /* COMP5, bits[15:8] */
1760 config->ctxid_mask0 = val1;
1761 config->ctxid_mask1 = val2 & 0xFFFF;
1764 /* COMP6, bits[23:16] */
1765 config->ctxid_mask0 = val1;
1766 config->ctxid_mask1 = val2 & 0xFFFFFF;
1769 /* COMP7, bits[31:24] */
1770 config->ctxid_mask0 = val1;
1771 config->ctxid_mask1 = val2;
1777 * If software sets a mask bit to 1, it must program relevant byte
1778 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1779 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1780 * of ctxid comparator0 value (corresponding to byte 0) register.
1782 mask = config->ctxid_mask0;
1783 for (i = 0; i < drvdata->numcidc; i++) {
1784 /* mask value of corresponding ctxid comparator */
1785 maskbyte = mask & ETMv4_EVENT_MASK;
1787 * each bit corresponds to a byte of respective ctxid comparator
1790 for (j = 0; j < 8; j++) {
1792 config->ctxid_pid[i] &= ~(0xFF << (j * 8));
1795 /* Select the next ctxid comparator mask value */
1797 /* ctxid comparators[4-7] */
1798 mask = config->ctxid_mask1;
1803 spin_unlock(&drvdata->spinlock);
1806 static DEVICE_ATTR_RW(ctxid_masks);
1808 static ssize_t vmid_idx_show(struct device *dev,
1809 struct device_attribute *attr,
1813 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1814 struct etmv4_config *config = &drvdata->config;
1816 val = config->vmid_idx;
1817 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1820 static ssize_t vmid_idx_store(struct device *dev,
1821 struct device_attribute *attr,
1822 const char *buf, size_t size)
1825 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1826 struct etmv4_config *config = &drvdata->config;
1828 if (kstrtoul(buf, 16, &val))
1830 if (val >= drvdata->numvmidc)
1834 * Use spinlock to ensure index doesn't change while it gets
1835 * dereferenced multiple times within a spinlock block elsewhere.
1837 spin_lock(&drvdata->spinlock);
1838 config->vmid_idx = val;
1839 spin_unlock(&drvdata->spinlock);
1842 static DEVICE_ATTR_RW(vmid_idx);
1844 static ssize_t vmid_val_show(struct device *dev,
1845 struct device_attribute *attr,
1849 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1850 struct etmv4_config *config = &drvdata->config;
1852 val = (unsigned long)config->vmid_val[config->vmid_idx];
1853 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1856 static ssize_t vmid_val_store(struct device *dev,
1857 struct device_attribute *attr,
1858 const char *buf, size_t size)
1861 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1862 struct etmv4_config *config = &drvdata->config;
1865 * only implemented when vmid tracing is enabled, i.e. at least one
1866 * vmid comparator is implemented and at least 8 bit vmid size
1868 if (!drvdata->vmid_size || !drvdata->numvmidc)
1870 if (kstrtoul(buf, 16, &val))
1873 spin_lock(&drvdata->spinlock);
1874 config->vmid_val[config->vmid_idx] = (u64)val;
1875 spin_unlock(&drvdata->spinlock);
1878 static DEVICE_ATTR_RW(vmid_val);
1880 static ssize_t vmid_masks_show(struct device *dev,
1881 struct device_attribute *attr, char *buf)
1883 unsigned long val1, val2;
1884 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1885 struct etmv4_config *config = &drvdata->config;
1887 spin_lock(&drvdata->spinlock);
1888 val1 = config->vmid_mask0;
1889 val2 = config->vmid_mask1;
1890 spin_unlock(&drvdata->spinlock);
1891 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1894 static ssize_t vmid_masks_store(struct device *dev,
1895 struct device_attribute *attr,
1896 const char *buf, size_t size)
1899 unsigned long val1, val2, mask;
1900 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1901 struct etmv4_config *config = &drvdata->config;
1904 * only implemented when vmid tracing is enabled, i.e. at least one
1905 * vmid comparator is implemented and at least 8 bit vmid size
1907 if (!drvdata->vmid_size || !drvdata->numvmidc)
1909 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1912 spin_lock(&drvdata->spinlock);
1915 * each byte[0..3] controls mask value applied to vmid
1918 switch (drvdata->numvmidc) {
1920 /* COMP0, bits[7:0] */
1921 config->vmid_mask0 = val1 & 0xFF;
1924 /* COMP1, bits[15:8] */
1925 config->vmid_mask0 = val1 & 0xFFFF;
1928 /* COMP2, bits[23:16] */
1929 config->vmid_mask0 = val1 & 0xFFFFFF;
1932 /* COMP3, bits[31:24] */
1933 config->vmid_mask0 = val1;
1936 /* COMP4, bits[7:0] */
1937 config->vmid_mask0 = val1;
1938 config->vmid_mask1 = val2 & 0xFF;
1941 /* COMP5, bits[15:8] */
1942 config->vmid_mask0 = val1;
1943 config->vmid_mask1 = val2 & 0xFFFF;
1946 /* COMP6, bits[23:16] */
1947 config->vmid_mask0 = val1;
1948 config->vmid_mask1 = val2 & 0xFFFFFF;
1951 /* COMP7, bits[31:24] */
1952 config->vmid_mask0 = val1;
1953 config->vmid_mask1 = val2;
1960 * If software sets a mask bit to 1, it must program relevant byte
1961 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
1962 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
1963 * of vmid comparator0 value (corresponding to byte 0) register.
1965 mask = config->vmid_mask0;
1966 for (i = 0; i < drvdata->numvmidc; i++) {
1967 /* mask value of corresponding vmid comparator */
1968 maskbyte = mask & ETMv4_EVENT_MASK;
1970 * each bit corresponds to a byte of respective vmid comparator
1973 for (j = 0; j < 8; j++) {
1975 config->vmid_val[i] &= ~(0xFF << (j * 8));
1978 /* Select the next vmid comparator mask value */
1980 /* vmid comparators[4-7] */
1981 mask = config->vmid_mask1;
1985 spin_unlock(&drvdata->spinlock);
1988 static DEVICE_ATTR_RW(vmid_masks);
1990 static ssize_t cpu_show(struct device *dev,
1991 struct device_attribute *attr, char *buf)
1994 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1997 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2000 static DEVICE_ATTR_RO(cpu);
2002 static struct attribute *coresight_etmv4_attrs[] = {
2003 &dev_attr_nr_pe_cmp.attr,
2004 &dev_attr_nr_addr_cmp.attr,
2005 &dev_attr_nr_cntr.attr,
2006 &dev_attr_nr_ext_inp.attr,
2007 &dev_attr_numcidc.attr,
2008 &dev_attr_numvmidc.attr,
2009 &dev_attr_nrseqstate.attr,
2010 &dev_attr_nr_resource.attr,
2011 &dev_attr_nr_ss_cmp.attr,
2012 &dev_attr_reset.attr,
2013 &dev_attr_mode.attr,
2015 &dev_attr_event.attr,
2016 &dev_attr_event_instren.attr,
2017 &dev_attr_event_ts.attr,
2018 &dev_attr_syncfreq.attr,
2019 &dev_attr_cyc_threshold.attr,
2020 &dev_attr_bb_ctrl.attr,
2021 &dev_attr_event_vinst.attr,
2022 &dev_attr_s_exlevel_vinst.attr,
2023 &dev_attr_ns_exlevel_vinst.attr,
2024 &dev_attr_addr_idx.attr,
2025 &dev_attr_addr_instdatatype.attr,
2026 &dev_attr_addr_single.attr,
2027 &dev_attr_addr_range.attr,
2028 &dev_attr_addr_start.attr,
2029 &dev_attr_addr_stop.attr,
2030 &dev_attr_addr_ctxtype.attr,
2031 &dev_attr_addr_context.attr,
2032 &dev_attr_seq_idx.attr,
2033 &dev_attr_seq_state.attr,
2034 &dev_attr_seq_event.attr,
2035 &dev_attr_seq_reset_event.attr,
2036 &dev_attr_cntr_idx.attr,
2037 &dev_attr_cntrldvr.attr,
2038 &dev_attr_cntr_val.attr,
2039 &dev_attr_cntr_ctrl.attr,
2040 &dev_attr_res_idx.attr,
2041 &dev_attr_res_ctrl.attr,
2042 &dev_attr_ctxid_idx.attr,
2043 &dev_attr_ctxid_pid.attr,
2044 &dev_attr_ctxid_masks.attr,
2045 &dev_attr_vmid_idx.attr,
2046 &dev_attr_vmid_val.attr,
2047 &dev_attr_vmid_masks.attr,
2057 static void do_smp_cross_read(void *data)
2059 struct etmv4_reg *reg = data;
2061 reg->data = readl_relaxed(reg->addr);
2064 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2066 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2067 struct etmv4_reg reg;
2069 reg.addr = drvdata->base + offset;
2071 * smp cross call ensures the CPU will be powered up before
2072 * accessing the ETMv4 trace core registers
2074 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2078 #define coresight_etm4x_simple_func(name, offset) \
2079 coresight_simple_func(struct etmv4_drvdata, NULL, name, offset)
2081 #define coresight_etm4x_cross_read(name, offset) \
2082 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
2085 coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
2086 coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
2087 coresight_etm4x_simple_func(trclsr, TRCLSR);
2088 coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
2089 coresight_etm4x_simple_func(trcdevid, TRCDEVID);
2090 coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
2091 coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
2092 coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
2093 coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
2094 coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
2095 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2096 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2097 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2099 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2100 &dev_attr_trcoslsr.attr,
2101 &dev_attr_trcpdcr.attr,
2102 &dev_attr_trcpdsr.attr,
2103 &dev_attr_trclsr.attr,
2104 &dev_attr_trcconfig.attr,
2105 &dev_attr_trctraceid.attr,
2106 &dev_attr_trcauthstatus.attr,
2107 &dev_attr_trcdevid.attr,
2108 &dev_attr_trcdevtype.attr,
2109 &dev_attr_trcpidr0.attr,
2110 &dev_attr_trcpidr1.attr,
2111 &dev_attr_trcpidr2.attr,
2112 &dev_attr_trcpidr3.attr,
/* ID registers are read via smp cross-call so the owning CPU is powered. */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2130 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2131 &dev_attr_trcidr0.attr,
2132 &dev_attr_trcidr1.attr,
2133 &dev_attr_trcidr2.attr,
2134 &dev_attr_trcidr3.attr,
2135 &dev_attr_trcidr4.attr,
2136 &dev_attr_trcidr5.attr,
2137 /* trcidr[6,7] are reserved */
2138 &dev_attr_trcidr8.attr,
2139 &dev_attr_trcidr9.attr,
2140 &dev_attr_trcidr10.attr,
2141 &dev_attr_trcidr11.attr,
2142 &dev_attr_trcidr12.attr,
2143 &dev_attr_trcidr13.attr,
2147 static const struct attribute_group coresight_etmv4_group = {
2148 .attrs = coresight_etmv4_attrs,
2151 static const struct attribute_group coresight_etmv4_mgmt_group = {
2152 .attrs = coresight_etmv4_mgmt_attrs,
2156 static const struct attribute_group coresight_etmv4_trcidr_group = {
2157 .attrs = coresight_etmv4_trcidr_attrs,
2161 const struct attribute_group *coresight_etmv4_groups[] = {
2162 &coresight_etmv4_group,
2163 &coresight_etmv4_mgmt_group,
2164 &coresight_etmv4_trcidr_group,