/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18 #include <linux/pm_runtime.h>
19 #include <linux/sysfs.h>
20 #include "coresight-etm4x.h"
21 #include "coresight-priv.h"
23 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
26 struct etmv4_config *config = &drvdata->config;
28 idx = config->addr_idx;
31 * TRCACATRn.TYPE bit[1:0]: type of comparison
32 * the trace unit performs
34 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
39 * We are performing instruction address comparison. Set the
40 * relevant bit of ViewInst Include/Exclude Control register
41 * for corresponding address comparator pair.
43 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
44 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
47 if (exclude == true) {
49 * Set exclude bit and unset the include bit
50 * corresponding to comparator pair
52 config->viiectlr |= BIT(idx / 2 + 16);
53 config->viiectlr &= ~BIT(idx / 2);
56 * Set include bit and unset exclude bit
57 * corresponding to comparator pair
59 config->viiectlr |= BIT(idx / 2);
60 config->viiectlr &= ~BIT(idx / 2 + 16);
66 static ssize_t nr_pe_cmp_show(struct device *dev,
67 struct device_attribute *attr,
71 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
73 val = drvdata->nr_pe_cmp;
74 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
76 static DEVICE_ATTR_RO(nr_pe_cmp);
78 static ssize_t nr_addr_cmp_show(struct device *dev,
79 struct device_attribute *attr,
83 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
85 val = drvdata->nr_addr_cmp;
86 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
88 static DEVICE_ATTR_RO(nr_addr_cmp);
90 static ssize_t nr_cntr_show(struct device *dev,
91 struct device_attribute *attr,
95 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
97 val = drvdata->nr_cntr;
98 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
100 static DEVICE_ATTR_RO(nr_cntr);
102 static ssize_t nr_ext_inp_show(struct device *dev,
103 struct device_attribute *attr,
107 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
109 val = drvdata->nr_ext_inp;
110 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
112 static DEVICE_ATTR_RO(nr_ext_inp);
114 static ssize_t numcidc_show(struct device *dev,
115 struct device_attribute *attr,
119 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
121 val = drvdata->numcidc;
122 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
124 static DEVICE_ATTR_RO(numcidc);
126 static ssize_t numvmidc_show(struct device *dev,
127 struct device_attribute *attr,
131 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
133 val = drvdata->numvmidc;
134 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
136 static DEVICE_ATTR_RO(numvmidc);
138 static ssize_t nrseqstate_show(struct device *dev,
139 struct device_attribute *attr,
143 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
145 val = drvdata->nrseqstate;
146 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
148 static DEVICE_ATTR_RO(nrseqstate);
150 static ssize_t nr_resource_show(struct device *dev,
151 struct device_attribute *attr,
155 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
157 val = drvdata->nr_resource;
158 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
160 static DEVICE_ATTR_RO(nr_resource);
162 static ssize_t nr_ss_cmp_show(struct device *dev,
163 struct device_attribute *attr,
167 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
169 val = drvdata->nr_ss_cmp;
170 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
172 static DEVICE_ATTR_RO(nr_ss_cmp);
174 static ssize_t reset_store(struct device *dev,
175 struct device_attribute *attr,
176 const char *buf, size_t size)
180 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
181 struct etmv4_config *config = &drvdata->config;
183 if (kstrtoul(buf, 16, &val))
186 spin_lock(&drvdata->spinlock);
190 /* Disable data tracing: do not trace load and store data transfers */
191 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
192 config->cfg &= ~(BIT(1) | BIT(2));
194 /* Disable data value and data address tracing */
195 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
196 ETM_MODE_DATA_TRACE_VAL);
197 config->cfg &= ~(BIT(16) | BIT(17));
199 /* Disable all events tracing */
200 config->eventctrl0 = 0x0;
201 config->eventctrl1 = 0x0;
203 /* Disable timestamp event */
204 config->ts_ctrl = 0x0;
206 /* Disable stalling */
207 config->stall_ctrl = 0x0;
209 /* Reset trace synchronization period to 2^8 = 256 bytes*/
210 if (drvdata->syncpr == false)
211 config->syncfreq = 0x8;
214 * Enable ViewInst to trace everything with start-stop logic in
215 * started state. ARM recommends start-stop logic is set before
218 config->vinst_ctrl |= BIT(0);
219 if (drvdata->nr_addr_cmp == true) {
220 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
221 /* SSSTATUS, bit[9] */
222 config->vinst_ctrl |= BIT(9);
225 /* No address range filtering for ViewInst */
226 config->viiectlr = 0x0;
228 /* No start-stop filtering for ViewInst */
229 config->vissctlr = 0x0;
231 /* Disable seq events */
232 for (i = 0; i < drvdata->nrseqstate-1; i++)
233 config->seq_ctrl[i] = 0x0;
234 config->seq_rst = 0x0;
235 config->seq_state = 0x0;
237 /* Disable external input events */
238 config->ext_inp = 0x0;
240 config->cntr_idx = 0x0;
241 for (i = 0; i < drvdata->nr_cntr; i++) {
242 config->cntrldvr[i] = 0x0;
243 config->cntr_ctrl[i] = 0x0;
244 config->cntr_val[i] = 0x0;
247 config->res_idx = 0x0;
248 for (i = 0; i < drvdata->nr_resource; i++)
249 config->res_ctrl[i] = 0x0;
251 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
252 config->ss_ctrl[i] = 0x0;
253 config->ss_pe_cmp[i] = 0x0;
256 config->addr_idx = 0x0;
257 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
258 config->addr_val[i] = 0x0;
259 config->addr_acc[i] = 0x0;
260 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
263 config->ctxid_idx = 0x0;
264 for (i = 0; i < drvdata->numcidc; i++) {
265 config->ctxid_pid[i] = 0x0;
266 config->ctxid_vpid[i] = 0x0;
269 config->ctxid_mask0 = 0x0;
270 config->ctxid_mask1 = 0x0;
272 config->vmid_idx = 0x0;
273 for (i = 0; i < drvdata->numvmidc; i++)
274 config->vmid_val[i] = 0x0;
275 config->vmid_mask0 = 0x0;
276 config->vmid_mask1 = 0x0;
278 drvdata->trcid = drvdata->cpu + 1;
280 spin_unlock(&drvdata->spinlock);
284 static DEVICE_ATTR_WO(reset);
286 static ssize_t mode_show(struct device *dev,
287 struct device_attribute *attr,
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
295 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
298 static ssize_t mode_store(struct device *dev,
299 struct device_attribute *attr,
300 const char *buf, size_t size)
302 unsigned long val, mode;
303 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
304 struct etmv4_config *config = &drvdata->config;
306 if (kstrtoul(buf, 16, &val))
309 spin_lock(&drvdata->spinlock);
310 config->mode = val & ETMv4_MODE_ALL;
312 if (config->mode & ETM_MODE_EXCLUDE)
313 etm4_set_mode_exclude(drvdata, true);
315 etm4_set_mode_exclude(drvdata, false);
317 if (drvdata->instrp0 == true) {
318 /* start by clearing instruction P0 field */
319 config->cfg &= ~(BIT(1) | BIT(2));
320 if (config->mode & ETM_MODE_LOAD)
321 /* 0b01 Trace load instructions as P0 instructions */
322 config->cfg |= BIT(1);
323 if (config->mode & ETM_MODE_STORE)
324 /* 0b10 Trace store instructions as P0 instructions */
325 config->cfg |= BIT(2);
326 if (config->mode & ETM_MODE_LOAD_STORE)
328 * 0b11 Trace load and store instructions
331 config->cfg |= BIT(1) | BIT(2);
334 /* bit[3], Branch broadcast mode */
335 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
336 config->cfg |= BIT(3);
338 config->cfg &= ~BIT(3);
340 /* bit[4], Cycle counting instruction trace bit */
341 if ((config->mode & ETMv4_MODE_CYCACC) &&
342 (drvdata->trccci == true))
343 config->cfg |= BIT(4);
345 config->cfg &= ~BIT(4);
347 /* bit[6], Context ID tracing bit */
348 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
349 config->cfg |= BIT(6);
351 config->cfg &= ~BIT(6);
353 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
354 config->cfg |= BIT(7);
356 config->cfg &= ~BIT(7);
358 /* bits[10:8], Conditional instruction tracing bit */
359 mode = ETM_MODE_COND(config->mode);
360 if (drvdata->trccond == true) {
361 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
362 config->cfg |= mode << 8;
365 /* bit[11], Global timestamp tracing bit */
366 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
367 config->cfg |= BIT(11);
369 config->cfg &= ~BIT(11);
371 /* bit[12], Return stack enable bit */
372 if ((config->mode & ETM_MODE_RETURNSTACK) &&
373 (drvdata->retstack == true))
374 config->cfg |= BIT(12);
376 config->cfg &= ~BIT(12);
378 /* bits[14:13], Q element enable field */
379 mode = ETM_MODE_QELEM(config->mode);
380 /* start by clearing QE bits */
381 config->cfg &= ~(BIT(13) | BIT(14));
382 /* if supported, Q elements with instruction counts are enabled */
383 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
384 config->cfg |= BIT(13);
386 * if supported, Q elements with and without instruction
389 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
390 config->cfg |= BIT(14);
392 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
393 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
394 (drvdata->atbtrig == true))
395 config->eventctrl1 |= BIT(11);
397 config->eventctrl1 &= ~BIT(11);
399 /* bit[12], Low-power state behavior override bit */
400 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
401 (drvdata->lpoverride == true))
402 config->eventctrl1 |= BIT(12);
404 config->eventctrl1 &= ~BIT(12);
406 /* bit[8], Instruction stall bit */
407 if (config->mode & ETM_MODE_ISTALL_EN)
408 config->stall_ctrl |= BIT(8);
410 config->stall_ctrl &= ~BIT(8);
412 /* bit[10], Prioritize instruction trace bit */
413 if (config->mode & ETM_MODE_INSTPRIO)
414 config->stall_ctrl |= BIT(10);
416 config->stall_ctrl &= ~BIT(10);
418 /* bit[13], Trace overflow prevention bit */
419 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
420 (drvdata->nooverflow == true))
421 config->stall_ctrl |= BIT(13);
423 config->stall_ctrl &= ~BIT(13);
425 /* bit[9] Start/stop logic control bit */
426 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
427 config->vinst_ctrl |= BIT(9);
429 config->vinst_ctrl &= ~BIT(9);
431 /* bit[10], Whether a trace unit must trace a Reset exception */
432 if (config->mode & ETM_MODE_TRACE_RESET)
433 config->vinst_ctrl |= BIT(10);
435 config->vinst_ctrl &= ~BIT(10);
437 /* bit[11], Whether a trace unit must trace a system error exception */
438 if ((config->mode & ETM_MODE_TRACE_ERR) &&
439 (drvdata->trc_error == true))
440 config->vinst_ctrl |= BIT(11);
442 config->vinst_ctrl &= ~BIT(11);
444 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
445 etm4_config_trace_mode(config);
447 spin_unlock(&drvdata->spinlock);
451 static DEVICE_ATTR_RW(mode);
453 static ssize_t pe_show(struct device *dev,
454 struct device_attribute *attr,
458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
459 struct etmv4_config *config = &drvdata->config;
461 val = config->pe_sel;
462 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
465 static ssize_t pe_store(struct device *dev,
466 struct device_attribute *attr,
467 const char *buf, size_t size)
470 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
471 struct etmv4_config *config = &drvdata->config;
473 if (kstrtoul(buf, 16, &val))
476 spin_lock(&drvdata->spinlock);
477 if (val > drvdata->nr_pe) {
478 spin_unlock(&drvdata->spinlock);
482 config->pe_sel = val;
483 spin_unlock(&drvdata->spinlock);
486 static DEVICE_ATTR_RW(pe);
488 static ssize_t event_show(struct device *dev,
489 struct device_attribute *attr,
493 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
494 struct etmv4_config *config = &drvdata->config;
496 val = config->eventctrl0;
497 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
500 static ssize_t event_store(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t size)
505 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
506 struct etmv4_config *config = &drvdata->config;
508 if (kstrtoul(buf, 16, &val))
511 spin_lock(&drvdata->spinlock);
512 switch (drvdata->nr_event) {
514 /* EVENT0, bits[7:0] */
515 config->eventctrl0 = val & 0xFF;
518 /* EVENT1, bits[15:8] */
519 config->eventctrl0 = val & 0xFFFF;
522 /* EVENT2, bits[23:16] */
523 config->eventctrl0 = val & 0xFFFFFF;
526 /* EVENT3, bits[31:24] */
527 config->eventctrl0 = val;
532 spin_unlock(&drvdata->spinlock);
535 static DEVICE_ATTR_RW(event);
537 static ssize_t event_instren_show(struct device *dev,
538 struct device_attribute *attr,
542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
543 struct etmv4_config *config = &drvdata->config;
545 val = BMVAL(config->eventctrl1, 0, 3);
546 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
549 static ssize_t event_instren_store(struct device *dev,
550 struct device_attribute *attr,
551 const char *buf, size_t size)
554 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
555 struct etmv4_config *config = &drvdata->config;
557 if (kstrtoul(buf, 16, &val))
560 spin_lock(&drvdata->spinlock);
561 /* start by clearing all instruction event enable bits */
562 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
563 switch (drvdata->nr_event) {
565 /* generate Event element for event 1 */
566 config->eventctrl1 |= val & BIT(1);
569 /* generate Event element for event 1 and 2 */
570 config->eventctrl1 |= val & (BIT(0) | BIT(1));
573 /* generate Event element for event 1, 2 and 3 */
574 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
577 /* generate Event element for all 4 events */
578 config->eventctrl1 |= val & 0xF;
583 spin_unlock(&drvdata->spinlock);
586 static DEVICE_ATTR_RW(event_instren);
588 static ssize_t event_ts_show(struct device *dev,
589 struct device_attribute *attr,
593 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 struct etmv4_config *config = &drvdata->config;
596 val = config->ts_ctrl;
597 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
600 static ssize_t event_ts_store(struct device *dev,
601 struct device_attribute *attr,
602 const char *buf, size_t size)
605 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
606 struct etmv4_config *config = &drvdata->config;
608 if (kstrtoul(buf, 16, &val))
610 if (!drvdata->ts_size)
613 config->ts_ctrl = val & ETMv4_EVENT_MASK;
616 static DEVICE_ATTR_RW(event_ts);
618 static ssize_t syncfreq_show(struct device *dev,
619 struct device_attribute *attr,
623 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 struct etmv4_config *config = &drvdata->config;
626 val = config->syncfreq;
627 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
630 static ssize_t syncfreq_store(struct device *dev,
631 struct device_attribute *attr,
632 const char *buf, size_t size)
635 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
636 struct etmv4_config *config = &drvdata->config;
638 if (kstrtoul(buf, 16, &val))
640 if (drvdata->syncpr == true)
643 config->syncfreq = val & ETMv4_SYNC_MASK;
646 static DEVICE_ATTR_RW(syncfreq);
648 static ssize_t cyc_threshold_show(struct device *dev,
649 struct device_attribute *attr,
653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 struct etmv4_config *config = &drvdata->config;
656 val = config->ccctlr;
657 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
660 static ssize_t cyc_threshold_store(struct device *dev,
661 struct device_attribute *attr,
662 const char *buf, size_t size)
665 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
666 struct etmv4_config *config = &drvdata->config;
668 if (kstrtoul(buf, 16, &val))
671 /* mask off max threshold before checking min value */
672 val &= ETM_CYC_THRESHOLD_MASK;
673 if (val < drvdata->ccitmin)
676 config->ccctlr = val;
679 static DEVICE_ATTR_RW(cyc_threshold);
681 static ssize_t bb_ctrl_show(struct device *dev,
682 struct device_attribute *attr,
686 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
687 struct etmv4_config *config = &drvdata->config;
689 val = config->bb_ctrl;
690 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
693 static ssize_t bb_ctrl_store(struct device *dev,
694 struct device_attribute *attr,
695 const char *buf, size_t size)
698 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
699 struct etmv4_config *config = &drvdata->config;
701 if (kstrtoul(buf, 16, &val))
703 if (drvdata->trcbb == false)
705 if (!drvdata->nr_addr_cmp)
709 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
710 * individual range comparators. If include then at least 1
711 * range must be selected.
713 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
716 config->bb_ctrl = val & GENMASK(8, 0);
719 static DEVICE_ATTR_RW(bb_ctrl);
721 static ssize_t event_vinst_show(struct device *dev,
722 struct device_attribute *attr,
726 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
727 struct etmv4_config *config = &drvdata->config;
729 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
730 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
733 static ssize_t event_vinst_store(struct device *dev,
734 struct device_attribute *attr,
735 const char *buf, size_t size)
738 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
739 struct etmv4_config *config = &drvdata->config;
741 if (kstrtoul(buf, 16, &val))
744 spin_lock(&drvdata->spinlock);
745 val &= ETMv4_EVENT_MASK;
746 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
747 config->vinst_ctrl |= val;
748 spin_unlock(&drvdata->spinlock);
751 static DEVICE_ATTR_RW(event_vinst);
753 static ssize_t s_exlevel_vinst_show(struct device *dev,
754 struct device_attribute *attr,
758 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
759 struct etmv4_config *config = &drvdata->config;
761 val = BMVAL(config->vinst_ctrl, 16, 19);
762 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
765 static ssize_t s_exlevel_vinst_store(struct device *dev,
766 struct device_attribute *attr,
767 const char *buf, size_t size)
770 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
771 struct etmv4_config *config = &drvdata->config;
773 if (kstrtoul(buf, 16, &val))
776 spin_lock(&drvdata->spinlock);
777 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
778 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
779 /* enable instruction tracing for corresponding exception level */
780 val &= drvdata->s_ex_level;
781 config->vinst_ctrl |= (val << 16);
782 spin_unlock(&drvdata->spinlock);
785 static DEVICE_ATTR_RW(s_exlevel_vinst);
787 static ssize_t ns_exlevel_vinst_show(struct device *dev,
788 struct device_attribute *attr,
792 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
793 struct etmv4_config *config = &drvdata->config;
795 /* EXLEVEL_NS, bits[23:20] */
796 val = BMVAL(config->vinst_ctrl, 20, 23);
797 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
800 static ssize_t ns_exlevel_vinst_store(struct device *dev,
801 struct device_attribute *attr,
802 const char *buf, size_t size)
805 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
806 struct etmv4_config *config = &drvdata->config;
808 if (kstrtoul(buf, 16, &val))
811 spin_lock(&drvdata->spinlock);
812 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
813 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
814 /* enable instruction tracing for corresponding exception level */
815 val &= drvdata->ns_ex_level;
816 config->vinst_ctrl |= (val << 20);
817 spin_unlock(&drvdata->spinlock);
820 static DEVICE_ATTR_RW(ns_exlevel_vinst);
822 static ssize_t addr_idx_show(struct device *dev,
823 struct device_attribute *attr,
827 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
828 struct etmv4_config *config = &drvdata->config;
830 val = config->addr_idx;
831 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
834 static ssize_t addr_idx_store(struct device *dev,
835 struct device_attribute *attr,
836 const char *buf, size_t size)
839 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
840 struct etmv4_config *config = &drvdata->config;
842 if (kstrtoul(buf, 16, &val))
844 if (val >= drvdata->nr_addr_cmp * 2)
848 * Use spinlock to ensure index doesn't change while it gets
849 * dereferenced multiple times within a spinlock block elsewhere.
851 spin_lock(&drvdata->spinlock);
852 config->addr_idx = val;
853 spin_unlock(&drvdata->spinlock);
856 static DEVICE_ATTR_RW(addr_idx);
858 static ssize_t addr_instdatatype_show(struct device *dev,
859 struct device_attribute *attr,
864 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
865 struct etmv4_config *config = &drvdata->config;
867 spin_lock(&drvdata->spinlock);
868 idx = config->addr_idx;
869 val = BMVAL(config->addr_acc[idx], 0, 1);
870 len = scnprintf(buf, PAGE_SIZE, "%s\n",
871 val == ETM_INSTR_ADDR ? "instr" :
872 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
873 (val == ETM_DATA_STORE_ADDR ? "data_store" :
874 "data_load_store")));
875 spin_unlock(&drvdata->spinlock);
879 static ssize_t addr_instdatatype_store(struct device *dev,
880 struct device_attribute *attr,
881 const char *buf, size_t size)
885 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
886 struct etmv4_config *config = &drvdata->config;
888 if (strlen(buf) >= 20)
890 if (sscanf(buf, "%s", str) != 1)
893 spin_lock(&drvdata->spinlock);
894 idx = config->addr_idx;
895 if (!strcmp(str, "instr"))
896 /* TYPE, bits[1:0] */
897 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
899 spin_unlock(&drvdata->spinlock);
902 static DEVICE_ATTR_RW(addr_instdatatype);
904 static ssize_t addr_single_show(struct device *dev,
905 struct device_attribute *attr,
910 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
911 struct etmv4_config *config = &drvdata->config;
913 idx = config->addr_idx;
914 spin_lock(&drvdata->spinlock);
915 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
916 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
917 spin_unlock(&drvdata->spinlock);
920 val = (unsigned long)config->addr_val[idx];
921 spin_unlock(&drvdata->spinlock);
922 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
925 static ssize_t addr_single_store(struct device *dev,
926 struct device_attribute *attr,
927 const char *buf, size_t size)
931 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
932 struct etmv4_config *config = &drvdata->config;
934 if (kstrtoul(buf, 16, &val))
937 spin_lock(&drvdata->spinlock);
938 idx = config->addr_idx;
939 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
940 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
941 spin_unlock(&drvdata->spinlock);
945 config->addr_val[idx] = (u64)val;
946 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
947 spin_unlock(&drvdata->spinlock);
950 static DEVICE_ATTR_RW(addr_single);
952 static ssize_t addr_range_show(struct device *dev,
953 struct device_attribute *attr,
957 unsigned long val1, val2;
958 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
959 struct etmv4_config *config = &drvdata->config;
961 spin_lock(&drvdata->spinlock);
962 idx = config->addr_idx;
964 spin_unlock(&drvdata->spinlock);
967 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
968 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
969 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
970 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
971 spin_unlock(&drvdata->spinlock);
975 val1 = (unsigned long)config->addr_val[idx];
976 val2 = (unsigned long)config->addr_val[idx + 1];
977 spin_unlock(&drvdata->spinlock);
978 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
981 static ssize_t addr_range_store(struct device *dev,
982 struct device_attribute *attr,
983 const char *buf, size_t size)
986 unsigned long val1, val2;
987 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
988 struct etmv4_config *config = &drvdata->config;
990 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
992 /* lower address comparator cannot have a higher address value */
996 spin_lock(&drvdata->spinlock);
997 idx = config->addr_idx;
999 spin_unlock(&drvdata->spinlock);
1003 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1004 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1005 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1006 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1007 spin_unlock(&drvdata->spinlock);
1011 config->addr_val[idx] = (u64)val1;
1012 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1013 config->addr_val[idx + 1] = (u64)val2;
1014 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1016 * Program include or exclude control bits for vinst or vdata
1017 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1019 if (config->mode & ETM_MODE_EXCLUDE)
1020 etm4_set_mode_exclude(drvdata, true);
1022 etm4_set_mode_exclude(drvdata, false);
1024 spin_unlock(&drvdata->spinlock);
1027 static DEVICE_ATTR_RW(addr_range);
1029 static ssize_t addr_start_show(struct device *dev,
1030 struct device_attribute *attr,
1035 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1036 struct etmv4_config *config = &drvdata->config;
1038 spin_lock(&drvdata->spinlock);
1039 idx = config->addr_idx;
1041 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1042 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1043 spin_unlock(&drvdata->spinlock);
1047 val = (unsigned long)config->addr_val[idx];
1048 spin_unlock(&drvdata->spinlock);
1049 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1052 static ssize_t addr_start_store(struct device *dev,
1053 struct device_attribute *attr,
1054 const char *buf, size_t size)
1058 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1059 struct etmv4_config *config = &drvdata->config;
1061 if (kstrtoul(buf, 16, &val))
1064 spin_lock(&drvdata->spinlock);
1065 idx = config->addr_idx;
1066 if (!drvdata->nr_addr_cmp) {
1067 spin_unlock(&drvdata->spinlock);
1070 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1071 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1072 spin_unlock(&drvdata->spinlock);
1076 config->addr_val[idx] = (u64)val;
1077 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1078 config->vissctlr |= BIT(idx);
1079 /* SSSTATUS, bit[9] - turn on start/stop logic */
1080 config->vinst_ctrl |= BIT(9);
1081 spin_unlock(&drvdata->spinlock);
1084 static DEVICE_ATTR_RW(addr_start);
1086 static ssize_t addr_stop_show(struct device *dev,
1087 struct device_attribute *attr,
1092 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1093 struct etmv4_config *config = &drvdata->config;
1095 spin_lock(&drvdata->spinlock);
1096 idx = config->addr_idx;
1098 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1099 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1100 spin_unlock(&drvdata->spinlock);
1104 val = (unsigned long)config->addr_val[idx];
1105 spin_unlock(&drvdata->spinlock);
1106 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1109 static ssize_t addr_stop_store(struct device *dev,
1110 struct device_attribute *attr,
1111 const char *buf, size_t size)
1115 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1116 struct etmv4_config *config = &drvdata->config;
1118 if (kstrtoul(buf, 16, &val))
1121 spin_lock(&drvdata->spinlock);
1122 idx = config->addr_idx;
1123 if (!drvdata->nr_addr_cmp) {
1124 spin_unlock(&drvdata->spinlock);
1127 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1128 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1129 spin_unlock(&drvdata->spinlock);
1133 config->addr_val[idx] = (u64)val;
1134 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1135 config->vissctlr |= BIT(idx + 16);
1136 /* SSSTATUS, bit[9] - turn on start/stop logic */
1137 config->vinst_ctrl |= BIT(9);
1138 spin_unlock(&drvdata->spinlock);
1141 static DEVICE_ATTR_RW(addr_stop);
1143 static ssize_t addr_ctxtype_show(struct device *dev,
1144 struct device_attribute *attr,
1149 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1150 struct etmv4_config *config = &drvdata->config;
1152 spin_lock(&drvdata->spinlock);
1153 idx = config->addr_idx;
1154 /* CONTEXTTYPE, bits[3:2] */
1155 val = BMVAL(config->addr_acc[idx], 2, 3);
1156 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1157 (val == ETM_CTX_CTXID ? "ctxid" :
1158 (val == ETM_CTX_VMID ? "vmid" : "all")));
1159 spin_unlock(&drvdata->spinlock);
1163 static ssize_t addr_ctxtype_store(struct device *dev,
1164 struct device_attribute *attr,
1165 const char *buf, size_t size)
1169 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1170 struct etmv4_config *config = &drvdata->config;
1172 if (strlen(buf) >= 10)
1174 if (sscanf(buf, "%s", str) != 1)
1177 spin_lock(&drvdata->spinlock);
1178 idx = config->addr_idx;
1179 if (!strcmp(str, "none"))
1180 /* start by clearing context type bits */
1181 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1182 else if (!strcmp(str, "ctxid")) {
1183 /* 0b01 The trace unit performs a Context ID */
1184 if (drvdata->numcidc) {
1185 config->addr_acc[idx] |= BIT(2);
1186 config->addr_acc[idx] &= ~BIT(3);
1188 } else if (!strcmp(str, "vmid")) {
1189 /* 0b10 The trace unit performs a VMID */
1190 if (drvdata->numvmidc) {
1191 config->addr_acc[idx] &= ~BIT(2);
1192 config->addr_acc[idx] |= BIT(3);
1194 } else if (!strcmp(str, "all")) {
1196 * 0b11 The trace unit performs a Context ID
1197 * comparison and a VMID
1199 if (drvdata->numcidc)
1200 config->addr_acc[idx] |= BIT(2);
1201 if (drvdata->numvmidc)
1202 config->addr_acc[idx] |= BIT(3);
1204 spin_unlock(&drvdata->spinlock);
1207 static DEVICE_ATTR_RW(addr_ctxtype);
1209 static ssize_t addr_context_show(struct device *dev,
1210 struct device_attribute *attr,
1215 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1216 struct etmv4_config *config = &drvdata->config;
1218 spin_lock(&drvdata->spinlock);
1219 idx = config->addr_idx;
1220 /* context ID comparator bits[6:4] */
1221 val = BMVAL(config->addr_acc[idx], 4, 6);
1222 spin_unlock(&drvdata->spinlock);
1223 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1226 static ssize_t addr_context_store(struct device *dev,
1227 struct device_attribute *attr,
1228 const char *buf, size_t size)
1232 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1233 struct etmv4_config *config = &drvdata->config;
1235 if (kstrtoul(buf, 16, &val))
1237 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1239 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1240 drvdata->numcidc : drvdata->numvmidc))
1243 spin_lock(&drvdata->spinlock);
1244 idx = config->addr_idx;
1245 /* clear context ID comparator bits[6:4] */
1246 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1247 config->addr_acc[idx] |= (val << 4);
1248 spin_unlock(&drvdata->spinlock);
1251 static DEVICE_ATTR_RW(addr_context);
1253 static ssize_t seq_idx_show(struct device *dev,
1254 struct device_attribute *attr,
1258 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1259 struct etmv4_config *config = &drvdata->config;
1261 val = config->seq_idx;
1262 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1265 static ssize_t seq_idx_store(struct device *dev,
1266 struct device_attribute *attr,
1267 const char *buf, size_t size)
1270 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1271 struct etmv4_config *config = &drvdata->config;
1273 if (kstrtoul(buf, 16, &val))
1275 if (val >= drvdata->nrseqstate - 1)
1279 * Use spinlock to ensure index doesn't change while it gets
1280 * dereferenced multiple times within a spinlock block elsewhere.
1282 spin_lock(&drvdata->spinlock);
1283 config->seq_idx = val;
1284 spin_unlock(&drvdata->spinlock);
1287 static DEVICE_ATTR_RW(seq_idx);
1289 static ssize_t seq_state_show(struct device *dev,
1290 struct device_attribute *attr,
1294 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1295 struct etmv4_config *config = &drvdata->config;
1297 val = config->seq_state;
1298 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1301 static ssize_t seq_state_store(struct device *dev,
1302 struct device_attribute *attr,
1303 const char *buf, size_t size)
1306 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1307 struct etmv4_config *config = &drvdata->config;
1309 if (kstrtoul(buf, 16, &val))
1311 if (val >= drvdata->nrseqstate)
1314 config->seq_state = val;
1317 static DEVICE_ATTR_RW(seq_state);
1319 static ssize_t seq_event_show(struct device *dev,
1320 struct device_attribute *attr,
1325 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1326 struct etmv4_config *config = &drvdata->config;
1328 spin_lock(&drvdata->spinlock);
1329 idx = config->seq_idx;
1330 val = config->seq_ctrl[idx];
1331 spin_unlock(&drvdata->spinlock);
1332 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1335 static ssize_t seq_event_store(struct device *dev,
1336 struct device_attribute *attr,
1337 const char *buf, size_t size)
1341 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1342 struct etmv4_config *config = &drvdata->config;
1344 if (kstrtoul(buf, 16, &val))
1347 spin_lock(&drvdata->spinlock);
1348 idx = config->seq_idx;
1349 /* Seq control has two masks B[15:8] F[7:0] */
1350 config->seq_ctrl[idx] = val & 0xFFFF;
1351 spin_unlock(&drvdata->spinlock);
1354 static DEVICE_ATTR_RW(seq_event);
1356 static ssize_t seq_reset_event_show(struct device *dev,
1357 struct device_attribute *attr,
1361 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1362 struct etmv4_config *config = &drvdata->config;
1364 val = config->seq_rst;
1365 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1368 static ssize_t seq_reset_event_store(struct device *dev,
1369 struct device_attribute *attr,
1370 const char *buf, size_t size)
1373 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1374 struct etmv4_config *config = &drvdata->config;
1376 if (kstrtoul(buf, 16, &val))
1378 if (!(drvdata->nrseqstate))
1381 config->seq_rst = val & ETMv4_EVENT_MASK;
1384 static DEVICE_ATTR_RW(seq_reset_event);
1386 static ssize_t cntr_idx_show(struct device *dev,
1387 struct device_attribute *attr,
1391 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1392 struct etmv4_config *config = &drvdata->config;
1394 val = config->cntr_idx;
1395 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1398 static ssize_t cntr_idx_store(struct device *dev,
1399 struct device_attribute *attr,
1400 const char *buf, size_t size)
1403 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1404 struct etmv4_config *config = &drvdata->config;
1406 if (kstrtoul(buf, 16, &val))
1408 if (val >= drvdata->nr_cntr)
1412 * Use spinlock to ensure index doesn't change while it gets
1413 * dereferenced multiple times within a spinlock block elsewhere.
1415 spin_lock(&drvdata->spinlock);
1416 config->cntr_idx = val;
1417 spin_unlock(&drvdata->spinlock);
1420 static DEVICE_ATTR_RW(cntr_idx);
1422 static ssize_t cntrldvr_show(struct device *dev,
1423 struct device_attribute *attr,
1428 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1429 struct etmv4_config *config = &drvdata->config;
1431 spin_lock(&drvdata->spinlock);
1432 idx = config->cntr_idx;
1433 val = config->cntrldvr[idx];
1434 spin_unlock(&drvdata->spinlock);
1435 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1438 static ssize_t cntrldvr_store(struct device *dev,
1439 struct device_attribute *attr,
1440 const char *buf, size_t size)
1444 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1445 struct etmv4_config *config = &drvdata->config;
1447 if (kstrtoul(buf, 16, &val))
1449 if (val > ETM_CNTR_MAX_VAL)
1452 spin_lock(&drvdata->spinlock);
1453 idx = config->cntr_idx;
1454 config->cntrldvr[idx] = val;
1455 spin_unlock(&drvdata->spinlock);
1458 static DEVICE_ATTR_RW(cntrldvr);
1460 static ssize_t cntr_val_show(struct device *dev,
1461 struct device_attribute *attr,
1466 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1467 struct etmv4_config *config = &drvdata->config;
1469 spin_lock(&drvdata->spinlock);
1470 idx = config->cntr_idx;
1471 val = config->cntr_val[idx];
1472 spin_unlock(&drvdata->spinlock);
1473 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1476 static ssize_t cntr_val_store(struct device *dev,
1477 struct device_attribute *attr,
1478 const char *buf, size_t size)
1482 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1483 struct etmv4_config *config = &drvdata->config;
1485 if (kstrtoul(buf, 16, &val))
1487 if (val > ETM_CNTR_MAX_VAL)
1490 spin_lock(&drvdata->spinlock);
1491 idx = config->cntr_idx;
1492 config->cntr_val[idx] = val;
1493 spin_unlock(&drvdata->spinlock);
1496 static DEVICE_ATTR_RW(cntr_val);
1498 static ssize_t cntr_ctrl_show(struct device *dev,
1499 struct device_attribute *attr,
1504 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1505 struct etmv4_config *config = &drvdata->config;
1507 spin_lock(&drvdata->spinlock);
1508 idx = config->cntr_idx;
1509 val = config->cntr_ctrl[idx];
1510 spin_unlock(&drvdata->spinlock);
1511 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1514 static ssize_t cntr_ctrl_store(struct device *dev,
1515 struct device_attribute *attr,
1516 const char *buf, size_t size)
1520 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1521 struct etmv4_config *config = &drvdata->config;
1523 if (kstrtoul(buf, 16, &val))
1526 spin_lock(&drvdata->spinlock);
1527 idx = config->cntr_idx;
1528 config->cntr_ctrl[idx] = val;
1529 spin_unlock(&drvdata->spinlock);
1532 static DEVICE_ATTR_RW(cntr_ctrl);
1534 static ssize_t res_idx_show(struct device *dev,
1535 struct device_attribute *attr,
1539 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1540 struct etmv4_config *config = &drvdata->config;
1542 val = config->res_idx;
1543 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1546 static ssize_t res_idx_store(struct device *dev,
1547 struct device_attribute *attr,
1548 const char *buf, size_t size)
1551 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1552 struct etmv4_config *config = &drvdata->config;
1554 if (kstrtoul(buf, 16, &val))
1556 /* Resource selector pair 0 is always implemented and reserved */
1557 if ((val == 0) || (val >= drvdata->nr_resource))
1561 * Use spinlock to ensure index doesn't change while it gets
1562 * dereferenced multiple times within a spinlock block elsewhere.
1564 spin_lock(&drvdata->spinlock);
1565 config->res_idx = val;
1566 spin_unlock(&drvdata->spinlock);
1569 static DEVICE_ATTR_RW(res_idx);
1571 static ssize_t res_ctrl_show(struct device *dev,
1572 struct device_attribute *attr,
1577 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1578 struct etmv4_config *config = &drvdata->config;
1580 spin_lock(&drvdata->spinlock);
1581 idx = config->res_idx;
1582 val = config->res_ctrl[idx];
1583 spin_unlock(&drvdata->spinlock);
1584 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1587 static ssize_t res_ctrl_store(struct device *dev,
1588 struct device_attribute *attr,
1589 const char *buf, size_t size)
1593 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1594 struct etmv4_config *config = &drvdata->config;
1596 if (kstrtoul(buf, 16, &val))
1599 spin_lock(&drvdata->spinlock);
1600 idx = config->res_idx;
1601 /* For odd idx pair inversal bit is RES0 */
1603 /* PAIRINV, bit[21] */
1605 config->res_ctrl[idx] = val & GENMASK(21, 0);
1606 spin_unlock(&drvdata->spinlock);
1609 static DEVICE_ATTR_RW(res_ctrl);
1611 static ssize_t ctxid_idx_show(struct device *dev,
1612 struct device_attribute *attr,
1616 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1617 struct etmv4_config *config = &drvdata->config;
1619 val = config->ctxid_idx;
1620 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1623 static ssize_t ctxid_idx_store(struct device *dev,
1624 struct device_attribute *attr,
1625 const char *buf, size_t size)
1628 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1629 struct etmv4_config *config = &drvdata->config;
1631 if (kstrtoul(buf, 16, &val))
1633 if (val >= drvdata->numcidc)
1637 * Use spinlock to ensure index doesn't change while it gets
1638 * dereferenced multiple times within a spinlock block elsewhere.
1640 spin_lock(&drvdata->spinlock);
1641 config->ctxid_idx = val;
1642 spin_unlock(&drvdata->spinlock);
1645 static DEVICE_ATTR_RW(ctxid_idx);
1647 static ssize_t ctxid_pid_show(struct device *dev,
1648 struct device_attribute *attr,
1653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1654 struct etmv4_config *config = &drvdata->config;
1656 spin_lock(&drvdata->spinlock);
1657 idx = config->ctxid_idx;
1658 val = (unsigned long)config->ctxid_vpid[idx];
1659 spin_unlock(&drvdata->spinlock);
1660 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1663 static ssize_t ctxid_pid_store(struct device *dev,
1664 struct device_attribute *attr,
1665 const char *buf, size_t size)
1668 unsigned long vpid, pid;
1669 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1670 struct etmv4_config *config = &drvdata->config;
1673 * only implemented when ctxid tracing is enabled, i.e. at least one
1674 * ctxid comparator is implemented and ctxid is greater than 0 bits
1677 if (!drvdata->ctxid_size || !drvdata->numcidc)
1679 if (kstrtoul(buf, 16, &vpid))
1682 pid = coresight_vpid_to_pid(vpid);
1684 spin_lock(&drvdata->spinlock);
1685 idx = config->ctxid_idx;
1686 config->ctxid_pid[idx] = (u64)pid;
1687 config->ctxid_vpid[idx] = (u64)vpid;
1688 spin_unlock(&drvdata->spinlock);
1691 static DEVICE_ATTR_RW(ctxid_pid);
1693 static ssize_t ctxid_masks_show(struct device *dev,
1694 struct device_attribute *attr,
1697 unsigned long val1, val2;
1698 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1699 struct etmv4_config *config = &drvdata->config;
1701 spin_lock(&drvdata->spinlock);
1702 val1 = config->ctxid_mask0;
1703 val2 = config->ctxid_mask1;
1704 spin_unlock(&drvdata->spinlock);
1705 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1708 static ssize_t ctxid_masks_store(struct device *dev,
1709 struct device_attribute *attr,
1710 const char *buf, size_t size)
1713 unsigned long val1, val2, mask;
1714 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1715 struct etmv4_config *config = &drvdata->config;
1718 * only implemented when ctxid tracing is enabled, i.e. at least one
1719 * ctxid comparator is implemented and ctxid is greater than 0 bits
1722 if (!drvdata->ctxid_size || !drvdata->numcidc)
1724 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1727 spin_lock(&drvdata->spinlock);
1729 * each byte[0..3] controls mask value applied to ctxid
1732 switch (drvdata->numcidc) {
1734 /* COMP0, bits[7:0] */
1735 config->ctxid_mask0 = val1 & 0xFF;
1738 /* COMP1, bits[15:8] */
1739 config->ctxid_mask0 = val1 & 0xFFFF;
1742 /* COMP2, bits[23:16] */
1743 config->ctxid_mask0 = val1 & 0xFFFFFF;
1746 /* COMP3, bits[31:24] */
1747 config->ctxid_mask0 = val1;
1750 /* COMP4, bits[7:0] */
1751 config->ctxid_mask0 = val1;
1752 config->ctxid_mask1 = val2 & 0xFF;
1755 /* COMP5, bits[15:8] */
1756 config->ctxid_mask0 = val1;
1757 config->ctxid_mask1 = val2 & 0xFFFF;
1760 /* COMP6, bits[23:16] */
1761 config->ctxid_mask0 = val1;
1762 config->ctxid_mask1 = val2 & 0xFFFFFF;
1765 /* COMP7, bits[31:24] */
1766 config->ctxid_mask0 = val1;
1767 config->ctxid_mask1 = val2;
1773 * If software sets a mask bit to 1, it must program relevant byte
1774 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1775 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1776 * of ctxid comparator0 value (corresponding to byte 0) register.
1778 mask = config->ctxid_mask0;
1779 for (i = 0; i < drvdata->numcidc; i++) {
1780 /* mask value of corresponding ctxid comparator */
1781 maskbyte = mask & ETMv4_EVENT_MASK;
1783 * each bit corresponds to a byte of respective ctxid comparator
1786 for (j = 0; j < 8; j++) {
1788 config->ctxid_pid[i] &= ~(0xFF << (j * 8));
1791 /* Select the next ctxid comparator mask value */
1793 /* ctxid comparators[4-7] */
1794 mask = config->ctxid_mask1;
1799 spin_unlock(&drvdata->spinlock);
1802 static DEVICE_ATTR_RW(ctxid_masks);
1804 static ssize_t vmid_idx_show(struct device *dev,
1805 struct device_attribute *attr,
1809 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1810 struct etmv4_config *config = &drvdata->config;
1812 val = config->vmid_idx;
1813 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1816 static ssize_t vmid_idx_store(struct device *dev,
1817 struct device_attribute *attr,
1818 const char *buf, size_t size)
1821 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1822 struct etmv4_config *config = &drvdata->config;
1824 if (kstrtoul(buf, 16, &val))
1826 if (val >= drvdata->numvmidc)
1830 * Use spinlock to ensure index doesn't change while it gets
1831 * dereferenced multiple times within a spinlock block elsewhere.
1833 spin_lock(&drvdata->spinlock);
1834 config->vmid_idx = val;
1835 spin_unlock(&drvdata->spinlock);
1838 static DEVICE_ATTR_RW(vmid_idx);
1840 static ssize_t vmid_val_show(struct device *dev,
1841 struct device_attribute *attr,
1845 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1846 struct etmv4_config *config = &drvdata->config;
1848 val = (unsigned long)config->vmid_val[config->vmid_idx];
1849 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1852 static ssize_t vmid_val_store(struct device *dev,
1853 struct device_attribute *attr,
1854 const char *buf, size_t size)
1857 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1858 struct etmv4_config *config = &drvdata->config;
1861 * only implemented when vmid tracing is enabled, i.e. at least one
1862 * vmid comparator is implemented and at least 8 bit vmid size
1864 if (!drvdata->vmid_size || !drvdata->numvmidc)
1866 if (kstrtoul(buf, 16, &val))
1869 spin_lock(&drvdata->spinlock);
1870 config->vmid_val[config->vmid_idx] = (u64)val;
1871 spin_unlock(&drvdata->spinlock);
1874 static DEVICE_ATTR_RW(vmid_val);
1876 static ssize_t vmid_masks_show(struct device *dev,
1877 struct device_attribute *attr, char *buf)
1879 unsigned long val1, val2;
1880 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1881 struct etmv4_config *config = &drvdata->config;
1883 spin_lock(&drvdata->spinlock);
1884 val1 = config->vmid_mask0;
1885 val2 = config->vmid_mask1;
1886 spin_unlock(&drvdata->spinlock);
1887 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1890 static ssize_t vmid_masks_store(struct device *dev,
1891 struct device_attribute *attr,
1892 const char *buf, size_t size)
1895 unsigned long val1, val2, mask;
1896 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1897 struct etmv4_config *config = &drvdata->config;
1900 * only implemented when vmid tracing is enabled, i.e. at least one
1901 * vmid comparator is implemented and at least 8 bit vmid size
1903 if (!drvdata->vmid_size || !drvdata->numvmidc)
1905 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1908 spin_lock(&drvdata->spinlock);
1911 * each byte[0..3] controls mask value applied to vmid
1914 switch (drvdata->numvmidc) {
1916 /* COMP0, bits[7:0] */
1917 config->vmid_mask0 = val1 & 0xFF;
1920 /* COMP1, bits[15:8] */
1921 config->vmid_mask0 = val1 & 0xFFFF;
1924 /* COMP2, bits[23:16] */
1925 config->vmid_mask0 = val1 & 0xFFFFFF;
1928 /* COMP3, bits[31:24] */
1929 config->vmid_mask0 = val1;
1932 /* COMP4, bits[7:0] */
1933 config->vmid_mask0 = val1;
1934 config->vmid_mask1 = val2 & 0xFF;
1937 /* COMP5, bits[15:8] */
1938 config->vmid_mask0 = val1;
1939 config->vmid_mask1 = val2 & 0xFFFF;
1942 /* COMP6, bits[23:16] */
1943 config->vmid_mask0 = val1;
1944 config->vmid_mask1 = val2 & 0xFFFFFF;
1947 /* COMP7, bits[31:24] */
1948 config->vmid_mask0 = val1;
1949 config->vmid_mask1 = val2;
1956 * If software sets a mask bit to 1, it must program relevant byte
1957 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
1958 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
1959 * of vmid comparator0 value (corresponding to byte 0) register.
1961 mask = config->vmid_mask0;
1962 for (i = 0; i < drvdata->numvmidc; i++) {
1963 /* mask value of corresponding vmid comparator */
1964 maskbyte = mask & ETMv4_EVENT_MASK;
1966 * each bit corresponds to a byte of respective vmid comparator
1969 for (j = 0; j < 8; j++) {
1971 config->vmid_val[i] &= ~(0xFF << (j * 8));
1974 /* Select the next vmid comparator mask value */
1976 /* vmid comparators[4-7] */
1977 mask = config->vmid_mask1;
1981 spin_unlock(&drvdata->spinlock);
1984 static DEVICE_ATTR_RW(vmid_masks);
1986 static ssize_t cpu_show(struct device *dev,
1987 struct device_attribute *attr, char *buf)
1990 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1993 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
1996 static DEVICE_ATTR_RO(cpu);
1998 static struct attribute *coresight_etmv4_attrs[] = {
1999 &dev_attr_nr_pe_cmp.attr,
2000 &dev_attr_nr_addr_cmp.attr,
2001 &dev_attr_nr_cntr.attr,
2002 &dev_attr_nr_ext_inp.attr,
2003 &dev_attr_numcidc.attr,
2004 &dev_attr_numvmidc.attr,
2005 &dev_attr_nrseqstate.attr,
2006 &dev_attr_nr_resource.attr,
2007 &dev_attr_nr_ss_cmp.attr,
2008 &dev_attr_reset.attr,
2009 &dev_attr_mode.attr,
2011 &dev_attr_event.attr,
2012 &dev_attr_event_instren.attr,
2013 &dev_attr_event_ts.attr,
2014 &dev_attr_syncfreq.attr,
2015 &dev_attr_cyc_threshold.attr,
2016 &dev_attr_bb_ctrl.attr,
2017 &dev_attr_event_vinst.attr,
2018 &dev_attr_s_exlevel_vinst.attr,
2019 &dev_attr_ns_exlevel_vinst.attr,
2020 &dev_attr_addr_idx.attr,
2021 &dev_attr_addr_instdatatype.attr,
2022 &dev_attr_addr_single.attr,
2023 &dev_attr_addr_range.attr,
2024 &dev_attr_addr_start.attr,
2025 &dev_attr_addr_stop.attr,
2026 &dev_attr_addr_ctxtype.attr,
2027 &dev_attr_addr_context.attr,
2028 &dev_attr_seq_idx.attr,
2029 &dev_attr_seq_state.attr,
2030 &dev_attr_seq_event.attr,
2031 &dev_attr_seq_reset_event.attr,
2032 &dev_attr_cntr_idx.attr,
2033 &dev_attr_cntrldvr.attr,
2034 &dev_attr_cntr_val.attr,
2035 &dev_attr_cntr_ctrl.attr,
2036 &dev_attr_res_idx.attr,
2037 &dev_attr_res_ctrl.attr,
2038 &dev_attr_ctxid_idx.attr,
2039 &dev_attr_ctxid_pid.attr,
2040 &dev_attr_ctxid_masks.attr,
2041 &dev_attr_vmid_idx.attr,
2042 &dev_attr_vmid_val.attr,
2043 &dev_attr_vmid_masks.attr,
2053 static void do_smp_cross_read(void *data)
2055 struct etmv4_reg *reg = data;
2057 reg->data = readl_relaxed(reg->addr);
2060 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2062 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2063 struct etmv4_reg reg;
2065 reg.addr = drvdata->base + offset;
2067 * smp cross call ensures the CPU will be powered up before
2068 * accessing the ETMv4 trace core registers
2070 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2074 #define coresight_etm4x_reg(name, offset) \
2075 coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2077 #define coresight_etm4x_cross_read(name, offset) \
2078 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
2081 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2082 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2083 coresight_etm4x_reg(trclsr, TRCLSR);
2084 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2085 coresight_etm4x_reg(trcdevid, TRCDEVID);
2086 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2087 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2088 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2089 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2090 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2091 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2092 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2093 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2095 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2096 &dev_attr_trcoslsr.attr,
2097 &dev_attr_trcpdcr.attr,
2098 &dev_attr_trcpdsr.attr,
2099 &dev_attr_trclsr.attr,
2100 &dev_attr_trcconfig.attr,
2101 &dev_attr_trctraceid.attr,
2102 &dev_attr_trcauthstatus.attr,
2103 &dev_attr_trcdevid.attr,
2104 &dev_attr_trcdevtype.attr,
2105 &dev_attr_trcpidr0.attr,
2106 &dev_attr_trcpidr1.attr,
2107 &dev_attr_trcpidr2.attr,
2108 &dev_attr_trcpidr3.attr,
/* ID register dump attributes — read cross-CPU since the trace unit is per-CPU */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2126 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2127 &dev_attr_trcidr0.attr,
2128 &dev_attr_trcidr1.attr,
2129 &dev_attr_trcidr2.attr,
2130 &dev_attr_trcidr3.attr,
2131 &dev_attr_trcidr4.attr,
2132 &dev_attr_trcidr5.attr,
2133 /* trcidr[6,7] are reserved */
2134 &dev_attr_trcidr8.attr,
2135 &dev_attr_trcidr9.attr,
2136 &dev_attr_trcidr10.attr,
2137 &dev_attr_trcidr11.attr,
2138 &dev_attr_trcidr12.attr,
2139 &dev_attr_trcidr13.attr,
2143 static const struct attribute_group coresight_etmv4_group = {
2144 .attrs = coresight_etmv4_attrs,
2147 static const struct attribute_group coresight_etmv4_mgmt_group = {
2148 .attrs = coresight_etmv4_mgmt_attrs,
2152 static const struct attribute_group coresight_etmv4_trcidr_group = {
2153 .attrs = coresight_etmv4_trcidr_attrs,
2157 const struct attribute_group *coresight_etmv4_groups[] = {
2158 &coresight_etmv4_group,
2159 &coresight_etmv4_mgmt_group,
2160 &coresight_etmv4_trcidr_group,