1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 #include "coresight-syscfg.h"
14 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
17 struct etmv4_config *config = &drvdata->config;
19 idx = config->addr_idx;
22 * TRCACATRn.TYPE bit[1:0]: type of comparison
23 * the trace unit performs
25 if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
30 * We are performing instruction address comparison. Set the
31 * relevant bit of ViewInst Include/Exclude Control register
32 * for corresponding address comparator pair.
34 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
35 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
38 if (exclude == true) {
40 * Set exclude bit and unset the include bit
41 * corresponding to comparator pair
43 config->viiectlr |= BIT(idx / 2 + 16);
44 config->viiectlr &= ~BIT(idx / 2);
47 * Set include bit and unset exclude bit
48 * corresponding to comparator pair
50 config->viiectlr |= BIT(idx / 2);
51 config->viiectlr &= ~BIT(idx / 2 + 16);
57 static ssize_t nr_pe_cmp_show(struct device *dev,
58 struct device_attribute *attr,
62 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
64 val = drvdata->nr_pe_cmp;
65 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
67 static DEVICE_ATTR_RO(nr_pe_cmp);
69 static ssize_t nr_addr_cmp_show(struct device *dev,
70 struct device_attribute *attr,
74 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
76 val = drvdata->nr_addr_cmp;
77 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
79 static DEVICE_ATTR_RO(nr_addr_cmp);
81 static ssize_t nr_cntr_show(struct device *dev,
82 struct device_attribute *attr,
86 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
88 val = drvdata->nr_cntr;
89 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
91 static DEVICE_ATTR_RO(nr_cntr);
93 static ssize_t nr_ext_inp_show(struct device *dev,
94 struct device_attribute *attr,
98 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
100 val = drvdata->nr_ext_inp;
101 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
103 static DEVICE_ATTR_RO(nr_ext_inp);
105 static ssize_t numcidc_show(struct device *dev,
106 struct device_attribute *attr,
110 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
112 val = drvdata->numcidc;
113 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
115 static DEVICE_ATTR_RO(numcidc);
117 static ssize_t numvmidc_show(struct device *dev,
118 struct device_attribute *attr,
122 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
124 val = drvdata->numvmidc;
125 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
127 static DEVICE_ATTR_RO(numvmidc);
129 static ssize_t nrseqstate_show(struct device *dev,
130 struct device_attribute *attr,
134 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
136 val = drvdata->nrseqstate;
137 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
139 static DEVICE_ATTR_RO(nrseqstate);
141 static ssize_t nr_resource_show(struct device *dev,
142 struct device_attribute *attr,
146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
148 val = drvdata->nr_resource;
149 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
151 static DEVICE_ATTR_RO(nr_resource);
153 static ssize_t nr_ss_cmp_show(struct device *dev,
154 struct device_attribute *attr,
158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
160 val = drvdata->nr_ss_cmp;
161 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
163 static DEVICE_ATTR_RO(nr_ss_cmp);
165 static ssize_t reset_store(struct device *dev,
166 struct device_attribute *attr,
167 const char *buf, size_t size)
171 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
172 struct etmv4_config *config = &drvdata->config;
174 if (kstrtoul(buf, 16, &val))
177 spin_lock(&drvdata->spinlock);
181 /* Disable data tracing: do not trace load and store data transfers */
182 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
183 config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
185 /* Disable data value and data address tracing */
186 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
187 ETM_MODE_DATA_TRACE_VAL);
188 config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
190 /* Disable all events tracing */
191 config->eventctrl0 = 0x0;
192 config->eventctrl1 = 0x0;
194 /* Disable timestamp event */
195 config->ts_ctrl = 0x0;
197 /* Disable stalling */
198 config->stall_ctrl = 0x0;
200 /* Reset trace synchronization period to 2^8 = 256 bytes*/
201 if (drvdata->syncpr == false)
202 config->syncfreq = 0x8;
205 * Enable ViewInst to trace everything with start-stop logic in
206 * started state. ARM recommends start-stop logic is set before
209 config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
210 if (drvdata->nr_addr_cmp > 0) {
211 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
212 /* SSSTATUS, bit[9] */
213 config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
216 /* No address range filtering for ViewInst */
217 config->viiectlr = 0x0;
219 /* No start-stop filtering for ViewInst */
220 config->vissctlr = 0x0;
221 config->vipcssctlr = 0x0;
223 /* Disable seq events */
224 for (i = 0; i < drvdata->nrseqstate-1; i++)
225 config->seq_ctrl[i] = 0x0;
226 config->seq_rst = 0x0;
227 config->seq_state = 0x0;
229 /* Disable external input events */
230 config->ext_inp = 0x0;
232 config->cntr_idx = 0x0;
233 for (i = 0; i < drvdata->nr_cntr; i++) {
234 config->cntrldvr[i] = 0x0;
235 config->cntr_ctrl[i] = 0x0;
236 config->cntr_val[i] = 0x0;
239 config->res_idx = 0x0;
240 for (i = 2; i < 2 * drvdata->nr_resource; i++)
241 config->res_ctrl[i] = 0x0;
243 config->ss_idx = 0x0;
244 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
245 config->ss_ctrl[i] = 0x0;
246 config->ss_pe_cmp[i] = 0x0;
249 config->addr_idx = 0x0;
250 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
251 config->addr_val[i] = 0x0;
252 config->addr_acc[i] = 0x0;
253 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
256 config->ctxid_idx = 0x0;
257 for (i = 0; i < drvdata->numcidc; i++)
258 config->ctxid_pid[i] = 0x0;
260 config->ctxid_mask0 = 0x0;
261 config->ctxid_mask1 = 0x0;
263 config->vmid_idx = 0x0;
264 for (i = 0; i < drvdata->numvmidc; i++)
265 config->vmid_val[i] = 0x0;
266 config->vmid_mask0 = 0x0;
267 config->vmid_mask1 = 0x0;
269 drvdata->trcid = drvdata->cpu + 1;
271 spin_unlock(&drvdata->spinlock);
273 cscfg_csdev_reset_feats(to_coresight_device(dev));
277 static DEVICE_ATTR_WO(reset);
279 static ssize_t mode_show(struct device *dev,
280 struct device_attribute *attr,
284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
285 struct etmv4_config *config = &drvdata->config;
288 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
291 static ssize_t mode_store(struct device *dev,
292 struct device_attribute *attr,
293 const char *buf, size_t size)
295 unsigned long val, mode;
296 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
297 struct etmv4_config *config = &drvdata->config;
299 if (kstrtoul(buf, 16, &val))
302 spin_lock(&drvdata->spinlock);
303 config->mode = val & ETMv4_MODE_ALL;
305 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */
307 config->cfg &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
308 if (config->mode & ETM_MODE_LOAD)
309 /* 0b01 Trace load instructions as P0 instructions */
310 config->cfg |= TRCCONFIGR_INSTP0_LOAD;
311 if (config->mode & ETM_MODE_STORE)
312 /* 0b10 Trace store instructions as P0 instructions */
313 config->cfg |= TRCCONFIGR_INSTP0_STORE;
314 if (config->mode & ETM_MODE_LOAD_STORE)
316 * 0b11 Trace load and store instructions
319 config->cfg |= TRCCONFIGR_INSTP0_LOAD_STORE;
322 /* bit[3], Branch broadcast mode */
323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 config->cfg |= TRCCONFIGR_BB;
326 config->cfg &= ~TRCCONFIGR_BB;
328 /* bit[4], Cycle counting instruction trace bit */
329 if ((config->mode & ETMv4_MODE_CYCACC) &&
330 (drvdata->trccci == true))
331 config->cfg |= TRCCONFIGR_CCI;
333 config->cfg &= ~TRCCONFIGR_CCI;
335 /* bit[6], Context ID tracing bit */
336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 config->cfg |= TRCCONFIGR_CID;
339 config->cfg &= ~TRCCONFIGR_CID;
341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 config->cfg |= TRCCONFIGR_VMID;
344 config->cfg &= ~TRCCONFIGR_VMID;
346 /* bits[10:8], Conditional instruction tracing bit */
347 mode = ETM_MODE_COND(config->mode);
348 if (drvdata->trccond == true) {
349 config->cfg &= ~TRCCONFIGR_COND_MASK;
350 config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
353 /* bit[11], Global timestamp tracing bit */
354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 config->cfg |= TRCCONFIGR_TS;
357 config->cfg &= ~TRCCONFIGR_TS;
359 /* bit[12], Return stack enable bit */
360 if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 (drvdata->retstack == true))
362 config->cfg |= TRCCONFIGR_RS;
364 config->cfg &= ~TRCCONFIGR_RS;
366 /* bits[14:13], Q element enable field */
367 mode = ETM_MODE_QELEM(config->mode);
368 /* start by clearing QE bits */
369 config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
371 * if supported, Q elements with instruction counts are enabled.
372 * Always set the low bit for any requested mode. Valid combos are
373 * 0b00, 0b01 and 0b11.
375 if (mode && drvdata->q_support)
376 config->cfg |= TRCCONFIGR_QE_W_COUNTS;
378 * if supported, Q elements with and without instruction
381 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
382 config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
384 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
385 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
386 (drvdata->atbtrig == true))
387 config->eventctrl1 |= TRCEVENTCTL1R_ATB;
389 config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
391 /* bit[12], Low-power state behavior override bit */
392 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
393 (drvdata->lpoverride == true))
394 config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
396 config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
398 /* bit[8], Instruction stall bit */
399 if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
400 config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
402 config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
404 /* bit[10], Prioritize instruction trace bit */
405 if (config->mode & ETM_MODE_INSTPRIO)
406 config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
408 config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
410 /* bit[13], Trace overflow prevention bit */
411 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
412 (drvdata->nooverflow == true))
413 config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
415 config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
417 /* bit[9] Start/stop logic control bit */
418 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
419 config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
421 config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
423 /* bit[10], Whether a trace unit must trace a Reset exception */
424 if (config->mode & ETM_MODE_TRACE_RESET)
425 config->vinst_ctrl |= TRCVICTLR_TRCRESET;
427 config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
429 /* bit[11], Whether a trace unit must trace a system error exception */
430 if ((config->mode & ETM_MODE_TRACE_ERR) &&
431 (drvdata->trc_error == true))
432 config->vinst_ctrl |= TRCVICTLR_TRCERR;
434 config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
436 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
437 etm4_config_trace_mode(config);
439 spin_unlock(&drvdata->spinlock);
443 static DEVICE_ATTR_RW(mode);
445 static ssize_t pe_show(struct device *dev,
446 struct device_attribute *attr,
450 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
451 struct etmv4_config *config = &drvdata->config;
453 val = config->pe_sel;
454 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
457 static ssize_t pe_store(struct device *dev,
458 struct device_attribute *attr,
459 const char *buf, size_t size)
462 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
463 struct etmv4_config *config = &drvdata->config;
465 if (kstrtoul(buf, 16, &val))
468 spin_lock(&drvdata->spinlock);
469 if (val > drvdata->nr_pe) {
470 spin_unlock(&drvdata->spinlock);
474 config->pe_sel = val;
475 spin_unlock(&drvdata->spinlock);
478 static DEVICE_ATTR_RW(pe);
480 static ssize_t event_show(struct device *dev,
481 struct device_attribute *attr,
485 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
486 struct etmv4_config *config = &drvdata->config;
488 val = config->eventctrl0;
489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
492 static ssize_t event_store(struct device *dev,
493 struct device_attribute *attr,
494 const char *buf, size_t size)
497 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
498 struct etmv4_config *config = &drvdata->config;
500 if (kstrtoul(buf, 16, &val))
503 spin_lock(&drvdata->spinlock);
504 switch (drvdata->nr_event) {
506 /* EVENT0, bits[7:0] */
507 config->eventctrl0 = val & 0xFF;
510 /* EVENT1, bits[15:8] */
511 config->eventctrl0 = val & 0xFFFF;
514 /* EVENT2, bits[23:16] */
515 config->eventctrl0 = val & 0xFFFFFF;
518 /* EVENT3, bits[31:24] */
519 config->eventctrl0 = val;
524 spin_unlock(&drvdata->spinlock);
527 static DEVICE_ATTR_RW(event);
529 static ssize_t event_instren_show(struct device *dev,
530 struct device_attribute *attr,
534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
535 struct etmv4_config *config = &drvdata->config;
537 val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
538 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
541 static ssize_t event_instren_store(struct device *dev,
542 struct device_attribute *attr,
543 const char *buf, size_t size)
546 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
547 struct etmv4_config *config = &drvdata->config;
549 if (kstrtoul(buf, 16, &val))
552 spin_lock(&drvdata->spinlock);
553 /* start by clearing all instruction event enable bits */
554 config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
555 switch (drvdata->nr_event) {
557 /* generate Event element for event 1 */
558 config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
561 /* generate Event element for event 1 and 2 */
562 config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
565 /* generate Event element for event 1, 2 and 3 */
566 config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
567 TRCEVENTCTL1R_INSTEN_1 |
568 TRCEVENTCTL1R_INSTEN_2);
571 /* generate Event element for all 4 events */
572 config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
573 TRCEVENTCTL1R_INSTEN_1 |
574 TRCEVENTCTL1R_INSTEN_2 |
575 TRCEVENTCTL1R_INSTEN_3);
580 spin_unlock(&drvdata->spinlock);
583 static DEVICE_ATTR_RW(event_instren);
585 static ssize_t event_ts_show(struct device *dev,
586 struct device_attribute *attr,
590 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
591 struct etmv4_config *config = &drvdata->config;
593 val = config->ts_ctrl;
594 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
597 static ssize_t event_ts_store(struct device *dev,
598 struct device_attribute *attr,
599 const char *buf, size_t size)
602 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
603 struct etmv4_config *config = &drvdata->config;
605 if (kstrtoul(buf, 16, &val))
607 if (!drvdata->ts_size)
610 config->ts_ctrl = val & ETMv4_EVENT_MASK;
613 static DEVICE_ATTR_RW(event_ts);
615 static ssize_t syncfreq_show(struct device *dev,
616 struct device_attribute *attr,
620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
621 struct etmv4_config *config = &drvdata->config;
623 val = config->syncfreq;
624 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
627 static ssize_t syncfreq_store(struct device *dev,
628 struct device_attribute *attr,
629 const char *buf, size_t size)
632 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
633 struct etmv4_config *config = &drvdata->config;
635 if (kstrtoul(buf, 16, &val))
637 if (drvdata->syncpr == true)
640 config->syncfreq = val & ETMv4_SYNC_MASK;
643 static DEVICE_ATTR_RW(syncfreq);
645 static ssize_t cyc_threshold_show(struct device *dev,
646 struct device_attribute *attr,
650 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
651 struct etmv4_config *config = &drvdata->config;
653 val = config->ccctlr;
654 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
657 static ssize_t cyc_threshold_store(struct device *dev,
658 struct device_attribute *attr,
659 const char *buf, size_t size)
662 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
663 struct etmv4_config *config = &drvdata->config;
665 if (kstrtoul(buf, 16, &val))
668 /* mask off max threshold before checking min value */
669 val &= ETM_CYC_THRESHOLD_MASK;
670 if (val < drvdata->ccitmin)
673 config->ccctlr = val;
676 static DEVICE_ATTR_RW(cyc_threshold);
678 static ssize_t bb_ctrl_show(struct device *dev,
679 struct device_attribute *attr,
683 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 struct etmv4_config *config = &drvdata->config;
686 val = config->bb_ctrl;
687 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
690 static ssize_t bb_ctrl_store(struct device *dev,
691 struct device_attribute *attr,
692 const char *buf, size_t size)
695 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
696 struct etmv4_config *config = &drvdata->config;
698 if (kstrtoul(buf, 16, &val))
700 if (drvdata->trcbb == false)
702 if (!drvdata->nr_addr_cmp)
706 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
707 * individual range comparators. If include then at least 1
708 * range must be selected.
710 if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
713 config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
716 static DEVICE_ATTR_RW(bb_ctrl);
718 static ssize_t event_vinst_show(struct device *dev,
719 struct device_attribute *attr,
723 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
724 struct etmv4_config *config = &drvdata->config;
726 val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
727 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
730 static ssize_t event_vinst_store(struct device *dev,
731 struct device_attribute *attr,
732 const char *buf, size_t size)
735 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
736 struct etmv4_config *config = &drvdata->config;
738 if (kstrtoul(buf, 16, &val))
741 spin_lock(&drvdata->spinlock);
742 val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
743 config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
744 config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
745 spin_unlock(&drvdata->spinlock);
748 static DEVICE_ATTR_RW(event_vinst);
750 static ssize_t s_exlevel_vinst_show(struct device *dev,
751 struct device_attribute *attr,
755 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
756 struct etmv4_config *config = &drvdata->config;
758 val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
759 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
762 static ssize_t s_exlevel_vinst_store(struct device *dev,
763 struct device_attribute *attr,
764 const char *buf, size_t size)
767 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
768 struct etmv4_config *config = &drvdata->config;
770 if (kstrtoul(buf, 16, &val))
773 spin_lock(&drvdata->spinlock);
774 /* clear all EXLEVEL_S bits */
775 config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
776 /* enable instruction tracing for corresponding exception level */
777 val &= drvdata->s_ex_level;
778 config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
779 spin_unlock(&drvdata->spinlock);
782 static DEVICE_ATTR_RW(s_exlevel_vinst);
784 static ssize_t ns_exlevel_vinst_show(struct device *dev,
785 struct device_attribute *attr,
789 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
790 struct etmv4_config *config = &drvdata->config;
792 /* EXLEVEL_NS, bits[23:20] */
793 val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
794 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
797 static ssize_t ns_exlevel_vinst_store(struct device *dev,
798 struct device_attribute *attr,
799 const char *buf, size_t size)
802 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
803 struct etmv4_config *config = &drvdata->config;
805 if (kstrtoul(buf, 16, &val))
808 spin_lock(&drvdata->spinlock);
809 /* clear EXLEVEL_NS bits */
810 config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
811 /* enable instruction tracing for corresponding exception level */
812 val &= drvdata->ns_ex_level;
813 config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
814 spin_unlock(&drvdata->spinlock);
817 static DEVICE_ATTR_RW(ns_exlevel_vinst);
819 static ssize_t addr_idx_show(struct device *dev,
820 struct device_attribute *attr,
824 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
825 struct etmv4_config *config = &drvdata->config;
827 val = config->addr_idx;
828 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
831 static ssize_t addr_idx_store(struct device *dev,
832 struct device_attribute *attr,
833 const char *buf, size_t size)
836 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
837 struct etmv4_config *config = &drvdata->config;
839 if (kstrtoul(buf, 16, &val))
841 if (val >= drvdata->nr_addr_cmp * 2)
845 * Use spinlock to ensure index doesn't change while it gets
846 * dereferenced multiple times within a spinlock block elsewhere.
848 spin_lock(&drvdata->spinlock);
849 config->addr_idx = val;
850 spin_unlock(&drvdata->spinlock);
853 static DEVICE_ATTR_RW(addr_idx);
855 static ssize_t addr_instdatatype_show(struct device *dev,
856 struct device_attribute *attr,
861 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
862 struct etmv4_config *config = &drvdata->config;
864 spin_lock(&drvdata->spinlock);
865 idx = config->addr_idx;
866 val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
867 len = scnprintf(buf, PAGE_SIZE, "%s\n",
868 val == TRCACATRn_TYPE_ADDR ? "instr" :
869 (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
870 (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
871 "data_load_store")));
872 spin_unlock(&drvdata->spinlock);
876 static ssize_t addr_instdatatype_store(struct device *dev,
877 struct device_attribute *attr,
878 const char *buf, size_t size)
882 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
883 struct etmv4_config *config = &drvdata->config;
885 if (strlen(buf) >= 20)
887 if (sscanf(buf, "%s", str) != 1)
890 spin_lock(&drvdata->spinlock);
891 idx = config->addr_idx;
892 if (!strcmp(str, "instr"))
893 /* TYPE, bits[1:0] */
894 config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
896 spin_unlock(&drvdata->spinlock);
899 static DEVICE_ATTR_RW(addr_instdatatype);
901 static ssize_t addr_single_show(struct device *dev,
902 struct device_attribute *attr,
907 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
908 struct etmv4_config *config = &drvdata->config;
910 idx = config->addr_idx;
911 spin_lock(&drvdata->spinlock);
912 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
913 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
914 spin_unlock(&drvdata->spinlock);
917 val = (unsigned long)config->addr_val[idx];
918 spin_unlock(&drvdata->spinlock);
919 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
922 static ssize_t addr_single_store(struct device *dev,
923 struct device_attribute *attr,
924 const char *buf, size_t size)
928 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
929 struct etmv4_config *config = &drvdata->config;
931 if (kstrtoul(buf, 16, &val))
934 spin_lock(&drvdata->spinlock);
935 idx = config->addr_idx;
936 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
937 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
938 spin_unlock(&drvdata->spinlock);
942 config->addr_val[idx] = (u64)val;
943 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
944 spin_unlock(&drvdata->spinlock);
947 static DEVICE_ATTR_RW(addr_single);
949 static ssize_t addr_range_show(struct device *dev,
950 struct device_attribute *attr,
954 unsigned long val1, val2;
955 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
956 struct etmv4_config *config = &drvdata->config;
958 spin_lock(&drvdata->spinlock);
959 idx = config->addr_idx;
961 spin_unlock(&drvdata->spinlock);
964 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
965 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
966 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
967 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
968 spin_unlock(&drvdata->spinlock);
972 val1 = (unsigned long)config->addr_val[idx];
973 val2 = (unsigned long)config->addr_val[idx + 1];
974 spin_unlock(&drvdata->spinlock);
975 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
978 static ssize_t addr_range_store(struct device *dev,
979 struct device_attribute *attr,
980 const char *buf, size_t size)
983 unsigned long val1, val2;
984 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
985 struct etmv4_config *config = &drvdata->config;
986 int elements, exclude;
988 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
990 /* exclude is optional, but need at least two parameter */
993 /* lower address comparator cannot have a higher address value */
997 spin_lock(&drvdata->spinlock);
998 idx = config->addr_idx;
1000 spin_unlock(&drvdata->spinlock);
1004 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1005 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1006 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1007 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1008 spin_unlock(&drvdata->spinlock);
1012 config->addr_val[idx] = (u64)val1;
1013 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1014 config->addr_val[idx + 1] = (u64)val2;
1015 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1017 * Program include or exclude control bits for vinst or vdata
1018 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1019 * use supplied value, or default to bit set in 'mode'
1022 exclude = config->mode & ETM_MODE_EXCLUDE;
1023 etm4_set_mode_exclude(drvdata, exclude ? true : false);
1025 spin_unlock(&drvdata->spinlock);
1028 static DEVICE_ATTR_RW(addr_range);
1030 static ssize_t addr_start_show(struct device *dev,
1031 struct device_attribute *attr,
1036 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037 struct etmv4_config *config = &drvdata->config;
1039 spin_lock(&drvdata->spinlock);
1040 idx = config->addr_idx;
1042 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1043 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1044 spin_unlock(&drvdata->spinlock);
1048 val = (unsigned long)config->addr_val[idx];
1049 spin_unlock(&drvdata->spinlock);
1050 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1053 static ssize_t addr_start_store(struct device *dev,
1054 struct device_attribute *attr,
1055 const char *buf, size_t size)
1059 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1060 struct etmv4_config *config = &drvdata->config;
1062 if (kstrtoul(buf, 16, &val))
1065 spin_lock(&drvdata->spinlock);
1066 idx = config->addr_idx;
1067 if (!drvdata->nr_addr_cmp) {
1068 spin_unlock(&drvdata->spinlock);
1071 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1072 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1073 spin_unlock(&drvdata->spinlock);
1077 config->addr_val[idx] = (u64)val;
1078 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1079 config->vissctlr |= BIT(idx);
1080 spin_unlock(&drvdata->spinlock);
1083 static DEVICE_ATTR_RW(addr_start);
1085 static ssize_t addr_stop_show(struct device *dev,
1086 struct device_attribute *attr,
1091 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1092 struct etmv4_config *config = &drvdata->config;
1094 spin_lock(&drvdata->spinlock);
1095 idx = config->addr_idx;
1097 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1098 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1099 spin_unlock(&drvdata->spinlock);
1103 val = (unsigned long)config->addr_val[idx];
1104 spin_unlock(&drvdata->spinlock);
1105 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1108 static ssize_t addr_stop_store(struct device *dev,
1109 struct device_attribute *attr,
1110 const char *buf, size_t size)
1114 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1115 struct etmv4_config *config = &drvdata->config;
1117 if (kstrtoul(buf, 16, &val))
1120 spin_lock(&drvdata->spinlock);
1121 idx = config->addr_idx;
1122 if (!drvdata->nr_addr_cmp) {
1123 spin_unlock(&drvdata->spinlock);
1126 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1127 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1128 spin_unlock(&drvdata->spinlock);
1132 config->addr_val[idx] = (u64)val;
1133 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1134 config->vissctlr |= BIT(idx + 16);
1135 spin_unlock(&drvdata->spinlock);
1138 static DEVICE_ATTR_RW(addr_stop);
1140 static ssize_t addr_ctxtype_show(struct device *dev,
1141 struct device_attribute *attr,
1146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1147 struct etmv4_config *config = &drvdata->config;
1149 spin_lock(&drvdata->spinlock);
1150 idx = config->addr_idx;
1151 /* CONTEXTTYPE, bits[3:2] */
1152 val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1153 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1154 (val == ETM_CTX_CTXID ? "ctxid" :
1155 (val == ETM_CTX_VMID ? "vmid" : "all")));
1156 spin_unlock(&drvdata->spinlock);
1160 static ssize_t addr_ctxtype_store(struct device *dev,
1161 struct device_attribute *attr,
1162 const char *buf, size_t size)
1166 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1167 struct etmv4_config *config = &drvdata->config;
1169 if (strlen(buf) >= 10)
1171 if (sscanf(buf, "%s", str) != 1)
1174 spin_lock(&drvdata->spinlock);
1175 idx = config->addr_idx;
1176 if (!strcmp(str, "none"))
1177 /* start by clearing context type bits */
1178 config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
1179 else if (!strcmp(str, "ctxid")) {
1180 /* 0b01 The trace unit performs a Context ID */
1181 if (drvdata->numcidc) {
1182 config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1183 config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
1185 } else if (!strcmp(str, "vmid")) {
1186 /* 0b10 The trace unit performs a VMID */
1187 if (drvdata->numvmidc) {
1188 config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
1189 config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1191 } else if (!strcmp(str, "all")) {
1193 * 0b11 The trace unit performs a Context ID
1194 * comparison and a VMID
1196 if (drvdata->numcidc)
1197 config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1198 if (drvdata->numvmidc)
1199 config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1201 spin_unlock(&drvdata->spinlock);
1204 static DEVICE_ATTR_RW(addr_ctxtype);
1206 static ssize_t addr_context_show(struct device *dev,
1207 struct device_attribute *attr,
1212 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1213 struct etmv4_config *config = &drvdata->config;
1215 spin_lock(&drvdata->spinlock);
1216 idx = config->addr_idx;
1217 /* context ID comparator bits[6:4] */
1218 val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1219 spin_unlock(&drvdata->spinlock);
1220 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1223 static ssize_t addr_context_store(struct device *dev,
1224 struct device_attribute *attr,
1225 const char *buf, size_t size)
1229 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1230 struct etmv4_config *config = &drvdata->config;
1232 if (kstrtoul(buf, 16, &val))
1234 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1236 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1237 drvdata->numcidc : drvdata->numvmidc))
1240 spin_lock(&drvdata->spinlock);
1241 idx = config->addr_idx;
1242 /* clear context ID comparator bits[6:4] */
1243 config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1244 config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1245 spin_unlock(&drvdata->spinlock);
1248 static DEVICE_ATTR_RW(addr_context);
1250 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1251 struct device_attribute *attr,
1256 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1257 struct etmv4_config *config = &drvdata->config;
1259 spin_lock(&drvdata->spinlock);
1260 idx = config->addr_idx;
1261 val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1262 spin_unlock(&drvdata->spinlock);
1263 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1266 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1267 struct device_attribute *attr,
1268 const char *buf, size_t size)
1272 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273 struct etmv4_config *config = &drvdata->config;
1275 if (kstrtoul(buf, 0, &val))
1278 if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1281 spin_lock(&drvdata->spinlock);
1282 idx = config->addr_idx;
1283 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1284 config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1285 config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1286 spin_unlock(&drvdata->spinlock);
1289 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
/*
 * Human-readable names for the ETM_ADDR_TYPE_* values, indexed by the
 * addr_type stored per comparator; consumed by addr_cmp_view_show().
 */
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};
1299 static ssize_t addr_cmp_view_show(struct device *dev,
1300 struct device_attribute *attr, char *buf)
1303 unsigned long addr_v, addr_v2, addr_ctrl;
1304 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1305 struct etmv4_config *config = &drvdata->config;
1307 bool exclude = false;
1309 spin_lock(&drvdata->spinlock);
1310 idx = config->addr_idx;
1311 addr_v = config->addr_val[idx];
1312 addr_ctrl = config->addr_acc[idx];
1313 addr_type = config->addr_type[idx];
1314 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1318 addr_v = config->addr_val[idx];
1320 addr_v2 = config->addr_val[idx + 1];
1322 exclude = config->viiectlr & BIT(idx / 2 + 16);
1324 spin_unlock(&drvdata->spinlock);
1326 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1327 addr_type_names[addr_type], addr_v);
1328 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1329 size += scnprintf(buf + size, PAGE_SIZE - size,
1330 " %#lx %s", addr_v2,
1331 exclude ? "exclude" : "include");
1333 size += scnprintf(buf + size, PAGE_SIZE - size,
1334 " ctrl(%#lx)\n", addr_ctrl);
1336 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1340 static DEVICE_ATTR_RO(addr_cmp_view);
1342 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1343 struct device_attribute *attr,
1347 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1348 struct etmv4_config *config = &drvdata->config;
1350 if (!drvdata->nr_pe_cmp)
1352 val = config->vipcssctlr;
1353 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1355 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1356 struct device_attribute *attr,
1357 const char *buf, size_t size)
1360 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1361 struct etmv4_config *config = &drvdata->config;
1363 if (kstrtoul(buf, 16, &val))
1365 if (!drvdata->nr_pe_cmp)
1368 spin_lock(&drvdata->spinlock);
1369 config->vipcssctlr = val;
1370 spin_unlock(&drvdata->spinlock);
1373 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1375 static ssize_t seq_idx_show(struct device *dev,
1376 struct device_attribute *attr,
1380 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381 struct etmv4_config *config = &drvdata->config;
1383 val = config->seq_idx;
1384 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1387 static ssize_t seq_idx_store(struct device *dev,
1388 struct device_attribute *attr,
1389 const char *buf, size_t size)
1392 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1393 struct etmv4_config *config = &drvdata->config;
1395 if (kstrtoul(buf, 16, &val))
1397 if (val >= drvdata->nrseqstate - 1)
1401 * Use spinlock to ensure index doesn't change while it gets
1402 * dereferenced multiple times within a spinlock block elsewhere.
1404 spin_lock(&drvdata->spinlock);
1405 config->seq_idx = val;
1406 spin_unlock(&drvdata->spinlock);
1409 static DEVICE_ATTR_RW(seq_idx);
1411 static ssize_t seq_state_show(struct device *dev,
1412 struct device_attribute *attr,
1416 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417 struct etmv4_config *config = &drvdata->config;
1419 val = config->seq_state;
1420 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1423 static ssize_t seq_state_store(struct device *dev,
1424 struct device_attribute *attr,
1425 const char *buf, size_t size)
1428 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1429 struct etmv4_config *config = &drvdata->config;
1431 if (kstrtoul(buf, 16, &val))
1433 if (val >= drvdata->nrseqstate)
1436 config->seq_state = val;
1439 static DEVICE_ATTR_RW(seq_state);
1441 static ssize_t seq_event_show(struct device *dev,
1442 struct device_attribute *attr,
1447 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1448 struct etmv4_config *config = &drvdata->config;
1450 spin_lock(&drvdata->spinlock);
1451 idx = config->seq_idx;
1452 val = config->seq_ctrl[idx];
1453 spin_unlock(&drvdata->spinlock);
1454 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1457 static ssize_t seq_event_store(struct device *dev,
1458 struct device_attribute *attr,
1459 const char *buf, size_t size)
1463 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1464 struct etmv4_config *config = &drvdata->config;
1466 if (kstrtoul(buf, 16, &val))
1469 spin_lock(&drvdata->spinlock);
1470 idx = config->seq_idx;
1471 /* Seq control has two masks B[15:8] F[7:0] */
1472 config->seq_ctrl[idx] = val & 0xFFFF;
1473 spin_unlock(&drvdata->spinlock);
1476 static DEVICE_ATTR_RW(seq_event);
1478 static ssize_t seq_reset_event_show(struct device *dev,
1479 struct device_attribute *attr,
1483 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1484 struct etmv4_config *config = &drvdata->config;
1486 val = config->seq_rst;
1487 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1490 static ssize_t seq_reset_event_store(struct device *dev,
1491 struct device_attribute *attr,
1492 const char *buf, size_t size)
1495 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1496 struct etmv4_config *config = &drvdata->config;
1498 if (kstrtoul(buf, 16, &val))
1500 if (!(drvdata->nrseqstate))
1503 config->seq_rst = val & ETMv4_EVENT_MASK;
1506 static DEVICE_ATTR_RW(seq_reset_event);
1508 static ssize_t cntr_idx_show(struct device *dev,
1509 struct device_attribute *attr,
1513 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1514 struct etmv4_config *config = &drvdata->config;
1516 val = config->cntr_idx;
1517 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1520 static ssize_t cntr_idx_store(struct device *dev,
1521 struct device_attribute *attr,
1522 const char *buf, size_t size)
1525 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1526 struct etmv4_config *config = &drvdata->config;
1528 if (kstrtoul(buf, 16, &val))
1530 if (val >= drvdata->nr_cntr)
1534 * Use spinlock to ensure index doesn't change while it gets
1535 * dereferenced multiple times within a spinlock block elsewhere.
1537 spin_lock(&drvdata->spinlock);
1538 config->cntr_idx = val;
1539 spin_unlock(&drvdata->spinlock);
1542 static DEVICE_ATTR_RW(cntr_idx);
1544 static ssize_t cntrldvr_show(struct device *dev,
1545 struct device_attribute *attr,
1550 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1551 struct etmv4_config *config = &drvdata->config;
1553 spin_lock(&drvdata->spinlock);
1554 idx = config->cntr_idx;
1555 val = config->cntrldvr[idx];
1556 spin_unlock(&drvdata->spinlock);
1557 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1560 static ssize_t cntrldvr_store(struct device *dev,
1561 struct device_attribute *attr,
1562 const char *buf, size_t size)
1566 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1567 struct etmv4_config *config = &drvdata->config;
1569 if (kstrtoul(buf, 16, &val))
1571 if (val > ETM_CNTR_MAX_VAL)
1574 spin_lock(&drvdata->spinlock);
1575 idx = config->cntr_idx;
1576 config->cntrldvr[idx] = val;
1577 spin_unlock(&drvdata->spinlock);
1580 static DEVICE_ATTR_RW(cntrldvr);
1582 static ssize_t cntr_val_show(struct device *dev,
1583 struct device_attribute *attr,
1588 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1589 struct etmv4_config *config = &drvdata->config;
1591 spin_lock(&drvdata->spinlock);
1592 idx = config->cntr_idx;
1593 val = config->cntr_val[idx];
1594 spin_unlock(&drvdata->spinlock);
1595 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1598 static ssize_t cntr_val_store(struct device *dev,
1599 struct device_attribute *attr,
1600 const char *buf, size_t size)
1604 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1605 struct etmv4_config *config = &drvdata->config;
1607 if (kstrtoul(buf, 16, &val))
1609 if (val > ETM_CNTR_MAX_VAL)
1612 spin_lock(&drvdata->spinlock);
1613 idx = config->cntr_idx;
1614 config->cntr_val[idx] = val;
1615 spin_unlock(&drvdata->spinlock);
1618 static DEVICE_ATTR_RW(cntr_val);
1620 static ssize_t cntr_ctrl_show(struct device *dev,
1621 struct device_attribute *attr,
1626 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1627 struct etmv4_config *config = &drvdata->config;
1629 spin_lock(&drvdata->spinlock);
1630 idx = config->cntr_idx;
1631 val = config->cntr_ctrl[idx];
1632 spin_unlock(&drvdata->spinlock);
1633 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1636 static ssize_t cntr_ctrl_store(struct device *dev,
1637 struct device_attribute *attr,
1638 const char *buf, size_t size)
1642 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1643 struct etmv4_config *config = &drvdata->config;
1645 if (kstrtoul(buf, 16, &val))
1648 spin_lock(&drvdata->spinlock);
1649 idx = config->cntr_idx;
1650 config->cntr_ctrl[idx] = val;
1651 spin_unlock(&drvdata->spinlock);
1654 static DEVICE_ATTR_RW(cntr_ctrl);
1656 static ssize_t res_idx_show(struct device *dev,
1657 struct device_attribute *attr,
1661 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1662 struct etmv4_config *config = &drvdata->config;
1664 val = config->res_idx;
1665 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1668 static ssize_t res_idx_store(struct device *dev,
1669 struct device_attribute *attr,
1670 const char *buf, size_t size)
1673 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1674 struct etmv4_config *config = &drvdata->config;
1676 if (kstrtoul(buf, 16, &val))
1679 * Resource selector pair 0 is always implemented and reserved,
1680 * namely an idx with 0 and 1 is illegal.
1682 if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1686 * Use spinlock to ensure index doesn't change while it gets
1687 * dereferenced multiple times within a spinlock block elsewhere.
1689 spin_lock(&drvdata->spinlock);
1690 config->res_idx = val;
1691 spin_unlock(&drvdata->spinlock);
1694 static DEVICE_ATTR_RW(res_idx);
1696 static ssize_t res_ctrl_show(struct device *dev,
1697 struct device_attribute *attr,
1702 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1703 struct etmv4_config *config = &drvdata->config;
1705 spin_lock(&drvdata->spinlock);
1706 idx = config->res_idx;
1707 val = config->res_ctrl[idx];
1708 spin_unlock(&drvdata->spinlock);
1709 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1712 static ssize_t res_ctrl_store(struct device *dev,
1713 struct device_attribute *attr,
1714 const char *buf, size_t size)
1718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1719 struct etmv4_config *config = &drvdata->config;
1721 if (kstrtoul(buf, 16, &val))
1724 spin_lock(&drvdata->spinlock);
1725 idx = config->res_idx;
1726 /* For odd idx pair inversal bit is RES0 */
1728 /* PAIRINV, bit[21] */
1729 val &= ~TRCRSCTLRn_PAIRINV;
1730 config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
1732 TRCRSCTLRn_GROUP_MASK |
1733 TRCRSCTLRn_SELECT_MASK);
1734 spin_unlock(&drvdata->spinlock);
1737 static DEVICE_ATTR_RW(res_ctrl);
1739 static ssize_t sshot_idx_show(struct device *dev,
1740 struct device_attribute *attr, char *buf)
1743 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1744 struct etmv4_config *config = &drvdata->config;
1746 val = config->ss_idx;
1747 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1750 static ssize_t sshot_idx_store(struct device *dev,
1751 struct device_attribute *attr,
1752 const char *buf, size_t size)
1755 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1756 struct etmv4_config *config = &drvdata->config;
1758 if (kstrtoul(buf, 16, &val))
1760 if (val >= drvdata->nr_ss_cmp)
1763 spin_lock(&drvdata->spinlock);
1764 config->ss_idx = val;
1765 spin_unlock(&drvdata->spinlock);
1768 static DEVICE_ATTR_RW(sshot_idx);
1770 static ssize_t sshot_ctrl_show(struct device *dev,
1771 struct device_attribute *attr,
1775 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1776 struct etmv4_config *config = &drvdata->config;
1778 spin_lock(&drvdata->spinlock);
1779 val = config->ss_ctrl[config->ss_idx];
1780 spin_unlock(&drvdata->spinlock);
1781 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1784 static ssize_t sshot_ctrl_store(struct device *dev,
1785 struct device_attribute *attr,
1786 const char *buf, size_t size)
1790 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1791 struct etmv4_config *config = &drvdata->config;
1793 if (kstrtoul(buf, 16, &val))
1796 spin_lock(&drvdata->spinlock);
1797 idx = config->ss_idx;
1798 config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1799 /* must clear bit 31 in related status register on programming */
1800 config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1801 spin_unlock(&drvdata->spinlock);
1804 static DEVICE_ATTR_RW(sshot_ctrl);
1806 static ssize_t sshot_status_show(struct device *dev,
1807 struct device_attribute *attr, char *buf)
1810 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1811 struct etmv4_config *config = &drvdata->config;
1813 spin_lock(&drvdata->spinlock);
1814 val = config->ss_status[config->ss_idx];
1815 spin_unlock(&drvdata->spinlock);
1816 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1818 static DEVICE_ATTR_RO(sshot_status);
1820 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1821 struct device_attribute *attr,
1825 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1826 struct etmv4_config *config = &drvdata->config;
1828 spin_lock(&drvdata->spinlock);
1829 val = config->ss_pe_cmp[config->ss_idx];
1830 spin_unlock(&drvdata->spinlock);
1831 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1834 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1835 struct device_attribute *attr,
1836 const char *buf, size_t size)
1840 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1841 struct etmv4_config *config = &drvdata->config;
1843 if (kstrtoul(buf, 16, &val))
1846 spin_lock(&drvdata->spinlock);
1847 idx = config->ss_idx;
1848 config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1849 /* must clear bit 31 in related status register on programming */
1850 config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1851 spin_unlock(&drvdata->spinlock);
1854 static DEVICE_ATTR_RW(sshot_pe_ctrl);
1856 static ssize_t ctxid_idx_show(struct device *dev,
1857 struct device_attribute *attr,
1861 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1862 struct etmv4_config *config = &drvdata->config;
1864 val = config->ctxid_idx;
1865 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1868 static ssize_t ctxid_idx_store(struct device *dev,
1869 struct device_attribute *attr,
1870 const char *buf, size_t size)
1873 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1874 struct etmv4_config *config = &drvdata->config;
1876 if (kstrtoul(buf, 16, &val))
1878 if (val >= drvdata->numcidc)
1882 * Use spinlock to ensure index doesn't change while it gets
1883 * dereferenced multiple times within a spinlock block elsewhere.
1885 spin_lock(&drvdata->spinlock);
1886 config->ctxid_idx = val;
1887 spin_unlock(&drvdata->spinlock);
1890 static DEVICE_ATTR_RW(ctxid_idx);
1892 static ssize_t ctxid_pid_show(struct device *dev,
1893 struct device_attribute *attr,
1898 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1899 struct etmv4_config *config = &drvdata->config;
1902 * Don't use contextID tracing if coming from a PID namespace. See
1903 * comment in ctxid_pid_store().
1905 if (task_active_pid_ns(current) != &init_pid_ns)
1908 spin_lock(&drvdata->spinlock);
1909 idx = config->ctxid_idx;
1910 val = (unsigned long)config->ctxid_pid[idx];
1911 spin_unlock(&drvdata->spinlock);
1912 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1915 static ssize_t ctxid_pid_store(struct device *dev,
1916 struct device_attribute *attr,
1917 const char *buf, size_t size)
1921 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1922 struct etmv4_config *config = &drvdata->config;
1925 * When contextID tracing is enabled the tracers will insert the
1926 * value found in the contextID register in the trace stream. But if
1927 * a process is in a namespace the PID of that process as seen from the
1928 * namespace won't be what the kernel sees, something that makes the
1929 * feature confusing and can potentially leak kernel only information.
1930 * As such refuse to use the feature if @current is not in the initial
1933 if (task_active_pid_ns(current) != &init_pid_ns)
1937 * only implemented when ctxid tracing is enabled, i.e. at least one
1938 * ctxid comparator is implemented and ctxid is greater than 0 bits
1941 if (!drvdata->ctxid_size || !drvdata->numcidc)
1943 if (kstrtoul(buf, 16, &pid))
1946 spin_lock(&drvdata->spinlock);
1947 idx = config->ctxid_idx;
1948 config->ctxid_pid[idx] = (u64)pid;
1949 spin_unlock(&drvdata->spinlock);
1952 static DEVICE_ATTR_RW(ctxid_pid);
1954 static ssize_t ctxid_masks_show(struct device *dev,
1955 struct device_attribute *attr,
1958 unsigned long val1, val2;
1959 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1960 struct etmv4_config *config = &drvdata->config;
1963 * Don't use contextID tracing if coming from a PID namespace. See
1964 * comment in ctxid_pid_store().
1966 if (task_active_pid_ns(current) != &init_pid_ns)
1969 spin_lock(&drvdata->spinlock);
1970 val1 = config->ctxid_mask0;
1971 val2 = config->ctxid_mask1;
1972 spin_unlock(&drvdata->spinlock);
1973 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1976 static ssize_t ctxid_masks_store(struct device *dev,
1977 struct device_attribute *attr,
1978 const char *buf, size_t size)
1981 unsigned long val1, val2, mask;
1982 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1983 struct etmv4_config *config = &drvdata->config;
1987 * Don't use contextID tracing if coming from a PID namespace. See
1988 * comment in ctxid_pid_store().
1990 if (task_active_pid_ns(current) != &init_pid_ns)
1994 * only implemented when ctxid tracing is enabled, i.e. at least one
1995 * ctxid comparator is implemented and ctxid is greater than 0 bits
1998 if (!drvdata->ctxid_size || !drvdata->numcidc)
2000 /* one mask if <= 4 comparators, two for up to 8 */
2001 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2002 if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2005 spin_lock(&drvdata->spinlock);
2007 * each byte[0..3] controls mask value applied to ctxid
2010 switch (drvdata->numcidc) {
2012 /* COMP0, bits[7:0] */
2013 config->ctxid_mask0 = val1 & 0xFF;
2016 /* COMP1, bits[15:8] */
2017 config->ctxid_mask0 = val1 & 0xFFFF;
2020 /* COMP2, bits[23:16] */
2021 config->ctxid_mask0 = val1 & 0xFFFFFF;
2024 /* COMP3, bits[31:24] */
2025 config->ctxid_mask0 = val1;
2028 /* COMP4, bits[7:0] */
2029 config->ctxid_mask0 = val1;
2030 config->ctxid_mask1 = val2 & 0xFF;
2033 /* COMP5, bits[15:8] */
2034 config->ctxid_mask0 = val1;
2035 config->ctxid_mask1 = val2 & 0xFFFF;
2038 /* COMP6, bits[23:16] */
2039 config->ctxid_mask0 = val1;
2040 config->ctxid_mask1 = val2 & 0xFFFFFF;
2043 /* COMP7, bits[31:24] */
2044 config->ctxid_mask0 = val1;
2045 config->ctxid_mask1 = val2;
2051 * If software sets a mask bit to 1, it must program relevant byte
2052 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2053 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2054 * of ctxid comparator0 value (corresponding to byte 0) register.
2056 mask = config->ctxid_mask0;
2057 for (i = 0; i < drvdata->numcidc; i++) {
2058 /* mask value of corresponding ctxid comparator */
2059 maskbyte = mask & ETMv4_EVENT_MASK;
2061 * each bit corresponds to a byte of respective ctxid comparator
2064 for (j = 0; j < 8; j++) {
2066 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2069 /* Select the next ctxid comparator mask value */
2071 /* ctxid comparators[4-7] */
2072 mask = config->ctxid_mask1;
2077 spin_unlock(&drvdata->spinlock);
2080 static DEVICE_ATTR_RW(ctxid_masks);
2082 static ssize_t vmid_idx_show(struct device *dev,
2083 struct device_attribute *attr,
2087 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2088 struct etmv4_config *config = &drvdata->config;
2090 val = config->vmid_idx;
2091 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2094 static ssize_t vmid_idx_store(struct device *dev,
2095 struct device_attribute *attr,
2096 const char *buf, size_t size)
2099 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2100 struct etmv4_config *config = &drvdata->config;
2102 if (kstrtoul(buf, 16, &val))
2104 if (val >= drvdata->numvmidc)
2108 * Use spinlock to ensure index doesn't change while it gets
2109 * dereferenced multiple times within a spinlock block elsewhere.
2111 spin_lock(&drvdata->spinlock);
2112 config->vmid_idx = val;
2113 spin_unlock(&drvdata->spinlock);
2116 static DEVICE_ATTR_RW(vmid_idx);
2118 static ssize_t vmid_val_show(struct device *dev,
2119 struct device_attribute *attr,
2123 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2124 struct etmv4_config *config = &drvdata->config;
2127 * Don't use virtual contextID tracing if coming from a PID namespace.
2128 * See comment in ctxid_pid_store().
2130 if (!task_is_in_init_pid_ns(current))
2133 spin_lock(&drvdata->spinlock);
2134 val = (unsigned long)config->vmid_val[config->vmid_idx];
2135 spin_unlock(&drvdata->spinlock);
2136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2139 static ssize_t vmid_val_store(struct device *dev,
2140 struct device_attribute *attr,
2141 const char *buf, size_t size)
2144 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2145 struct etmv4_config *config = &drvdata->config;
2148 * Don't use virtual contextID tracing if coming from a PID namespace.
2149 * See comment in ctxid_pid_store().
2151 if (!task_is_in_init_pid_ns(current))
2155 * only implemented when vmid tracing is enabled, i.e. at least one
2156 * vmid comparator is implemented and at least 8 bit vmid size
2158 if (!drvdata->vmid_size || !drvdata->numvmidc)
2160 if (kstrtoul(buf, 16, &val))
2163 spin_lock(&drvdata->spinlock);
2164 config->vmid_val[config->vmid_idx] = (u64)val;
2165 spin_unlock(&drvdata->spinlock);
2168 static DEVICE_ATTR_RW(vmid_val);
2170 static ssize_t vmid_masks_show(struct device *dev,
2171 struct device_attribute *attr, char *buf)
2173 unsigned long val1, val2;
2174 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2175 struct etmv4_config *config = &drvdata->config;
2178 * Don't use virtual contextID tracing if coming from a PID namespace.
2179 * See comment in ctxid_pid_store().
2181 if (!task_is_in_init_pid_ns(current))
2184 spin_lock(&drvdata->spinlock);
2185 val1 = config->vmid_mask0;
2186 val2 = config->vmid_mask1;
2187 spin_unlock(&drvdata->spinlock);
2188 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2191 static ssize_t vmid_masks_store(struct device *dev,
2192 struct device_attribute *attr,
2193 const char *buf, size_t size)
2196 unsigned long val1, val2, mask;
2197 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2198 struct etmv4_config *config = &drvdata->config;
2202 * Don't use virtual contextID tracing if coming from a PID namespace.
2203 * See comment in ctxid_pid_store().
2205 if (!task_is_in_init_pid_ns(current))
2209 * only implemented when vmid tracing is enabled, i.e. at least one
2210 * vmid comparator is implemented and at least 8 bit vmid size
2212 if (!drvdata->vmid_size || !drvdata->numvmidc)
2214 /* one mask if <= 4 comparators, two for up to 8 */
2215 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2216 if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2219 spin_lock(&drvdata->spinlock);
2222 * each byte[0..3] controls mask value applied to vmid
2225 switch (drvdata->numvmidc) {
2227 /* COMP0, bits[7:0] */
2228 config->vmid_mask0 = val1 & 0xFF;
2231 /* COMP1, bits[15:8] */
2232 config->vmid_mask0 = val1 & 0xFFFF;
2235 /* COMP2, bits[23:16] */
2236 config->vmid_mask0 = val1 & 0xFFFFFF;
2239 /* COMP3, bits[31:24] */
2240 config->vmid_mask0 = val1;
2243 /* COMP4, bits[7:0] */
2244 config->vmid_mask0 = val1;
2245 config->vmid_mask1 = val2 & 0xFF;
2248 /* COMP5, bits[15:8] */
2249 config->vmid_mask0 = val1;
2250 config->vmid_mask1 = val2 & 0xFFFF;
2253 /* COMP6, bits[23:16] */
2254 config->vmid_mask0 = val1;
2255 config->vmid_mask1 = val2 & 0xFFFFFF;
2258 /* COMP7, bits[31:24] */
2259 config->vmid_mask0 = val1;
2260 config->vmid_mask1 = val2;
2267 * If software sets a mask bit to 1, it must program relevant byte
2268 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2269 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2270 * of vmid comparator0 value (corresponding to byte 0) register.
2272 mask = config->vmid_mask0;
2273 for (i = 0; i < drvdata->numvmidc; i++) {
2274 /* mask value of corresponding vmid comparator */
2275 maskbyte = mask & ETMv4_EVENT_MASK;
2277 * each bit corresponds to a byte of respective vmid comparator
2280 for (j = 0; j < 8; j++) {
2282 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2285 /* Select the next vmid comparator mask value */
2287 /* vmid comparators[4-7] */
2288 mask = config->vmid_mask1;
2292 spin_unlock(&drvdata->spinlock);
2295 static DEVICE_ATTR_RW(vmid_masks);
2297 static ssize_t cpu_show(struct device *dev,
2298 struct device_attribute *attr, char *buf)
2301 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2304 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2307 static DEVICE_ATTR_RO(cpu);
2309 static struct attribute *coresight_etmv4_attrs[] = {
2310 &dev_attr_nr_pe_cmp.attr,
2311 &dev_attr_nr_addr_cmp.attr,
2312 &dev_attr_nr_cntr.attr,
2313 &dev_attr_nr_ext_inp.attr,
2314 &dev_attr_numcidc.attr,
2315 &dev_attr_numvmidc.attr,
2316 &dev_attr_nrseqstate.attr,
2317 &dev_attr_nr_resource.attr,
2318 &dev_attr_nr_ss_cmp.attr,
2319 &dev_attr_reset.attr,
2320 &dev_attr_mode.attr,
2322 &dev_attr_event.attr,
2323 &dev_attr_event_instren.attr,
2324 &dev_attr_event_ts.attr,
2325 &dev_attr_syncfreq.attr,
2326 &dev_attr_cyc_threshold.attr,
2327 &dev_attr_bb_ctrl.attr,
2328 &dev_attr_event_vinst.attr,
2329 &dev_attr_s_exlevel_vinst.attr,
2330 &dev_attr_ns_exlevel_vinst.attr,
2331 &dev_attr_addr_idx.attr,
2332 &dev_attr_addr_instdatatype.attr,
2333 &dev_attr_addr_single.attr,
2334 &dev_attr_addr_range.attr,
2335 &dev_attr_addr_start.attr,
2336 &dev_attr_addr_stop.attr,
2337 &dev_attr_addr_ctxtype.attr,
2338 &dev_attr_addr_context.attr,
2339 &dev_attr_addr_exlevel_s_ns.attr,
2340 &dev_attr_addr_cmp_view.attr,
2341 &dev_attr_vinst_pe_cmp_start_stop.attr,
2342 &dev_attr_sshot_idx.attr,
2343 &dev_attr_sshot_ctrl.attr,
2344 &dev_attr_sshot_pe_ctrl.attr,
2345 &dev_attr_sshot_status.attr,
2346 &dev_attr_seq_idx.attr,
2347 &dev_attr_seq_state.attr,
2348 &dev_attr_seq_event.attr,
2349 &dev_attr_seq_reset_event.attr,
2350 &dev_attr_cntr_idx.attr,
2351 &dev_attr_cntrldvr.attr,
2352 &dev_attr_cntr_val.attr,
2353 &dev_attr_cntr_ctrl.attr,
2354 &dev_attr_res_idx.attr,
2355 &dev_attr_res_ctrl.attr,
2356 &dev_attr_ctxid_idx.attr,
2357 &dev_attr_ctxid_pid.attr,
2358 &dev_attr_ctxid_masks.attr,
2359 &dev_attr_vmid_idx.attr,
2360 &dev_attr_vmid_val.attr,
2361 &dev_attr_vmid_masks.attr,
2367 struct coresight_device *csdev;
2372 static void do_smp_cross_read(void *data)
2374 struct etmv4_reg *reg = data;
2376 reg->data = etm4x_relaxed_read32(®->csdev->access, reg->offset);
2379 static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2381 struct etmv4_reg reg;
2383 reg.offset = offset;
2384 reg.csdev = drvdata->csdev;
2387 * smp cross call ensures the CPU will be powered up before
2388 * accessing the ETMv4 trace core registers
2390 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2394 static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2396 struct dev_ext_attribute *eattr;
2398 eattr = container_of(attr, struct dev_ext_attribute, attr);
2399 return (u32)(unsigned long)eattr->var;
2402 static ssize_t coresight_etm4x_reg_show(struct device *dev,
2403 struct device_attribute *d_attr,
2407 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2409 offset = coresight_etm4x_attr_to_offset(d_attr);
2411 pm_runtime_get_sync(dev->parent);
2412 val = etmv4_cross_read(drvdata, offset);
2413 pm_runtime_put_sync(dev->parent);
2415 return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2419 etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2422 ETM_COMMON_SYSREG_LIST_CASES
2424 * Common registers to ETE & ETM4x accessible via system
2425 * instructions are always implemented.
2429 ETM4x_ONLY_SYSREG_LIST_CASES
2431 * We only support etm4x and ete. So if the device is not
2432 * ETE, it must be ETMv4x.
2434 return !etm4x_is_ete(drvdata);
2436 ETM4x_MMAP_LIST_CASES
2438 * Registers accessible only via memory-mapped registers
2439 * must not be accessed via system instructions.
2440 * We cannot access the drvdata->csdev here, as this
2441 * function is called during the device creation, via
2442 * coresight_register() and the csdev is not initialized
2443 * until that is done. So rely on the drvdata->base to
2444 * detect if we have a memory mapped access.
2445 * Also ETE doesn't implement memory mapped access, thus
2446 * it is sufficient to check that we are using mmio.
2448 return !!drvdata->base;
2450 ETE_ONLY_SYSREG_LIST_CASES
2451 return etm4x_is_ete(drvdata);
2458 * Hide the ETM4x registers that may not be available on the
2460 * There are certain management registers unavailable via system
2461 * instructions. Make those sysfs attributes hidden on such
2465 coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2466 struct attribute *attr, int unused)
2468 struct device *dev = kobj_to_dev(kobj);
2469 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2470 struct device_attribute *d_attr;
2473 d_attr = container_of(attr, struct device_attribute, attr);
2474 offset = coresight_etm4x_attr_to_offset(d_attr);
2476 if (etm4x_register_implemented(drvdata, offset))
/*
 * Build a read-only dev_ext_attribute that shows the raw register at
 * @offset via coresight_etm4x_reg_show(); the offset is stashed in the
 * extended attribute's var field.
 */
#define coresight_etm4x_reg(name, offset)				\
	&((struct dev_ext_attribute[]) {				\
	   {								\
		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
		(void *)(unsigned long)offset				\
	   }								\
	})[0].attr.attr
2489 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2490 coresight_etm4x_reg(trcpdcr, TRCPDCR),
2491 coresight_etm4x_reg(trcpdsr, TRCPDSR),
2492 coresight_etm4x_reg(trclsr, TRCLSR),
2493 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2494 coresight_etm4x_reg(trcdevid, TRCDEVID),
2495 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2496 coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2497 coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2498 coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2499 coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2500 coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2501 coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2502 coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
2503 coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
2507 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2508 coresight_etm4x_reg(trcidr0, TRCIDR0),
2509 coresight_etm4x_reg(trcidr1, TRCIDR1),
2510 coresight_etm4x_reg(trcidr2, TRCIDR2),
2511 coresight_etm4x_reg(trcidr3, TRCIDR3),
2512 coresight_etm4x_reg(trcidr4, TRCIDR4),
2513 coresight_etm4x_reg(trcidr5, TRCIDR5),
2514 /* trcidr[6,7] are reserved */
2515 coresight_etm4x_reg(trcidr8, TRCIDR8),
2516 coresight_etm4x_reg(trcidr9, TRCIDR9),
2517 coresight_etm4x_reg(trcidr10, TRCIDR10),
2518 coresight_etm4x_reg(trcidr11, TRCIDR11),
2519 coresight_etm4x_reg(trcidr12, TRCIDR12),
2520 coresight_etm4x_reg(trcidr13, TRCIDR13),
2524 static const struct attribute_group coresight_etmv4_group = {
2525 .attrs = coresight_etmv4_attrs,
2528 static const struct attribute_group coresight_etmv4_mgmt_group = {
2529 .is_visible = coresight_etm4x_attr_reg_implemented,
2530 .attrs = coresight_etmv4_mgmt_attrs,
2534 static const struct attribute_group coresight_etmv4_trcidr_group = {
2535 .attrs = coresight_etmv4_trcidr_attrs,
2539 const struct attribute_group *coresight_etmv4_groups[] = {
2540 &coresight_etmv4_group,
2541 &coresight_etmv4_mgmt_group,
2542 &coresight_etmv4_trcidr_group,