// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <asm/firmware.h>
#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

#ifdef CONFIG_DTL
struct dtl {
	struct dtl_entry	*buf;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

static u8 dtl_event_mask = DTL_LOG_ALL;

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;
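
/*
 * Note: with the 48-byte struct dtl_entry, N_DISPATCH_LOG works out to
 * 85 entries (4096 / 48) per cpu. Keeping the whole buffer within one
 * 4k page is what satisfies the firmware's boundary requirement above.
 */
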
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
 * reading from the dispatch trace log. If other code wants to consume
 * DTL entries, it can set this pointer to a function that will get
 * called once for each DTL entry that gets processed.
 */
static void (*dtl_consumer)(struct dtl_entry *entry, u64 index);

struct dtl_ring {
	u64			write_index;
	struct dtl_entry	*write_ptr;
	struct dtl_entry	*buf;
	struct dtl_entry	*buf_end;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
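
/*
 * The smp_wmb() above orders the entry copy before the write_index
 * update, so a reader that observes the new index also observes the
 * fully-written entry rather than a partial one.
 */
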
static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}
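
/*
 * dtl_count tracks how many per-cpu shadow rings are live;
 * consume_dtle() stays installed as dtl_consumer until dtl_stop()
 * drops the last reference.
 */
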
static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
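
/*
 * Summary of the two modes above: with VIRT_CPU_ACCOUNTING_NATIVE the
 * hypervisor logs into the paca's dispatch_log and consume_dtle()
 * mirrors entries into a per-cpu shadow ring, so dtl_current_index()
 * returns our own write_index. Without it, the buffer is registered
 * directly with the hypervisor and dtl_current_index() reads the HV's
 * dtl_idx from the lppaca.
 */
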
static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	/* ensure there are no other conflicting dtl users */
	if (!read_trylock(&dtl_access_lock))
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		read_unlock(&dtl_access_lock);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc) {
		read_unlock(&dtl_access_lock);
		kmem_cache_free(dtl_cache, buf);
	}

	return rc;
}
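
/*
 * Locking protocol: dtl_access_lock is shared with other in-kernel DTL
 * users. Debugfs readers take it for reading (several cpu-N files may
 * be open at once), while exclusive users elsewhere in the pseries
 * code take it for writing. The per-cpu spinlock then serialises
 * enable/read/disable for one cpu's buffer.
 */
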
static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
	read_unlock(&dtl_access_lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
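
/*
 * A minimal sketch of a userspace consumer (assumes debugfs is mounted
 * at /sys/kernel/debug and a struct dtl_entry definition matching the
 * kernel's; reads must be a multiple of the entry size, and
 * handle_entry() is a placeholder):
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry ent;
 *
 *	while (read(fd, &ent, sizeof(ent)) == sizeof(ent))
 *		handle_entry(&ent);	// fields are big-endian
 *	close(fd);
 */
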
static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static void dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}

static int dtl_init(void)
{
	int i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	dtl_dir = debugfs_create_dir("dtl", arch_debugfs_dir);

	debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
	debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		dtl_setup_file(dtl);
	}

	return 0;
}
machine_arch_initcall(pseries, dtl_init);
#endif /* CONFIG_DTL */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static notrace u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
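
/*
 * The tb_delta accumulated above is in timebase ticks:
 * ready_to_enqueue_time plus enqueue_to_dispatch_time spans the whole
 * interval from the vcpu becoming runnable to it actually being
 * dispatched, which is what gets accounted as stolen time.
 */
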
/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace pseries_accumulate_stolen_time(void)
{
	u64 sst, ust;
	struct cpu_accounting_data *acct = &local_paca->accounting;

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;
}

u64 pseries_calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */