GNU Linux-libre 4.14.290-gnu1
arch/powerpc/platforms/pseries/dtl.c
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

struct dtl {
	struct dtl_entry	*buf;
	struct dentry		*file;
	int			cpu;
	int			buf_entries;
	u64			last_idx;	/* next entry to copy to userspace */
	spinlock_t		lock;		/* protects buf and last_idx */
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event mask; the default (0x7) enables all three
 * event types:
 *   0x1: voluntary virtual processor waits
 *   0x2: time-slice preempts
 *   0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;
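
/*
 * The mask is runtime-tunable through the dtl_event_mask debugfs file
 * created in dtl_init() below. For example, assuming debugfs is mounted
 * at /sys/kernel/debug:
 *
 *	# echo 0x2 > /sys/kernel/debug/powerpc/dtl/dtl_event_mask
 *
 * logs only time-slice preempts. The new mask takes effect the next
 * time a per-cpu log is enabled.
 */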

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;
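
/*
 * N_DISPATCH_LOG (asm/lppaca.h) is the number of dtl_entry records that
 * fit in DISPATCH_LOG_BYTES (one 4k page); dtl_cache, from which
 * dtl_enable() allocates below, is expected to be created with matching
 * size and alignment, so a buffer never straddles a 4k boundary.
 */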

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
	u8	saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we are
 * handed entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
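
/*
 * consume_dtle() runs on the local cpu, hooked in as dtl_consumer by
 * dtl_start() below and called by the cpu accounting code for each
 * processed entry. The smp_wmb() orders the entry copy before the
 * write_index update, so a reader that observes the new index (see
 * dtl_file_read()) also sees a complete entry.
 */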

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/*
	 * Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer.
	 */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) failed with %d\n",
		       __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/*
	 * Ensure that our updates to the lppaca fields have occurred
	 * before we actually enable the logging.
	 */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}
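
/*
 * register_dtl() and unregister_dtl() (asm/plpar_wrappers.h) wrap the
 * H_REGISTER_VPA hcall, which registers and deregisters the dispatch
 * trace log area with the hypervisor for the given hardware thread.
 */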

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_enable(struct dtl *dtl)
{
	long n_entries;
	long rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;

	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	/*
	 * If the log has wrapped since we last read it, skip forward to
	 * the oldest entry still available in the buffer.
	 */
	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* ... and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
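
/*
 * Example userspace consumer (a sketch, not part of this file): opening
 * the per-cpu file enables logging on that cpu, and closing it tears
 * the log down again. This assumes debugfs is mounted at
 * /sys/kernel/debug and that the reader knows the struct dtl_entry
 * layout from asm/lppaca.h; process() is a hypothetical callback.
 *
 *	struct dtl_entry entries[32];
 *	ssize_t n;
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *
 *	// the read length must be a multiple of sizeof(struct dtl_entry),
 *	// otherwise dtl_file_read() returns -EINVAL
 *	while ((n = read(fd, entries, sizeof(entries))) > 0)
 *		process(entries, n / sizeof(struct dtl_entry));
 *	close(fd);
 */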

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	snprintf(name, sizeof(name), "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
				__func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);

		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
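
/* Registers dtl_init() to run at arch_initcall time, on pseries only. */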
machine_arch_initcall(pseries, dtl_init);