/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

#define KVM_MMU_PAGE_FIELDS             \
        __field(__u8, mmu_valid_gen)    \
        __field(__u64, gfn)             \
        __field(__u32, role)            \
        __field(__u32, root_count)      \
        __field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)                         \
        __entry->mmu_valid_gen = sp->mmu_valid_gen;     \
        __entry->gfn = sp->gfn;                         \
        __entry->role = sp->role.word;                  \
        __entry->root_count = sp->root_count;           \
        __entry->unsync = sp->unsync;

#define KVM_MMU_PAGE_PRINTK() ({                                        \
        const char *saved_ptr = trace_seq_buffer_ptr(p);                \
        static const char *access_str[] = {                             \
                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
        };                                                              \
        union kvm_mmu_page_role role;                                   \
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
        trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
                         " %snxe %sad root %u %s%c",                    \
                         __entry->mmu_valid_gen,                        \
                         __entry->gfn, role.level,                      \
                         role.has_4_byte_gpte ? 4 : 8,                  \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
                         role.invalid ? " invalid" : "",                \
                         role.efer_nx ? "" : "!",                       \
                         role.ad_disabled ? "!" : "",                   \
                         __entry->root_count,                           \
                         __entry->unsync ? "unsync" : "sync", 0);       \
        saved_ptr;                                                      \
                })
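
/*
 * Note: KVM_MMU_PAGE_PRINTK() is a statement expression that evaluates to
 * saved_ptr, a snapshot of the trace_seq buffer taken before the formatted
 * text is appended, which lets TP_printk() consume the result as an ordinary
 * %s argument.  The trailing ", 0" printed via %c appends a NUL byte so the
 * string starting at saved_ptr is properly terminated.
 */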

#define kvm_mmu_trace_pferr_flags       \
        { PFERR_PRESENT_MASK, "P" },    \
        { PFERR_WRITE_MASK, "W" },      \
        { PFERR_USER_MASK, "U" },       \
        { PFERR_RSVD_MASK, "RSVD" },    \
        { PFERR_FETCH_MASK, "F" }
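
/*
 * Flag table consumed by __print_flags() below: each bit set in a page-fault
 * error code is printed symbolically, separated by '|', e.g. "P|W|U" for a
 * user-mode write to a present page.
 */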

TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
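
/*
 * TRACE_DEFINE_ENUM() exports the RET_PF_* enum values to the tracing core,
 * so that user-space tools parsing the event format strings can resolve the
 * enum names to their numeric values.
 */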

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
        kvm_mmu_pagetable_walk,
        TP_PROTO(u64 addr, u32 pferr),
        TP_ARGS(addr, pferr),

        TP_STRUCT__entry(
                __field(__u64, addr)
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->pferr = pferr;
        ),

        TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
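
/*
 * Usage sketch (illustrative only, not part of this header): TRACE_EVENT()
 * above generates a trace_kvm_mmu_pagetable_walk() helper that the guest
 * page-table walker calls when a walk begins, along the lines of:
 *
 *      trace_kvm_mmu_pagetable_walk(addr, error_code);
 *
 * where error_code would be some combination of the PFERR_*_MASK bits; the
 * call site and variable name here are hypothetical.
 */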


/* We just walked a paging element */
TRACE_EVENT(
        kvm_mmu_paging_element,
        TP_PROTO(u64 pte, int level),
        TP_ARGS(pte, level),

        TP_STRUCT__entry(
                __field(__u64, pte)
                __field(__u32, level)
                ),

        TP_fast_assign(
                __entry->pte = pte;
                __entry->level = level;
                ),

        TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size),

        TP_STRUCT__entry(
                __field(__u64, gpa)
        ),

        TP_fast_assign(
                __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
                                + index * size;
                ),

        TP_printk("gpa %llx", __entry->gpa)
);
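
/*
 * The DECLARE_EVENT_CLASS()/DEFINE_EVENT() pair lets the accessed-bit and
 * dirty-bit events below share one set of fields, one assignment block and
 * one format string, which is smaller than two full TRACE_EVENT()s.
 */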

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

TRACE_EVENT(
        kvm_mmu_walker_error,
        TP_PROTO(u32 pferr),
        TP_ARGS(pferr),

        TP_STRUCT__entry(
                __field(__u32, pferr)
                ),

        TP_fast_assign(
                __entry->pferr = pferr;
                ),

        TP_printk("pferr %x %s", __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

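/*
 * Traced when a shadow page is looked up; "created" distinguishes a freshly
 * allocated shadow page ("new") from one found among the already existing
 * shadow pages ("existing").
 */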
TRACE_EVENT(
        kvm_mmu_get_page,
        TP_PROTO(struct kvm_mmu_page *sp, bool created),
        TP_ARGS(sp, created),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
                __field(bool, created)
                ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
                __entry->created = created;
                ),

        TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
                  __entry->created ? "new" : "existing")
);

DECLARE_EVENT_CLASS(kvm_mmu_page_class,

        TP_PROTO(struct kvm_mmu_page *sp),
        TP_ARGS(sp),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
        ),

        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

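/*
 * Traced when an MMIO SPTE is installed to cache an emulated MMIO access.
 * The access bits and the MMIO generation are decoded from the SPTE value,
 * since MMIO SPTEs encode both.
 */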
TRACE_EVENT(
        mark_mmio_spte,
        TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
        TP_ARGS(sptep, gfn, spte),

        TP_STRUCT__entry(
                __field(void *, sptep)
                __field(gfn_t, gfn)
                __field(unsigned, access)
                __field(unsigned int, gen)
        ),

        TP_fast_assign(
                __entry->sptep = sptep;
                __entry->gfn = gfn;
                __entry->access = spte & ACC_ALL;
                __entry->gen = get_mmio_spte_generation(spte);
        ),

        TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
                  __entry->gfn, __entry->access, __entry->gen)
);

TRACE_EVENT(
        handle_mmio_page_fault,
        TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
        TP_ARGS(addr, gfn, access),

        TP_STRUCT__entry(
                __field(u64, addr)
                __field(gfn_t, gfn)
                __field(unsigned, access)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->gfn = gfn;
                __entry->access = access;
        ),

        TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
                  __entry->access)
);

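/*
 * Traced after the lockless fast page fault path has run: old_spte is the
 * value seen on entry, new_spte is re-read from sptep afterwards, and ret
 * tells whether the fault was fixed or turned out to be spurious.
 */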
TRACE_EVENT(
        fast_page_fault,
        TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                 u64 *sptep, u64 old_spte, int ret),
        TP_ARGS(vcpu, fault, sptep, old_spte, ret),

        TP_STRUCT__entry(
                __field(int, vcpu_id)
                __field(gpa_t, cr2_or_gpa)
                __field(u32, error_code)
                __field(u64 *, sptep)
                __field(u64, old_spte)
                __field(u64, new_spte)
                __field(int, ret)
        ),

        TP_fast_assign(
                __entry->vcpu_id = vcpu->vcpu_id;
                __entry->cr2_or_gpa = fault->addr;
                __entry->error_code = fault->error_code;
                __entry->sptep = sptep;
                __entry->old_spte = old_spte;
                __entry->new_spte = *sptep;
                __entry->ret = ret;
        ),

        TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
                  " new %llx spurious %d fixed %d", __entry->vcpu_id,
                  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
                  kvm_mmu_trace_pferr_flags), __entry->sptep,
                  __entry->old_spte, __entry->new_spte,
                  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
        )
);

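/*
 * Traced when all shadow pages are zapped "fast": the VM-wide mmu_valid_gen
 * is updated so that existing shadow pages become obsolete and can be zapped
 * lazily rather than all at once under the MMU lock.
 */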
TRACE_EVENT(
        kvm_mmu_zap_all_fast,
        TP_PROTO(struct kvm *kvm),
        TP_ARGS(kvm),

        TP_STRUCT__entry(
                __field(__u8, mmu_valid_gen)
                __field(unsigned int, mmu_used_pages)
        ),

        TP_fast_assign(
                __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
                __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
        ),

        TP_printk("kvm-mmu-valid-gen %u used_pages %x",
                  __entry->mmu_valid_gen, __entry->mmu_used_pages
        )
);


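/*
 * Traced when a cached MMIO SPTE is checked against the current MMIO
 * generation: "valid" is simply kvm_gen == spte_gen, and a mismatch means
 * the SPTE is stale and must not be used.
 */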
TRACE_EVENT(
        check_mmio_spte,
        TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
        TP_ARGS(spte, kvm_gen, spte_gen),

        TP_STRUCT__entry(
                __field(unsigned int, kvm_gen)
                __field(unsigned int, spte_gen)
                __field(u64, spte)
        ),

        TP_fast_assign(
                __entry->kvm_gen = kvm_gen;
                __entry->spte_gen = spte_gen;
                __entry->spte = spte;
        ),

        TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
                  __entry->kvm_gen, __entry->spte_gen,
                  __entry->kvm_gen == __entry->spte_gen
        )
);

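/*
 * Traced when a shadow PTE is installed.  The u field is a signed char so
 * that -1 can mean "no user bit to report" (shadow_user_mask == 0), in which
 * case the printk omits the u/- flag entirely.
 */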
TRACE_EVENT(
        kvm_mmu_set_spte,
        TP_PROTO(int level, gfn_t gfn, u64 *sptep),
        TP_ARGS(level, gfn, sptep),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, spte)
                __field(u64, sptep)
                __field(u8, level)
                /* These depend on page entry type, so compute them now.  */
                __field(bool, r)
                __field(bool, x)
                __field(signed char, u)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->spte = *sptep;
                __entry->sptep = virt_to_phys(sptep);
                __entry->level = level;
                __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
                __entry->x = is_executable_pte(__entry->spte);
                __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
        ),

        TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
                  __entry->gfn, __entry->spte,
                  __entry->r ? "r" : "-",
                  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
                  __entry->x ? "x" : "-",
                  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
                  __entry->level, __entry->sptep
        )
);

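/*
 * Traced with the page size and pfn the fault handler intends to map.
 * fault->pfn is aligned down to the start of the backing huge page, so the
 * assignment below ORs the low gfn bits back in to report the pfn that
 * corresponds to the faulting gfn.
 */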
TRACE_EVENT(
        kvm_mmu_spte_requested,
        TP_PROTO(struct kvm_page_fault *fault),
        TP_ARGS(fault),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, pfn)
                __field(u8, level)
        ),

        TP_fast_assign(
                __entry->gfn = fault->gfn;
                __entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
                __entry->level = fault->goal_level;
        ),

        TP_printk("gfn %llx pfn %llx level %d",
                  __entry->gfn, __entry->pfn, __entry->level
        )
);

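/*
 * Traced on every SPTE change in the TDP MMU; as_id identifies the x86
 * address space (0 for the normal address space, 1 for SMM).
 */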
TRACE_EVENT(
        kvm_tdp_mmu_spte_changed,
        TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
        TP_ARGS(as_id, gfn, level, old_spte, new_spte),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, old_spte)
                __field(u64, new_spte)
                /* Level cannot be larger than 5 on x86, so it fits in a u8. */
                __field(u8, level)
                /* as_id can only be 0 or 1 on x86, so it fits in a u8. */
                __field(u8, as_id)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->old_spte = old_spte;
                __entry->new_spte = new_spte;
                __entry->level = level;
                __entry->as_id = as_id;
        ),

        TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
                  __entry->as_id, __entry->gfn, __entry->level,
                  __entry->old_spte, __entry->new_spte
        )
);

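/*
 * Traced when a huge page is (or fails to be) split into smaller mappings,
 * e.g. for dirty logging; errno is 0 on success or a negative error code
 * when the split failed.
 */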
TRACE_EVENT(
        kvm_mmu_split_huge_page,
        TP_PROTO(u64 gfn, u64 spte, int level, int errno),
        TP_ARGS(gfn, spte, level, errno),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, spte)
                __field(int, level)
                __field(int, errno)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->spte = spte;
                __entry->level = level;
                __entry->errno = errno;
        ),

        TP_printk("gfn %llx spte %llx level %d errno %d",
                  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>
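
/*
 * A minimal sketch of how this header is instantiated (typical tracepoint
 * boilerplate, assumed rather than shown here): exactly one translation unit
 * defines CREATE_TRACE_POINTS before including it, e.g. in mmu.c:
 *
 *      #define CREATE_TRACE_POINTS
 *      #include "mmutrace.h"
 *
 * which makes <trace/define_trace.h> emit the tracepoint definitions; all
 * other users include the header normally and get only the declarations.
 */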