/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
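
/*
 * The tracepoints in this file are grouped under the "kvmmmu" trace
 * system; with CONFIG_TRACING enabled they appear under events/kvmmmu/
 * in tracefs (typically mounted at /sys/kernel/tracing), so a single
 * event can be enabled with e.g.:
 *
 *   echo 1 > /sys/kernel/tracing/events/kvmmmu/kvm_mmu_get_page/enable
 *
 * The KVM_MMU_PAGE_* helpers below factor out the fields shared by every
 * tracepoint that reports on a struct kvm_mmu_page, so each such event
 * declares, assigns and pretty-prints them only once.
 */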

#define KVM_MMU_PAGE_FIELDS             \
        __field(__u8, mmu_valid_gen)    \
        __field(__u64, gfn)             \
        __field(__u32, role)            \
        __field(__u32, root_count)      \
        __field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)                         \
        __entry->mmu_valid_gen = sp->mmu_valid_gen;     \
        __entry->gfn = sp->gfn;                         \
        __entry->role = sp->role.word;                  \
        __entry->root_count = sp->root_count;           \
        __entry->unsync = sp->unsync;

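/*
 * KVM_MMU_PAGE_PRINTK() decodes the saved role word back into a
 * kvm_mmu_page_role when the trace buffer is read and formats the whole
 * entry into the trace_seq.  The trailing "%c" printed with 0 appends a
 * NUL byte, so the returned saved_ptr is a valid C string.
 */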
#define KVM_MMU_PAGE_PRINTK() ({                                        \
        const char *saved_ptr = trace_seq_buffer_ptr(p);                \
        static const char *access_str[] = {                             \
                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
        };                                                              \
        union kvm_mmu_page_role role;                                   \
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
        trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
                         " %snxe %sad root %u %s%c",                    \
                         __entry->mmu_valid_gen,                        \
                         __entry->gfn, role.level,                      \
                         role.gpte_is_8_bytes ? 8 : 4,                  \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
                         role.invalid ? " invalid" : "",                \
                         role.nxe ? "" : "!",                           \
                         role.ad_disabled ? "!" : "",                   \
                         __entry->root_count,                           \
                         __entry->unsync ? "unsync" : "sync", 0);       \
        saved_ptr;                                                      \
})

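/*
 * Flag/name pairs consumed by __print_flags() to render page fault
 * error codes symbolically, e.g. "P|W|U" for a present, write, user
 * fault.
 */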
#define kvm_mmu_trace_pferr_flags       \
        { PFERR_PRESENT_MASK, "P" },    \
        { PFERR_WRITE_MASK, "W" },      \
        { PFERR_USER_MASK, "U" },       \
        { PFERR_RSVD_MASK, "RSVD" },    \
        { PFERR_FETCH_MASK, "F" }

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
        kvm_mmu_pagetable_walk,
        TP_PROTO(u64 addr, u32 pferr),
        TP_ARGS(addr, pferr),

        TP_STRUCT__entry(
                __field(__u64, addr)
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->pferr = pferr;
        ),

        TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);


/* We just walked a paging element */
TRACE_EVENT(
        kvm_mmu_paging_element,
        TP_PROTO(u64 pte, int level),
        TP_ARGS(pte, level),

        TP_STRUCT__entry(
                __field(__u64, pte)
                __field(__u32, level)
        ),

        TP_fast_assign(
                __entry->pte = pte;
                __entry->level = level;
        ),

        TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

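/*
 * Common shape for the accessed/dirty bit events below: all that needs
 * recording is the guest physical address of the guest pte that was
 * touched, computed from the table gfn plus the byte offset of the
 * entry.
 */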
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size),

        TP_STRUCT__entry(
                __field(__u64, gpa)
        ),

        TP_fast_assign(
                __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
                                + index * size;
        ),

        TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

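/* A guest page-table walk failed, with the resulting error code */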
TRACE_EVENT(
        kvm_mmu_walker_error,
        TP_PROTO(u32 pferr),
        TP_ARGS(pferr),

        TP_STRUCT__entry(
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->pferr = pferr;
        ),

        TP_printk("pferr %x %s", __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

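/* A shadow page was looked up: "new" if it had to be created */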
TRACE_EVENT(
        kvm_mmu_get_page,
        TP_PROTO(struct kvm_mmu_page *sp, bool created),
        TP_ARGS(sp, created),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
                __field(bool, created)
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
                __entry->created = created;
        ),

        TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
                  __entry->created ? "new" : "existing")
);

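/*
 * Events that only need the shared shadow-page fields: the sync, unsync
 * and prepare-to-zap tracepoints below all reuse this class.
 */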
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

        TP_PROTO(struct kvm_mmu_page *sp),
        TP_ARGS(sp),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
        ),

        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

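/* An MMIO spte was installed, tagged with the current MMIO generation */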
TRACE_EVENT(
        mark_mmio_spte,
        TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
        TP_ARGS(sptep, gfn, access, gen),

        TP_STRUCT__entry(
                __field(void *, sptep)
                __field(gfn_t, gfn)
                __field(unsigned, access)
                __field(unsigned int, gen)
        ),

        TP_fast_assign(
                __entry->sptep = sptep;
                __entry->gfn = gfn;
                __entry->access = access;
                __entry->gen = gen;
        ),

        TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
                  __entry->gfn, __entry->access, __entry->gen)
);

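/* An MMIO page fault was handled from a cached MMIO spte */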
TRACE_EVENT(
        handle_mmio_page_fault,
        TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
        TP_ARGS(addr, gfn, access),

        TP_STRUCT__entry(
                __field(u64, addr)
                __field(gfn_t, gfn)
                __field(unsigned, access)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->gfn = gfn;
                __entry->access = access;
        ),

        TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
                  __entry->access)
);

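/*
 * A write fault was handled on the fast (lockless) path.  In the printk,
 * "spurious" means the old spte was already writable (another vCPU fixed
 * the fault first), while "fixed" means the new spte is writable, i.e.
 * this attempt resolved the fault.  Note that cr2_or_gpa is printed
 * under the historical "gva" label.
 */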
#define __spte_satisfied(__spte)                                \
        (__entry->retry && is_writable_pte(__entry->__spte))

TRACE_EVENT(
        fast_page_fault,
        TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
                 u64 *sptep, u64 old_spte, bool retry),
        TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),

        TP_STRUCT__entry(
                __field(int, vcpu_id)
                __field(gpa_t, cr2_or_gpa)
                __field(u32, error_code)
                __field(u64 *, sptep)
                __field(u64, old_spte)
                __field(u64, new_spte)
                __field(bool, retry)
        ),

        TP_fast_assign(
                __entry->vcpu_id = vcpu->vcpu_id;
                __entry->cr2_or_gpa = cr2_or_gpa;
                __entry->error_code = error_code;
                __entry->sptep = sptep;
                __entry->old_spte = old_spte;
                __entry->new_spte = *sptep;
                __entry->retry = retry;
        ),

        TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
                  " new %llx spurious %d fixed %d", __entry->vcpu_id,
                  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
                  kvm_mmu_trace_pferr_flags), __entry->sptep,
                  __entry->old_spte, __entry->new_spte,
                  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
        )
);

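/*
 * All shadow pages were invalidated "fast", by bumping the MMU valid
 * generation instead of zapping every page up front; the new generation
 * and the number of pages still in use are recorded.
 */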
TRACE_EVENT(
        kvm_mmu_zap_all_fast,
        TP_PROTO(struct kvm *kvm),
        TP_ARGS(kvm),

        TP_STRUCT__entry(
                __field(__u8, mmu_valid_gen)
                __field(unsigned int, mmu_used_pages)
        ),

        TP_fast_assign(
                __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
                __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
        ),

        TP_printk("kvm-mmu-valid-gen %u used_pages %x",
                  __entry->mmu_valid_gen, __entry->mmu_used_pages
        )
);

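/*
 * A cached MMIO spte was checked against the current MMIO generation;
 * it is only valid if the two generations match.
 */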
TRACE_EVENT(
        check_mmio_spte,
        TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
        TP_ARGS(spte, kvm_gen, spte_gen),

        TP_STRUCT__entry(
                __field(unsigned int, kvm_gen)
                __field(unsigned int, spte_gen)
                __field(u64, spte)
        ),

        TP_fast_assign(
                __entry->kvm_gen = kvm_gen;
                __entry->spte_gen = spte_gen;
                __entry->spte = spte;
        ),

        TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
                  __entry->kvm_gen, __entry->spte_gen,
                  __entry->kvm_gen == __entry->spte_gen
        )
);

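/*
 * A leaf spte was installed or updated.  The r/x/u permission summary is
 * computed at trace time because it depends on the spte format; u is the
 * sentinel -1 (printed as neither "u" nor "-") when no user bit is
 * defined.
 */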
TRACE_EVENT(
        kvm_mmu_set_spte,
        TP_PROTO(int level, gfn_t gfn, u64 *sptep),
        TP_ARGS(level, gfn, sptep),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, spte)
                __field(u64, sptep)
                __field(u8, level)
                /* These depend on page entry type, so compute them now.  */
                __field(bool, r)
                __field(bool, x)
                __field(signed char, u) /* signed, so the -1 sentinel survives */
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->spte = *sptep;
                __entry->sptep = virt_to_phys(sptep);
                __entry->level = level;
                __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
                __entry->x = is_executable_pte(__entry->spte);
                __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
        ),

        TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
                  __entry->gfn, __entry->spte,
                  __entry->r ? "r" : "-",
                  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
                  __entry->x ? "x" : "-",
                  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
                  __entry->level, __entry->sptep
        )
);

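/*
 * The page fault handler decided which mapping it wants: pfn is adjusted
 * so that its low bits carry the gfn's offset within a huge page of the
 * given level.
 */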
TRACE_EVENT(
        kvm_mmu_spte_requested,
        TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
        TP_ARGS(addr, level, pfn),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, pfn)
                __field(u8, level)
        ),

        TP_fast_assign(
                __entry->gfn = addr >> PAGE_SHIFT;
                __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
                __entry->level = level;
        ),

        TP_printk("gfn %llx pfn %llx level %d",
                  __entry->gfn, __entry->pfn, __entry->level
        )
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>