arch/x86/kvm/pmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction. For instance, AMD's RDPMC instruction uses 0000_0003h
 *        in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
 *        mechanism, except that it also supports fixed counters. idx can be
 *        used as an index into the gp and fixed counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed). The mapping between pmc and
 *        perf counters is as follows:
 *        * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */

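/* irq_work callback: deliver a deferred PMI to the vCPU outside NMI context. */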
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

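/*
 * Record a counter overflow: flag the PMC for reprogramming, set its bit in
 * global_status and, if the counter has interrupts enabled, arrange for a
 * PMI to be delivered to the guest.
 */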
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Ignore counters that have been reprogrammed already. */
	if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
		return;

	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

	if (!pmc->intr)
		return;

	/*
	 * Inject PMI. If the vcpu was in guest mode during the NMI, the PMI
	 * can be injected on guest mode re-entry. Otherwise we can't be sure
	 * that the vcpu wasn't executing a hlt instruction at the time of the
	 * vmexit and is not going to re-enter guest mode until woken up. So
	 * we should wake it, but this is impossible from NMI context. Do it
	 * from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	__kvm_perf_overflow(pmc, true);
}

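/*
 * Create the host perf_event that backs a vPMC, using the translated event
 * type/config and the guest's exclusion and interrupt settings.
 */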
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
		return;

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->is_paused = false;
	pmc->intr = intr;
}

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

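/*
 * Try to reuse the existing perf_event for this PMC: refresh the sample
 * period and re-enable it. Returns false if a new event must be created.
 */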
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

static int cmp_u64(const void *a, const void *b)
{
	if (*(__u64 *)a < *(__u64 *)b)
		return -1;
	if (*(__u64 *)a > *(__u64 *)b)
		return 1;
	return 0;
}

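/*
 * Program a general-purpose counter from its EVENTSEL MSR value: apply the
 * PMU event filter, map the event to a perf type/config and (re)create or
 * resume the backing perf_event.
 */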
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;

		if (bsearch(&key, filter->events, filter->nevents,
			    sizeof(__u64), cmp_u64))
			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
		else
			allow_event = filter->action == KVM_PMU_EVENT_DENY;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

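/*
 * Program a fixed counter from its field in FIXED_CTR_CTRL: honour the
 * event filter's fixed-counter bitmap and the OS/USR enable bits.
 */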
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

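/* Reprogram a counter by its global PMC index, either gp or fixed. */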
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

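/*
 * Handle KVM_REQ_PMU: reprogram every counter whose reprogram_pmi bit is
 * set, then run the deferred perf_event cleanup if it was requested.
 */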
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access the PMU */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

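/* Handle RDPMC for the VMware backdoor pseudo-counters (TSC and boot time). */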
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

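/*
 * Emulate RDPMC: validate the ECX index and the CR4.PCE/CPL permission
 * check, then return the (optionally 32-bit truncated) counter value.
 */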
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

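/* Deliver a PMI to the guest via the local APIC's LVTPC entry. */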
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		if (kvm_x86_ops.pmu_ops->deliver_pmi)
			kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

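/* Track MSR accesses so that idle vPMC perf_events can be reclaimed later. */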
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/* refresh PMU settings. This function is generally called when underlying
 * settings are changed (such as changes to the guest's PMU CPUID), which
 * should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

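/*
 * A PMC is "speculatively" in use if its enable bits are set in the
 * guest-visible control MSRs, even if no perf_event currently backs it.
 */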
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
			pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	if (kvm_x86_ops.pmu_ops->cleanup)
		kvm_x86_ops.pmu_ops->cleanup(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

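/* Emulate a single increment of a counter, raising an overflow on wrap. */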
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 prev_count;

	prev_count = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);

	reprogram_counter(pmu, pmc->idx);
	if (pmc->counter < prev_count)
		__kvm_perf_overflow(pmc, false);
}

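/*
 * Check whether the PMC's event select, reduced to its event and unit-mask
 * fields, maps to the given generic perf hardware event id.
 */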
static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
	unsigned int perf_hw_id)
{
	u64 old_eventsel = pmc->eventsel;
	unsigned int config;

	pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
	pmc->eventsel = old_eventsel;
	return config == perf_hw_id;
}

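/* Check whether the counter is configured to count at the current guest CPL. */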
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config = pmc->current_config;

	if (pmc_is_gp(pmc)) {
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

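/*
 * Increment every enabled counter that is programmed to count the given
 * generic perf hardware event at the current guest CPL.
 */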
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

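/*
 * KVM_SET_PMU_EVENT_FILTER ioctl: validate and copy the user-supplied
 * filter, sort its event list for bsearch(), and publish it under SRCU.
 */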
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	/*
	 * Sort the in-kernel list so that we can search it with bsearch.
	 */
	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}