Merge tag 'drm-fixes-2021-07-30' of git://anongit.freedesktop.org/drm/drm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Jul 2021 05:10:05 +0000 (22:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Jul 2021 05:10:05 +0000 (22:10 -0700)
Pull drm fixes from Dave Airlie:
 "Regular drm fixes pull, seems about the right size, lots of small
  fixes across the board, mostly amdgpu, but msm and i915 are in there
  along with panel and ttm.

  amdgpu:
   - Fix resource leak in an error path
   - Avoid stack contents exposure in error path
   - pmops check fix for S0ix vs S3
   - DCN 2.1 display fixes
   - DCN 2.0 display fix
   - Backlight control fix for laptops with HDR panels
   - Maintainers updates

  i915:
   - Fix vbt port mask
   - Fix around reading the right DSC disable fuse in display_ver 10
   - Split display version 9 and 10 in intel_setup_outputs

  msm:
   - iommu fault display fix
   - misc dp compliance fixes
   - dpu reg sizing fix

  panel:
   - Fix bpc for ytc700tlag_05_201c

  ttm:
   - debugfs init fixes"

* tag 'drm-fixes-2021-07-30' of git://anongit.freedesktop.org/drm/drm:
  MAINTAINERS: add bugs and chat URLs for amdgpu
  drm/amdgpu/display: only enable aux backlight control for OLED panels
  drm/amd/display: ensure dentist display clock update finished in DCN20
  drm/amd/display: Add missing DCN21 IP parameter
  drm/amd/display: Guard DST_Y_PREFETCH register overflow in DCN21
  drm/amdgpu: Check pmops for desired suspend state
  drm/msm/dp: Initialize dp->aux->drm_dev before registration
  drm/msm/dp: signal audio plugged change at dp_pm_resume
  drm/msm/dp: Initialize the INTF_CONFIG register
  drm/msm/dp: use dp_ctrl_off_link_stream during PHY compliance test run
  drm/msm: Fix display fault handling
  drm/msm/dpu: Fix sm8250_mdp register length
  drm/amdgpu: Avoid printing of stack contents on firmware load error
  drm/amdgpu: Fix resource leak on probe error path
  drm/i915/display: split DISPLAY_VER 9 and 10 in intel_setup_outputs()
  drm/i915: fix not reading DSC disable fuse in GLK
  drm/i915/bios: Fix ports mask
  drm/panel: panel-simple: Fix proper bpc for ytc700tlag_05_201c
  drm/ttm: Initialize debugfs from ttm_global_init()

70 files changed:
Documentation/virt/kvm/api.rst
arch/alpha/Kconfig
arch/alpha/boot/bootp.c
arch/alpha/boot/bootpz.c
arch/alpha/boot/misc.c
arch/alpha/configs/defconfig
arch/alpha/include/asm/compiler.h
arch/alpha/include/asm/syscall.h
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/process.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/sys_nautilus.c
arch/alpha/kernel/traps.c
arch/alpha/math-emu/math.c
arch/arm/mach-rpc/riscpc.c
arch/arm64/kvm/mmu.c
arch/m68k/coldfire/m525x.c
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/diag.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/ioapic.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.h
arch/x86/kvm/x86.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/irdma/ctrl.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.c
drivers/infiniband/hw/irdma/type.h
drivers/infiniband/hw/irdma/uk.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/sw/rxe/rxe_mr.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/platform/x86/wireless-hotkey.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/arm/fas216.c
fs/Kconfig.binfmt
fs/Makefile
fs/binfmt_em86.c [deleted file]
fs/ext2/dir.c
fs/ext2/ext2.h
fs/ext2/namei.c
fs/internal.h
fs/reiserfs/stree.c
fs/reiserfs/super.c
include/linux/fs_context.h
include/uapi/linux/idxd.h
include/uapi/rdma/irdma-abi.h
kernel/cgroup/cgroup-v1.c
kernel/workqueue.c
net/unix/af_unix.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/access_tracking_perf_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/steal_time.c
virt/kvm/kvm_main.c

Documentation/virt/kvm/api.rst
index c7b165c..dae68e6 100644 (file)
@@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
 use PPIs designated for specific cpus.  The irq field is interpreted
 like this::
 
-bits:  |  31 ... 28  | 27 ... 24 | 23  ... 16 | 15 ... 0 |
+bits:  |  31 ... 28  | 27 ... 24 | 23  ... 16 | 15 ... 0 |
   field: | vcpu2_index | irq_type  | vcpu_index |  irq_id  |
 
 The irq_type field has the following values:
@@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl.
 Errors:
 
   ======   ============================================================
-ENOENT   no such register
-EINVAL   invalid register ID, or no such register or used with VMs in
+ENOENT   no such register
+EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
-EPERM    (arm64) register access not allowed before vcpu finalization
+EPERM    (arm64) register access not allowed before vcpu finalization
   ======   ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -2590,10 +2590,10 @@ following id bit patterns::
 Errors include:
 
   ======== ============================================================
-ENOENT   no such register
-EINVAL   invalid register ID, or no such register or used with VMs in
+ENOENT   no such register
+EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
-EPERM    (arm64) register access not allowed before vcpu finalization
+EPERM    (arm64) register access not allowed before vcpu finalization
   ======== ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -3112,13 +3112,13 @@ current state.  "addr" is ignored.
 Errors:
 
   ======     =================================================================
-EINVAL     the target is unknown, or the combination of features is invalid.
-ENOENT     a features bit specified is unknown.
+EINVAL     the target is unknown, or the combination of features is invalid.
+ENOENT     a features bit specified is unknown.
   ======     =================================================================
 
 This tells KVM what type of CPU to present to the guest, and what
-optional features it should have.  This will cause a reset of the cpu
-registers to their initial values.  If this is not called, KVM_RUN will
+optional features it should have.  This will cause a reset of the cpu
+registers to their initial values.  If this is not called, KVM_RUN will
 return ENOEXEC for that vcpu.
 
 The initial values are defined as:
@@ -3239,8 +3239,8 @@ VCPU matching underlying host.
 Errors:
 
   =====      ==============================================================
-E2BIG      the reg index list is too big to fit in the array specified by
-           the user (the number required will be written into n).
+E2BIG      the reg index list is too big to fit in the array specified by
+           the user (the number required will be written into n).
   =====      ==============================================================
 
 ::
@@ -3288,7 +3288,7 @@ specific device.
 ARM/arm64 divides the id field into two parts, a device id and an
 address type id specific to the individual device::
 
-bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
+bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
   field: |        0x00000000      |     device id   |  addr type id  |
 
 ARM/arm64 currently only require this when using the in-kernel GIC
@@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.
 
-8.28 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
 -----------------------------
 
 Architectures: x86
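
(Aside, not part of the patch: given the irq field layout documented in the first hunk above -- vcpu2_index in bits 31:28, irq_type in 27:24, vcpu_index in 23:16, irq_id in 15:0 -- a caller could pack the field as sketched below. The helper name is hypothetical, not a kernel API.)

    #include <stdint.h>

    /* Hypothetical helper, illustration only: pack the KVM_IRQ_LINE irq
     * field per the layout documented above. */
    static inline uint32_t kvm_arm_pack_irq(uint32_t vcpu2_index, uint32_t irq_type,
                                            uint32_t vcpu_index, uint32_t irq_id)
    {
            return ((vcpu2_index & 0xfu)  << 28) |  /* bits 31..28 */
                   ((irq_type    & 0xfu)  << 24) |  /* bits 27..24 */
                   ((vcpu_index  & 0xffu) << 16) |  /* bits 23..16 */
                    (irq_id      & 0xffffu);        /* bits 15..0  */
    }
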
arch/alpha/Kconfig
index 77d3280..f4ffad7 100644 (file)
@@ -532,7 +532,7 @@ config SMP
          will run faster if you say N here.
 
          See also the SMP-HOWTO available at
-         <http://www.tldp.org/docs.html#howto>.
+         <https://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
 
arch/alpha/boot/bootp.c
index 00266e6..b4faba2 100644 (file)
@@ -23,7 +23,7 @@
 #include "ksize.h"
 
 extern unsigned long switch_to_osf_pal(unsigned long nr,
-       struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+       struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa,
        unsigned long *vptb);
 
 extern void move_stack(unsigned long new_stack);
arch/alpha/boot/bootpz.c
index 43af718..90a2b34 100644 (file)
@@ -200,7 +200,7 @@ extern char _end;
        START_ADDR      KSEG address of the entry point of kernel code.
 
        ZERO_PGE        KSEG address of page full of zeroes, but 
-                       upon entry to kerne cvan be expected
+                       upon entry to kernel, it can be expected
                        to hold the parameter list and possible
                        INTRD information.
 
arch/alpha/boot/misc.c
index d651922..325d4dd 100644 (file)
@@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...)
      __attribute__ ((format (printf, 1, 2)));
 
 /*
- * gzip delarations
+ * gzip declarations
  */
 #define OF(args)  args
 #define STATIC static
arch/alpha/configs/defconfig
index dd2dd9f..7f1ca30 100644 (file)
@@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y
 CONFIG_ALPHA_LEGACY_START_ADDRESS=y
 CONFIG_MATHEMU=y
 CONFIG_CRYPTO_HMAC=y
+CONFIG_DEVTMPFS=y
arch/alpha/include/asm/compiler.h
index 5159ba2..ae64595 100644 (file)
@@ -4,15 +4,4 @@
 
 #include <uapi/asm/compiler.h>
 
-/* Some idiots over in <linux/compiler.h> thought inline should imply
-   always_inline.  This breaks stuff.  We'll include this file whenever
-   we run into such problems.  */
-
-#include <linux/compiler.h>
-#undef inline
-#undef __inline__
-#undef __inline
-#undef __always_inline
-#define __always_inline                inline __attribute__((always_inline))
-
 #endif /* __ALPHA_COMPILER_H */
arch/alpha/include/asm/syscall.h
index 11c688c..f21baba 100644 (file)
@@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
        return AUDIT_ARCH_ALPHA;
 }
 
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->r0;
+}
+
 #endif /* _ASM_ALPHA_SYSCALL_H */
arch/alpha/kernel/osf_sys.c
index d5367a1..d31167e 100644 (file)
@@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
                        return -EFAULT;
                state = &current_thread_info()->ieee_state;
 
-               /* Update softare trap enable bits.  */
+               /* Update software trap enable bits.  */
                *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
 
                /* Update the real fpcr.  */
@@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
                state = &current_thread_info()->ieee_state;
                exc &= IEEE_STATUS_MASK;
 
-               /* Update softare trap enable bits.  */
+               /* Update software trap enable bits.  */
                swcr = (*state & IEEE_SW_MASK) | exc;
                *state |= exc;
 
arch/alpha/kernel/perf_event.c
index e7a59d9..efcf732 100644 (file)
@@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
  * Check that CPU performance counters are supported.
  * - currently support EV67 and later CPUs.
  * - actually some later revisions of the EV6 have the same PMC model as the
- *     EV67 but we don't do suffiently deep CPU detection to detect them.
+ *     EV67 but we don't do sufficiently deep CPU detection to detect them.
  *     Bad luck to the very few people who might have one, I guess.
  */
 static int supported_cpu(void)
arch/alpha/kernel/process.c
index ef0c08e..a5123ea 100644 (file)
@@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = usp;   /* function */
                childstack->r10 = kthread_arg;
-               childregs->hae = alpha_mv.hae_cache,
+               childregs->hae = alpha_mv.hae_cache;
                childti->pcb.usp = 0;
                return 0;
        }
arch/alpha/kernel/setup.c
index 7d56c21..b4fbbba 100644 (file)
@@ -319,18 +319,19 @@ setup_memory(void *kernel_end)
                       i, cluster->usage, cluster->start_pfn,
                       cluster->start_pfn + cluster->numpages);
 
-               /* Bit 0 is console/PALcode reserved.  Bit 1 is
-                  non-volatile memory -- we might want to mark
-                  this for later.  */
-               if (cluster->usage & 3)
-                       continue;
-
                end = cluster->start_pfn + cluster->numpages;
                if (end > max_low_pfn)
                        max_low_pfn = end;
 
                memblock_add(PFN_PHYS(cluster->start_pfn),
                             cluster->numpages << PAGE_SHIFT);
+
+               /* Bit 0 is console/PALcode reserved.  Bit 1 is
+                  non-volatile memory -- we might want to mark
+                  this for later.  */
+               if (cluster->usage & 3)
+                       memblock_reserve(PFN_PHYS(cluster->start_pfn),
+                                        cluster->numpages << PAGE_SHIFT);
        }
 
        /*
arch/alpha/kernel/smp.c
index 4b2575f..cb64e47 100644 (file)
@@ -582,7 +582,7 @@ void
 smp_send_stop(void)
 {
        cpumask_t to_whom;
-       cpumask_copy(&to_whom, cpu_possible_mask);
+       cpumask_copy(&to_whom, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &to_whom);
 #ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
arch/alpha/kernel/sys_nautilus.c
index 53adf43..96fd6ff 100644 (file)
@@ -212,7 +212,7 @@ nautilus_init_pci(void)
 
        /* Use default IO. */
        pci_add_resource(&bridge->windows, &ioport_resource);
-       /* Irongate PCI memory aperture, calculate requred size before
+       /* Irongate PCI memory aperture, calculate required size before
           setting it up. */
        pci_add_resource(&bridge->windows, &irongate_mem);
 
arch/alpha/kernel/traps.c
index 921d4b6..5398f98 100644 (file)
@@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
        long error;
 
        /* Check the UAC bits to decide what the user wants us to do
-          with the unaliged access.  */
+          with the unaligned access.  */
 
        if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
                if (__ratelimit(&ratelimit)) {
arch/alpha/math-emu/math.c
index d568cd9..f7cef66 100644 (file)
@@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc);
 long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
 long do_alpha_fp_emul(unsigned long);
 
-int init_module(void)
+static int alpha_fp_emul_init_module(void)
 {
        save_emul_imprecise = alpha_fp_emul_imprecise;
        save_emul = alpha_fp_emul;
@@ -73,12 +73,14 @@ int init_module(void)
        alpha_fp_emul = do_alpha_fp_emul;
        return 0;
 }
+module_init(alpha_fp_emul_init_module);
 
-void cleanup_module(void)
+static void alpha_fp_emul_cleanup_module(void)
 {
        alpha_fp_emul_imprecise = save_emul_imprecise;
        alpha_fp_emul = save_emul;
 }
+module_exit(alpha_fp_emul_cleanup_module);
 
 #undef  alpha_fp_emul_imprecise
 #define alpha_fp_emul_imprecise                do_alpha_fp_emul_imprecise
@@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
+
+EXPORT_SYMBOL(__udiv_qrnnd);
arch/arm/mach-rpc/riscpc.c
index d23970b..f70fb9c 100644 (file)
@@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
                fallthrough;    /* ??? */
        case 256:
                vram_size += PAGE_SIZE * 256;
+               break;
        default:
                break;
        }
arch/arm64/kvm/mmu.c
index 3155c9e..0625bf2 100644 (file)
@@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                vma_shift = get_vma_page_shift(vma, hva);
        }
 
-       shared = (vma->vm_flags & VM_PFNMAP);
+       shared = (vma->vm_flags & VM_SHARED);
 
        switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
arch/m68k/coldfire/m525x.c
index 2c4d2ca..4853751 100644 (file)
@@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK);
 DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
 
 static struct clk_lookup m525x_clk_lookup[] = {
-       CLKDEV_INIT(NULL, "pll.0", &pll),
+       CLKDEV_INIT(NULL, "pll.0", &clk_pll),
        CLKDEV_INIT(NULL, "sys.0", &clk_sys),
        CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
        CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
arch/s390/include/asm/kvm_host.h
index 9b4473f..161a9e1 100644 (file)
@@ -445,15 +445,15 @@ struct kvm_vcpu_stat {
        u64 instruction_sigp_init_cpu_reset;
        u64 instruction_sigp_cpu_reset;
        u64 instruction_sigp_unknown;
-       u64 diagnose_10;
-       u64 diagnose_44;
-       u64 diagnose_9c;
-       u64 diagnose_9c_ignored;
-       u64 diagnose_9c_forward;
-       u64 diagnose_258;
-       u64 diagnose_308;
-       u64 diagnose_500;
-       u64 diagnose_other;
+       u64 instruction_diagnose_10;
+       u64 instruction_diagnose_44;
+       u64 instruction_diagnose_9c;
+       u64 diag_9c_ignored;
+       u64 diag_9c_forward;
+       u64 instruction_diagnose_258;
+       u64 instruction_diagnose_308;
+       u64 instruction_diagnose_500;
+       u64 instruction_diagnose_other;
        u64 pfault_sync;
 };
 
arch/s390/kvm/diag.c
index 02c146f..807fa9d 100644 (file)
@@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 
        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
-       vcpu->stat.diagnose_10++;
+       vcpu->stat.instruction_diagnose_10++;
 
        if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
            || start < 2 * PAGE_SIZE)
@@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 
        VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
                   vcpu->run->s.regs.gprs[rx]);
-       vcpu->stat.diagnose_258++;
+       vcpu->stat.instruction_diagnose_258++;
        if (vcpu->run->s.regs.gprs[rx] & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
-       vcpu->stat.diagnose_44++;
+       vcpu->stat.instruction_diagnose_44++;
        kvm_vcpu_on_spin(vcpu, true);
        return 0;
 }
@@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
        int tid;
 
        tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
-       vcpu->stat.diagnose_9c++;
+       vcpu->stat.instruction_diagnose_9c++;
 
        /* yield to self */
        if (tid == vcpu->vcpu_id)
@@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
                VCPU_EVENT(vcpu, 5,
                           "diag time slice end directed to %d: yield forwarded",
                           tid);
-               vcpu->stat.diagnose_9c_forward++;
+               vcpu->stat.diag_9c_forward++;
                return 0;
        }
 
@@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
        return 0;
 no_yield:
        VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
-       vcpu->stat.diagnose_9c_ignored++;
+       vcpu->stat.diag_9c_ignored++;
        return 0;
 }
 
@@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
        unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
 
        VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
-       vcpu->stat.diagnose_308++;
+       vcpu->stat.instruction_diagnose_308++;
        switch (subcode) {
        case 3:
                vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
        int ret;
 
-       vcpu->stat.diagnose_500++;
+       vcpu->stat.instruction_diagnose_500++;
        /* No virtio-ccw notification? Get out quickly. */
        if (!vcpu->kvm->arch.css_support ||
            (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
@@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
        case 0x500:
                return __diag_virtio_hypercall(vcpu);
        default:
-               vcpu->stat.diagnose_other++;
+               vcpu->stat.instruction_diagnose_other++;
                return -EOPNOTSUPP;
        }
 }
arch/s390/kvm/kvm-s390.c
index b655a7d..4527ac7 100644 (file)
@@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
-       STATS_DESC_COUNTER(VCPU, diagnose_10),
-       STATS_DESC_COUNTER(VCPU, diagnose_44),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
-       STATS_DESC_COUNTER(VCPU, diagnose_258),
-       STATS_DESC_COUNTER(VCPU, diagnose_308),
-       STATS_DESC_COUNTER(VCPU, diagnose_500),
-       STATS_DESC_COUNTER(VCPU, diagnose_other),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
+       STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
+       STATS_DESC_COUNTER(VCPU, diag_9c_forward),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
        STATS_DESC_COUNTER(VCPU, pfault_sync)
 };
 static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
arch/x86/kvm/ioapic.c
index 698969e..ff005fe 100644 (file)
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
        ioapic->rtc_status.pending_eoi = 0;
-       bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+       bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }
 
 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
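
(Aside, not part of the patch: the "+ 1" in this hunk and the next is the whole fix. Vcpu ids run from 0 to KVM_MAX_VCPU_ID inclusive, so a map or vector array sized KVM_MAX_VCPU_ID is one slot short. A standalone sketch of the off-by-one, using an illustrative stand-in constant:)

    #include <stdio.h>

    #define MAX_VCPU_ID 7   /* illustrative stand-in for KVM_MAX_VCPU_ID */

    int main(void)
    {
            /* ids 0..MAX_VCPU_ID inclusive are MAX_VCPU_ID + 1 values, so the
             * backing array needs MAX_VCPU_ID + 1 slots; sizing it MAX_VCPU_ID
             * would make the write below land one element past the end. */
            unsigned char vectors[MAX_VCPU_ID + 1] = { 0 };

            vectors[MAX_VCPU_ID] = 0x20;    /* a legal id; in bounds with the +1 */
            printf("vector for id %d: 0x%x\n", MAX_VCPU_ID, vectors[MAX_VCPU_ID]);
            return 0;
    }
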
arch/x86/kvm/ioapic.h
index 6604017..11e4065 100644 (file)
@@ -43,13 +43,13 @@ struct kvm_vcpu;
 
 struct dest_map {
        /* vcpu bitmap where IRQ has been sent */
-       DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+       DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
 
        /*
         * Vector sent to a given vcpu, only valid when
         * the vcpu's bit in map is set
         */
-       u8 vectors[KVM_MAX_VCPU_ID];
+       u8 vectors[KVM_MAX_VCPU_ID + 1];
 };
 
 
arch/x86/kvm/svm/avic.c
index 1d01da6..a8ad78a 100644 (file)
@@ -646,7 +646,7 @@ out:
 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct vmcb *vmcb = svm->vmcb;
+       struct vmcb *vmcb = svm->vmcb01.ptr;
        bool activated = kvm_vcpu_apicv_active(vcpu);
 
        if (!enable_apicv)
arch/x86/kvm/svm/nested.c
index 3bd09c5..61738ff 100644 (file)
@@ -515,7 +515,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
         * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
         * avic_physical_id.
         */
-       WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+       WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
 
        /* Copied from vmcb01.  msrpm_base can be overwritten later.  */
        svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
@@ -702,8 +702,8 @@ out:
 }
 
 /* Copy state save area fields which are handled by VMRUN */
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                         struct vmcb_save_area *to_save)
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save)
 {
        to_save->es = from_save->es;
        to_save->cs = from_save->cs;
@@ -722,7 +722,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
        to_save->cpl = 0;
 }
 
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1385,7 +1385,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
 
-       svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
        nested_load_control_from_vmcb12(svm, ctl);
 
        svm_switch_vmcb(svm, &svm->nested.vmcb02);
arch/x86/kvm/svm/svm.c
index 664d20f..e8ccab5 100644 (file)
@@ -1406,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
                goto error_free_vmsa_page;
        }
 
-       svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
        svm->vmcb01.ptr = page_address(vmcb01_page);
        svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
 
@@ -1419,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        svm_switch_vmcb(svm, &svm->vmcb01);
        init_vmcb(vcpu);
 
+       svm_vcpu_init_msrpm(vcpu, svm->msrpm);
+
        svm_init_osvw(vcpu);
        vcpu->arch.microcode_version = 0x01000065;
 
@@ -1568,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control;
 
-       /* The following fields are ignored when AVIC is enabled */
-       WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+       /*
+        * The following fields are ignored when AVIC is enabled
+        */
+       WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
+
        svm_set_intercept(svm, INTERCEPT_VINTR);
 
        /*
@@ -2147,11 +2150,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
        ret = kvm_skip_emulated_instruction(vcpu);
 
        if (vmload) {
-               nested_svm_vmloadsave(vmcb12, svm->vmcb);
+               svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
                svm->sysenter_eip_hi = 0;
                svm->sysenter_esp_hi = 0;
-       } else
-               nested_svm_vmloadsave(svm->vmcb, vmcb12);
+       } else {
+               svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+       }
 
        kvm_vcpu_unmap(vcpu, &map, true);
 
@@ -4344,8 +4348,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 
                BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                    map_save.hva + 0x400);
+               svm_copy_vmrun_state(map_save.hva + 0x400,
+                                    &svm->vmcb01.ptr->save);
 
                kvm_vcpu_unmap(vcpu, &map_save, true);
        }
@@ -4393,8 +4397,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                                         &map_save) == -EINVAL)
                                return 1;
 
-                       svm_copy_vmrun_state(map_save.hva + 0x400,
-                                            &svm->vmcb01.ptr->save);
+                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+                                            map_save.hva + 0x400);
 
                        kvm_vcpu_unmap(vcpu, &map_save, true);
                }
arch/x86/kvm/svm/svm.h
index 7e20907..bd0fe94 100644 (file)
@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                         struct vmcb_save_area *to_save);
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
 
 static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
arch/x86/kvm/svm/svm_onhyperv.h
index 9b9a55a..c53b8bf 100644 (file)
@@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
         * as we mark it dirty unconditionally towards end of vcpu
         * init phase.
         */
-       if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+       if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
            hve->hv_enlightenments_control.msr_bitmap)
                vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
 }
arch/x86/kvm/x86.c
index a4fd106..4116567 100644 (file)
@@ -3407,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
                if (data & 0x1) {
                        vcpu->arch.apf.pageready_pending = false;
@@ -3746,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.apf.msr_int_val;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
 
                msr_info->data = 0;
drivers/infiniband/hw/bnxt_re/main.c
index d567402..a8688a9 100644 (file)
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
        if (!chip_ctx)
                return -ENOMEM;
        chip_ctx->chip_num = bp->chip_num;
+       chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
 
        rdev->chip_ctx = chip_ctx;
        /* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                                       dma_addr_t dma_map,
                                       u32 *fw_stats_ctx_id)
 {
+       struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
        struct hwrm_stat_ctx_alloc_output resp = {0};
        struct hwrm_stat_ctx_alloc_input req = {0};
        struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
        req.stats_dma_addr = cpu_to_le64(dma_map);
-       req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+       req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
        req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
drivers/infiniband/hw/bnxt_re/qplib_res.c
index 17f0701..44282a8 100644 (file)
@@ -56,6 +56,7 @@
 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                                     struct bnxt_qplib_chip_ctx *cctx,
                                      struct bnxt_qplib_stats *stats);
 
 /* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
                goto fail;
 stats_alloc:
        /* Stats */
-       rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+       rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
        if (rc)
                goto fail;
 
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 }
 
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                                     struct bnxt_qplib_chip_ctx *cctx,
                                      struct bnxt_qplib_stats *stats)
 {
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
-       /* 128 byte aligned context memory is required only for 57500.
-        * However making this unconditional, it does not harm previous
-        * generation.
-        */
-       stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+       stats->size = cctx->hw_stats_size;
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
drivers/infiniband/hw/bnxt_re/qplib_res.h
index c291f49..9103150 100644 (file)
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
        u16     chip_num;
        u8      chip_rev;
        u8      chip_metal;
+       u16     hw_stats_size;
        struct bnxt_qplib_drv_modes modes;
 };
 
drivers/infiniband/hw/irdma/ctrl.c
index b1023a7..f1e5515 100644 (file)
@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
  * parses fpm commit info and copy base value
  * of hmc objects in hmc_info
  */
-static enum irdma_status_code
+static void
 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
                              struct irdma_hmc_obj_info *info, u32 *sd)
 {
@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
        else
                *sd = (u32)(size >> 21);
 
-       return 0;
 }
 
 /**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
  * @dev: sc device struct
  * @count: allocate count
  */
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
 {
        writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
-       return 0;
 }
 
 /**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
        ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
                                           &commit_fpm_mem, true, wait_type);
        if (!ret_code)
-               ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
-                                                        hmc_info->hmc_obj,
-                                                        &hmc_info->sd_table.sd_cnt);
+               irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+                                             hmc_info->hmc_obj,
+                                             &hmc_info->sd_table.sd_cnt);
        print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
                             8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
                             false);
drivers/infiniband/hw/irdma/hw.c
index 7afb8a6..00de5ee 100644 (file)
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
  * irdma_set_hw_rsrc - set hw memory resources.
  * @rf: RDMA PCI function
  */
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
 {
        rf->allocated_qps = (void *)(rf->mem_rsrc +
                   (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
        spin_lock_init(&rf->arp_lock);
        spin_lock_init(&rf->qptable_lock);
        spin_lock_init(&rf->qh_list_lock);
-
-       return 0;
 }
 
 /**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 
        rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
 
-       ret = irdma_set_hw_rsrc(rf);
-       if (ret)
-               goto set_hw_rsrc_fail;
+       irdma_set_hw_rsrc(rf);
 
        set_bit(0, rf->allocated_mrs);
        set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 
        return 0;
 
-set_hw_rsrc_fail:
-       kfree(rf->mem_rsrc);
-       rf->mem_rsrc = NULL;
 mem_rsrc_kzalloc_fail:
        kfree(rf->allocated_ws_nodes);
        rf->allocated_ws_nodes = NULL;
drivers/infiniband/hw/irdma/main.c
index ea59432..51a4135 100644 (file)
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
        pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
 }
 
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+                                  struct ice_vsi *vsi)
 {
        struct irdma_pci_f *rf = iwdev->rf;
-       struct ice_vsi *vsi = ice_get_main_vsi(pf);
 
        rf->cdev = pf;
        rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
                                                            struct iidc_auxiliary_dev,
                                                            adev);
        struct ice_pf *pf = iidc_adev->pf;
+       struct ice_vsi *vsi = ice_get_main_vsi(pf);
        struct iidc_qos_params qos_info = {};
        struct irdma_device *iwdev;
        struct irdma_pci_f *rf;
        struct irdma_l2params l2params = {};
        int err;
 
+       if (!vsi)
+               return -EIO;
        iwdev = ib_alloc_device(irdma_device, ibdev);
        if (!iwdev)
                return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
                return -ENOMEM;
        }
 
-       irdma_fill_device_info(iwdev, pf);
+       irdma_fill_device_info(iwdev, pf, vsi);
        rf = iwdev->rf;
 
        if (irdma_ctrl_init_hw(rf)) {
drivers/infiniband/hw/irdma/type.h
index 7387b83..874bc25 100644 (file)
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
                                         struct irdma_aeq_init_info *info);
 enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
                                              struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
-                                                  u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
 
 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
                      int abi_ver);
drivers/infiniband/hw/irdma/uk.c
index a6d52c2..5fb92de 100644 (file)
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
 enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
                                             struct irdma_post_rq_info *info)
 {
-       u32 total_size = 0, wqe_idx, i, byte_off;
+       u32 wqe_idx, i, byte_off;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
        if (qp->max_rq_frag_cnt < info->num_sges)
                return IRDMA_ERR_INVALID_FRAG_COUNT;
 
-       for (i = 0; i < info->num_sges; i++)
-               total_size += info->sg_list[i].len;
-
        wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
drivers/infiniband/hw/irdma/verbs.c
index 9712f69..717147e 100644 (file)
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
  * @iwqp: qp ptr
  * @init_info: initialize info to return
  */
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
                               struct irdma_qp *iwqp,
                               struct irdma_qp_init_info *init_info)
 {
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
                init_info->sq_pa = qpmr->sq_pbl.addr;
                init_info->rq_pa = qpmr->rq_pbl.addr;
        }
-
-       return 0;
 }
 
 /**
@@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
                        }
                }
                init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
-               err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+               irdma_setup_virt_qp(iwdev, iwqp, &init_info);
        } else {
                init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
                err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4..be4bcb4 100644 (file)
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
        int                     num_buf;
        void                    *vaddr;
        int err;
+       int i;
 
        umem = ib_umem_get(pd->ibpd.device, start, length, access);
        if (IS_ERR(umem)) {
-               pr_warn("err %d from rxe_umem_get\n",
-                       (int)PTR_ERR(umem));
+               pr_warn("%s: Unable to pin memory region err = %d\n",
+                       __func__, (int)PTR_ERR(umem));
                err = PTR_ERR(umem);
-               goto err1;
+               goto err_out;
        }
 
        mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        err = rxe_mr_alloc(mr, num_buf);
        if (err) {
-               pr_warn("err %d from rxe_mr_alloc\n", err);
-               ib_umem_release(umem);
-               goto err1;
+               pr_warn("%s: Unable to allocate memory for map\n",
+                               __func__);
+               goto err_release_umem;
        }
 
        mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
                        vaddr = page_address(sg_page_iter_page(&sg_iter));
                        if (!vaddr) {
-                               pr_warn("null vaddr\n");
-                               ib_umem_release(umem);
+                               pr_warn("%s: Unable to get virtual address\n",
+                                               __func__);
                                err = -ENOMEM;
-                               goto err1;
+                               goto err_cleanup_map;
                        }
 
                        buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        return 0;
 
-err1:
+err_cleanup_map:
+       for (i = 0; i < mr->num_map; i++)
+               kfree(mr->map[i]);
+       kfree(mr->map);
+err_release_umem:
+       ib_umem_release(umem);
+err_out:
        return err;
 }
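
(Aside, not part of the patch: the rewritten error path above uses the usual kernel goto-unwind idiom -- labels named for what they release, ordered so each failure point jumps past the cleanup for resources it never acquired. A generic sketch of the idiom, with illustrative names:)

    #include <stdlib.h>

    /* Acquire in order, release in reverse, one label per resource.
     * On success the resources are handed to the caller, not freed here. */
    static int setup(void **out_a, void **out_b)
    {
            void *a, *b;
            int err;

            a = malloc(64);
            if (!a) {
                    err = -1;
                    goto err_out;
            }

            b = malloc(64);
            if (!b) {
                    err = -1;
                    goto err_free_a;
            }

            *out_a = a;
            *out_b = b;
            return 0;

    err_free_a:
            free(a);
    err_out:
            return err;
    }

    int main(void)
    {
            void *a, *b;

            if (setup(&a, &b))
                    return 1;
            free(b);
            free(a);
            return 0;
    }
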
 
drivers/platform/x86/amd-pmc.c
index b9da58e..3481479 100644 (file)
 #define AMD_PMC_RESULT_CMD_UNKNOWN           0xFE
 #define AMD_PMC_RESULT_FAILED                0xFF
 
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET   0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET   0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET    0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET    0x3C
+#define FCH_SSC_MAPPING_SIZE           0x800
+#define FCH_BASE_PHY_ADDR_LOW          0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH         0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION          0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI    0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO    0x05
+#define SMU_MSG_LOG_START              0x06
+#define SMU_MSG_LOG_RESET              0x07
+#define SMU_MSG_LOG_DUMP_DATA          0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS    0x09
 /* List of supported CPU ids */
 #define AMD_CPU_ID_RV                  0x15D0
 #define AMD_CPU_ID_RN                  0x1630
 #define AMD_CPU_ID_PCO                 AMD_CPU_ID_RV
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC                  0x14B5
 
-#define AMD_SMU_FW_VERSION             0x0
 #define PMC_MSG_DELAY_MIN_US           100
 #define RESPONSE_REGISTER_LOOP_MAX     200
 
+#define SOC_SUBSYSTEM_IP_MAX   12
+#define DELAY_MIN_US           2000
+#define DELAY_MAX_US           3000
 enum amd_pmc_def {
        MSG_TEST = 0x01,
        MSG_OS_HINT_PCO,
        MSG_OS_HINT_RN,
 };
 
+struct amd_pmc_bit_map {
+       const char *name;
+       u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+       {"DISPLAY",     BIT(0)},
+       {"CPU",         BIT(1)},
+       {"GFX",         BIT(2)},
+       {"VDD",         BIT(3)},
+       {"ACP",         BIT(4)},
+       {"VCN",         BIT(5)},
+       {"ISP",         BIT(6)},
+       {"NBIO",        BIT(7)},
+       {"DF",          BIT(8)},
+       {"USB0",        BIT(9)},
+       {"USB1",        BIT(10)},
+       {"LAPIC",       BIT(11)},
+       {}
+};
+
 struct amd_pmc_dev {
        void __iomem *regbase;
-       void __iomem *smu_base;
+       void __iomem *smu_virt_addr;
+       void __iomem *fch_virt_addr;
        u32 base_addr;
        u32 cpu_id;
+       u32 active_ips;
        struct device *dev;
+       struct mutex lock; /* generic mutex lock */
 #if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 };
 
 static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val)
        iowrite32(val, dev->regbase + reg_offset);
 }
 
+struct smu_metrics {
+       u32 table_version;
+       u32 hint_count;
+       u32 s0i3_cyclecount;
+       u32 timein_s0i2;
+       u64 timeentering_s0i3_lastcapture;
+       u64 timeentering_s0i3_totaltime;
+       u64 timeto_resume_to_os_lastcapture;
+       u64 timeto_resume_to_os_totaltime;
+       u64 timein_s0i3_lastcapture;
+       u64 timein_s0i3_totaltime;
+       u64 timein_swdrips_lastcapture;
+       u64 timein_swdrips_totaltime;
+       u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+       u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
        struct amd_pmc_dev *dev = s->private;
-       u32 value;
+       struct smu_metrics table;
+       int idx;
+
+       if (dev->cpu_id == AMD_CPU_ID_PCO)
+               return -EINVAL;
+
+       memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+       seq_puts(s, "\n=== SMU Statistics ===\n");
+       seq_printf(s, "Table Version: %d\n", table.table_version);
+       seq_printf(s, "Hint Count: %d\n", table.hint_count);
+       seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+       seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+       seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+       seq_puts(s, "\n=== Active time (in us) ===\n");
+       for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+               if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+                       seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+                                  table.timecondition_notmet_lastcapture[idx]);
+       }
 
-       value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
-       seq_printf(s, "SMU FW Info: %x\n", value);
        return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
 
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+       struct amd_pmc_dev *dev = s->private;
+       u64 entry_time, exit_time, residency;
+
+       entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+       entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+       exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+       exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+       /* It's in 48MHz. We need to convert it */
+       residency = exit_time - entry_time;
+       do_div(residency, 48);
+
+       seq_puts(s, "=== S0ix statistics ===\n");
+       seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+       seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+       seq_printf(s, "Residency Time: %lld\n", residency);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
 static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 {
        debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
        dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
        debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
                            &smu_fw_info_fops);
+       debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+                           &s0ix_stats_fops);
 }
 #else
 static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+       u32 phys_addr_low, phys_addr_hi;
+       u64 smu_phys_addr;
+
+       if (dev->cpu_id == AMD_CPU_ID_PCO)
+               return -EINVAL;
+
+       /* Get Active devices list from SMU */
+       amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+       /* Get dram address */
+       amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+       amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+       smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+       dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+       if (!dev->smu_virt_addr)
+               return -ENOMEM;
+
+       /* Start the logging */
+       amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+       return 0;
+}
+
 static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 {
        u32 value;
@@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
        dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
 }
 
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
 {
        int rc;
-       u8 msg;
        u32 val;
 
+       mutex_lock(&dev->lock);
        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-                               val, val > 0, PMC_MSG_DELAY_MIN_US,
+                               val, val != 0, PMC_MSG_DELAY_MIN_US,
                                PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "failed to talk to SMU\n");
-               return rc;
+               goto out_unlock;
        }
 
        /* Write zero to response register */
@@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
        amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
 
        /* Write message ID to message ID register */
-       msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
        amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-       return 0;
+
+       /* Wait until we get a valid response */
+       rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+                               val, val != 0, PMC_MSG_DELAY_MIN_US,
+                               PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+       if (rc) {
+               dev_err(dev->dev, "SMU response timed out\n");
+               goto out_unlock;
+       }
+
+       switch (val) {
+       case AMD_PMC_RESULT_OK:
+               if (ret) {
+                       /* PMFW may take longer time to return back the data */
+                       usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+                       *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+               }
+               break;
+       case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+               dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+               rc = -EBUSY;
+               goto out_unlock;
+       case AMD_PMC_RESULT_CMD_UNKNOWN:
+               dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+               rc = -EINVAL;
+               goto out_unlock;
+       case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+       case AMD_PMC_RESULT_FAILED:
+       default:
+               dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+               rc = -EIO;
+               goto out_unlock;
+       }
+
+out_unlock:
+       mutex_unlock(&dev->lock);
+       amd_pmc_dump_registers(dev);
+       return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+       switch (dev->cpu_id) {
+       case AMD_CPU_ID_PCO:
+               return MSG_OS_HINT_PCO;
+       case AMD_CPU_ID_RN:
+       case AMD_CPU_ID_YC:
+               return MSG_OS_HINT_RN;
+       }
+       return -EINVAL;
 }
 
 static int __maybe_unused amd_pmc_suspend(struct device *dev)
 {
        struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
        int rc;
+       u8 msg;
+
+       /* Reset and Start SMU logging - to monitor the s0i3 stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
 
-       rc = amd_pmc_send_cmd(pdev, 1);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
        if (rc)
                dev_err(pdev->dev, "suspend failed\n");
 
-       amd_pmc_dump_registers(pdev);
-       return 0;
+       return rc;
 }
 
 static int __maybe_unused amd_pmc_resume(struct device *dev)
 {
        struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
        int rc;
+       u8 msg;
+
+       /* Let SMU know that we are looking for stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
 
-       rc = amd_pmc_send_cmd(pdev, 0);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
        if (rc)
                dev_err(pdev->dev, "resume failed\n");
 
-       amd_pmc_dump_registers(pdev);
        return 0;
 }
 
@@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
 };
 
 static const struct pci_device_id pmc_pci_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = &pmc;
        struct pci_dev *rdev;
-       u32 base_addr_lo;
-       u32 base_addr_hi;
-       u64 base_addr;
+       u32 base_addr_lo, base_addr_hi;
+       u64 base_addr, fch_phys_addr;
        int err;
        u32 val;
 
@@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
        pci_dev_put(rdev);
        base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
 
-       dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
-       if (!dev->smu_base)
-               return -ENOMEM;
-
        dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
                                    AMD_PMC_MAPPING_SIZE);
        if (!dev->regbase)
                return -ENOMEM;
 
-       amd_pmc_dump_registers(dev);
+       mutex_init(&dev->lock);
+
+       /* Use FCH registers to get the S0ix stats */
+       base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+       base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+       fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+       dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+       if (!dev->fch_virt_addr)
+               return -ENOMEM;
+
+       /* Use SMU to get the s0i3 debug stats */
+       err = amd_pmc_setup_smu_logging(dev);
+       if (err)
+               dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
 
        platform_set_drvdata(pdev, dev);
        amd_pmc_dbgfs_register(dev);
@@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
        amd_pmc_dbgfs_unregister(dev);
+       mutex_destroy(&dev->lock);
        return 0;
 }
 
 static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0005", 0},
+       {"AMDI0006", 0},
+       {"AMDI0007", 0},
        {"AMD0004", 0},
        { }
 };
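
The amd-pmc probe hunk above assembles the 64-bit FCH physical address from two 32-bit halves before mapping it. A minimal sketch of that idiom, assuming only the constants visible in this patch (the register offset at the end is purely illustrative):

    u64 fch_phys = ((u64)FCH_BASE_PHY_ADDR_HIGH << 32) | FCH_BASE_PHY_ADDR_LOW;
    void __iomem *fch = devm_ioremap(dev->dev, fch_phys, FCH_SSC_MAPPING_SIZE);

    if (!fch)
            return -ENOMEM;

    /* the S0ix counters are then plain MMIO reads (offset hypothetical) */
    u32 s0ix_residency = ioread32(fch + 0x30);
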
index 5529d7b..fbb224a 100644 (file)
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
index 078648a..e5fbe01 100644 (file)
@@ -25,6 +25,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
        {"INT33D5", 0},
        {"INTC1051", 0},
        {"INTC1054", 0},
+       {"INTC1070", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
index 3671b5d..6cfed44 100644 (file)
@@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj,
        else
                ret = tlmi_save_bios_settings("");
 
+       if (!ret && !tlmi_priv.pending_changes) {
+               tlmi_priv.pending_changes = true;
+               /* let userland know it may need to check reboot pending again */
+               kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+       }
 out:
        kfree(auth_str);
        kfree(set_str);
@@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = {
        .sysfs_ops      = &tlmi_kobj_sysfs_ops,
 };
 
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+                                  char *buf)
+{
+       return sprintf(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
 /* ---- Initialisation --------------------------------------------------------- */
 static void tlmi_release_attr(void)
 {
@@ -659,6 +672,7 @@ static void tlmi_release_attr(void)
                        kobject_put(&tlmi_priv.setting[i]->kobj);
                }
        }
+       sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
        kset_unregister(tlmi_priv.attribute_kset);
 
        /* Authentication structures */
@@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void)
 
                /* Build attribute */
                tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
-               ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
-                               NULL, "%s", tlmi_priv.setting[i]->display_name);
+               ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+                                 "%s", tlmi_priv.setting[i]->display_name);
                if (ret)
                        goto fail_create_attr;
 
@@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void)
                        goto fail_create_attr;
        }
 
+       ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+       if (ret)
+               goto fail_create_attr;
+
        /* Create authentication entries */
        tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
                                                                &tlmi_priv.class_dev->kobj);
@@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void)
                goto fail_create_attr;
        }
        tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
-       ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
-                       NULL, "%s", "Admin");
+       ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
        if (ret)
                goto fail_create_attr;
 
@@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void)
                goto fail_create_attr;
 
        tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
-       ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
-                       NULL, "%s", "System");
+       ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
        if (ret)
                goto fail_create_attr;
 
@@ -818,6 +834,7 @@ static int tlmi_analyze(void)
                                pr_info("Error retrieving possible values for %d : %s\n",
                                                i, setting->display_name);
                }
+               kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
                tlmi_priv.setting[i] = setting;
                tlmi_priv.settings_count++;
                kfree(item);
@@ -844,10 +861,12 @@ static int tlmi_analyze(void)
        if (pwdcfg.password_state & TLMI_PAP_PWD)
                tlmi_priv.pwd_admin->valid = true;
 
+       kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+
        tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
        if (!tlmi_priv.pwd_power) {
                ret = -ENOMEM;
-               goto fail_clear_attr;
+               goto fail_free_pwd_admin;
        }
        strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
        tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
@@ -859,11 +878,19 @@ static int tlmi_analyze(void)
        if (pwdcfg.password_state & TLMI_POP_PWD)
                tlmi_priv.pwd_power->valid = true;
 
+       kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+
        return 0;
 
+fail_free_pwd_admin:
+       kfree(tlmi_priv.pwd_admin);
 fail_clear_attr:
-       for (i = 0; i < TLMI_SETTINGS_COUNT; ++i)
-               kfree(tlmi_priv.setting[i]);
+       for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+               if (tlmi_priv.setting[i]) {
+                       kfree(tlmi_priv.setting[i]->possible_values);
+                       kfree(tlmi_priv.setting[i]);
+               }
+       }
        return ret;
 }
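
The kobject rework above follows the usual two-phase pattern: kobject_init() runs during tlmi_analyze() so every error path can safely kobject_put(), and kobject_add() later makes the object visible in sysfs. A sketch of the idiom, with illustrative names:

    kobject_init(&obj->kobj, &example_ktype);           /* refcount live from here */

    err = kobject_add(&obj->kobj, parent, "%s", name);  /* appears in sysfs */
    if (err)
            kobject_put(&obj->kobj);                    /* drop ref, ->release() runs */
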
 
index 6fa8da7..eb59884 100644 (file)
@@ -60,6 +60,7 @@ struct think_lmi {
        bool can_get_bios_selections;
        bool can_set_bios_password;
        bool can_get_password_settings;
+       bool pending_changes;
 
        struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
        struct device *class_dev;
index b010e4c..11c60a2 100644 (file)
@@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device)
 
        err = wireless_input_setup();
        if (err)
-               pr_err("Failed to setup hp wireless hotkeys\n");
+               pr_err("Failed to setup wireless hotkeys\n");
 
        return err;
 }
index 84fc7a0..4a84599 100644 (file)
@@ -2642,6 +2642,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
 //#endif
                clear_bit(SCpnt->device->id * 8 +
                          (u8)(SCpnt->device->lun & 0x7), host->busyluns);
+               fallthrough;
 
        /*
         * We found the command, and cleared it out.  Either
index 6baa9b3..9c4458a 100644 (file)
@@ -1375,6 +1375,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
                case IS_COMPLETE:
                        break;
                }
+               break;
 
        default:
                break;
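
Both SCSI fixes above deal with implicit switch fall-through: acornscsi marks an intentional fall-through with the fallthrough pseudo-keyword (which expands to __attribute__((__fallthrough__)) on compilers that support it), while fas216 adds a genuinely missing break after its nested switch. The two cases, sketched:

    switch (state) {
    case STATE_A:
            handle_a();
            fallthrough;    /* intentional: STATE_A also wants handle_b() */
    case STATE_B:
            handle_b();
            break;          /* without this, control leaks into default */
    default:
            break;
    }
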
index 06fb7a9..4d5ae61 100644 (file)
@@ -168,21 +168,6 @@ config OSF4_COMPAT
          with v4 shared libraries freely available from Compaq. If you're
          going to use shared libraries from Tru64 version 5.0 or later, say N.
 
-config BINFMT_EM86
-       tristate "Kernel support for Linux/Intel ELF binaries"
-       depends on ALPHA
-       help
-         Say Y here if you want to be able to execute Linux/Intel ELF
-         binaries just like native Alpha binaries on your Alpha machine. For
-         this to work, you need to have the emulator /usr/bin/em86 in place.
-
-         You can get the same functionality by saying N here and saying Y to
-         "Kernel support for MISC binaries".
-
-         You may answer M to compile the emulation support as a module and
-         later load the module when you want to use a Linux/Intel binary. The
-         module will be called binfmt_em86. If unsure, say Y.
-
 config BINFMT_MISC
        tristate "Kernel support for MISC binaries"
        help
index 9c708e1..f98f3e6 100644 (file)
@@ -39,7 +39,6 @@ obj-$(CONFIG_FS_ENCRYPTION)   += crypto/
 obj-$(CONFIG_FS_VERITY)                += verity/
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
 obj-$(CONFIG_BINFMT_AOUT)      += binfmt_aout.o
-obj-$(CONFIG_BINFMT_EM86)      += binfmt_em86.o
 obj-$(CONFIG_BINFMT_MISC)      += binfmt_misc.o
 obj-$(CONFIG_BINFMT_SCRIPT)    += binfmt_script.o
 obj-$(CONFIG_BINFMT_ELF)       += binfmt_elf.o
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
deleted file mode 100644 (file)
index 06b9b9f..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  linux/fs/binfmt_em86.c
- *
- *  Based on linux/fs/binfmt_script.c
- *  Copyright (C) 1996  Martin von Löwis
- *  original #!-checking implemented by tytso.
- *
- *  em86 changes Copyright (C) 1997  Jim Paradis
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/binfmts.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/errno.h>
-
-
-#define EM86_INTERP    "/usr/bin/em86"
-#define EM86_I_NAME    "em86"
-
-static int load_em86(struct linux_binprm *bprm)
-{
-       const char *i_name, *i_arg;
-       char *interp;
-       struct file * file;
-       int retval;
-       struct elfhdr   elf_ex;
-
-       /* Make sure this is a Linux/Intel ELF executable... */
-       elf_ex = *((struct elfhdr *)bprm->buf);
-
-       if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
-               return  -ENOEXEC;
-
-       /* First of all, some simple consistency checks */
-       if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
-               (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
-               !bprm->file->f_op->mmap) {
-                       return -ENOEXEC;
-       }
-
-       /* Need to be able to load the file after exec */
-       if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
-               return -ENOENT;
-
-       /* Unlike in the script case, we don't have to do any hairy
-        * parsing to find our interpreter... it's hardcoded!
-        */
-       interp = EM86_INTERP;
-       i_name = EM86_I_NAME;
-       i_arg = NULL;           /* We reserve the right to add an arg later */
-
-       /*
-        * Splice in (1) the interpreter's name for argv[0]
-        *           (2) (optional) argument to interpreter
-        *           (3) filename of emulated file (replace argv[0])
-        *
-        * This is done in reverse order, because of how the
-        * user environment and arguments are stored.
-        */
-       remove_arg_zero(bprm);
-       retval = copy_string_kernel(bprm->filename, bprm);
-       if (retval < 0) return retval; 
-       bprm->argc++;
-       if (i_arg) {
-               retval = copy_string_kernel(i_arg, bprm);
-               if (retval < 0) return retval; 
-               bprm->argc++;
-       }
-       retval = copy_string_kernel(i_name, bprm);
-       if (retval < 0) return retval;
-       bprm->argc++;
-
-       /*
-        * OK, now restart the process with the interpreter's inode.
-        * Note that we use open_exec() as the name is now in kernel
-        * space, and we don't need to copy it.
-        */
-       file = open_exec(interp);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       bprm->interpreter = file;
-       return 0;
-}
-
-static struct linux_binfmt em86_format = {
-       .module         = THIS_MODULE,
-       .load_binary    = load_em86,
-};
-
-static int __init init_em86_binfmt(void)
-{
-       register_binfmt(&em86_format);
-       return 0;
-}
-
-static void __exit exit_em86_binfmt(void)
-{
-       unregister_binfmt(&em86_format);
-}
-
-core_initcall(init_em86_binfmt);
-module_exit(exit_em86_binfmt);
-MODULE_LICENSE("GPL");
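
As the removed Kconfig help noted, the same behaviour remains available through BINFMT_MISC. A hedged sketch of an equivalent userspace registration - the rule name and interpreter path are illustrative, and a real rule would also match the ELF e_machine field rather than just the four ELF magic bytes used here for brevity:

    #include <stdio.h>

    int main(void)
    {
            /* :name:type:offset:magic:mask:interpreter:flags; binfmt_misc
             * parses the \xNN escapes itself, so write them literally. */
            FILE *f = fopen("/proc/sys/fs/binfmt_misc/register", "w");

            if (!f)
                    return 1;
            fputs(":em86:M::\\x7f\\x45\\x4c\\x46::/usr/bin/em86:", f);
            return fclose(f) ? 1 : 0;
    }
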
index 14292db..2c2f179 100644 (file)
@@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
        return err;
 }
 
-static bool ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
 {
        struct inode *dir = page->mapping->host;
        struct super_block *sb = dir->i_sb;
        unsigned chunk_size = ext2_chunk_size(dir);
-       char *kaddr = page_address(page);
        u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
        unsigned offs, rec_len;
        unsigned limit = PAGE_SIZE;
@@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
        if (!IS_ERR(page)) {
                *page_addr = kmap_local_page(page);
                if (unlikely(!PageChecked(page))) {
-                       if (PageError(page) || !ext2_check_page(page, quiet))
+                       if (PageError(page) || !ext2_check_page(page, quiet,
+                                                               *page_addr))
                                goto fail;
                }
        }
@@ -584,10 +584,10 @@ out_unlock:
  * ext2_delete_entry deletes a directory entry by merging it with the
  * previous entry. Page is up-to-date.
  */
-int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
+                       char *kaddr)
 {
        struct inode *inode = page->mapping->host;
-       char *kaddr = page_address(page);
        unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
        unsigned to = ((char *)dir - kaddr) +
                                ext2_rec_len_from_disk(dir->rec_len);
@@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
                de = ext2_next_entry(de);
        }
        if (pde)
-               from = (char*)pde - (char*)page_address(page);
+               from = (char *)pde - kaddr;
        pos = page_offset(page) + from;
        lock_page(page);
        err = ext2_prepare_chunk(page, pos, to - from);
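
The ext2 change threads the kmap_local_page() address through to the directory helpers because page_address() is not valid for highmem pages mapped that way. The resulting caller-side pattern, sketched (the real callers keep the mapping in *page_addr from ext2_get_page()):

    char *kaddr = kmap_local_page(page);      /* CPU-local mapping */

    /* pass kaddr down instead of re-deriving it via page_address() */
    err = ext2_delete_entry(de, page, kaddr);
    kunmap_local(kaddr);
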
index b0a6948..e512630 100644 (file)
@@ -740,7 +740,8 @@ extern int ext2_inode_by_name(struct inode *dir,
 extern int ext2_make_empty(struct inode *, struct inode *);
 extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
                                                struct page **, void **res_page_addr);
-extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
+                            char *kaddr);
 extern int ext2_empty_dir (struct inode *);
 extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
 extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
index 1f69b81..5f6b756 100644 (file)
@@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
                goto out;
        }
 
-       err = ext2_delete_entry (de, page);
+       err = ext2_delete_entry (de, page, page_addr);
        ext2_put_page(page, page_addr);
        if (err)
                goto out;
@@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns,
        old_inode->i_ctime = current_time(old_inode);
        mark_inode_dirty(old_inode);
 
-       ext2_delete_entry(old_de, old_page);
+       ext2_delete_entry(old_de, old_page, old_page_addr);
 
        if (dir_de) {
                if (old_dir != new_dir)
index 3ce8edb..82e8eb3 100644 (file)
@@ -61,7 +61,6 @@ extern void __init chrdev_init(void);
  */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);
 
index 476a7ff..ef42729 100644 (file)
@@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path)
        search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
 }
 
+static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih)
+{
+       struct reiserfs_de_head *deh;
+       int i;
+
+       deh = B_I_DEH(bh, ih);
+       for (i = 0; i < ih_entry_count(ih); i++) {
+               if (deh_location(&deh[i]) > ih_item_len(ih)) {
+                       reiserfs_warning(NULL, "reiserfs-5094",
+                                        "directory entry location seems wrong %h",
+                                        &deh[i]);
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
 static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
 {
        struct block_head *blkh;
@@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
                                         "(second one): %h", ih);
                        return 0;
                }
-               if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
-                       reiserfs_warning(NULL, "reiserfs-5093",
-                                        "item entry count seems wrong %h",
-                                        ih);
-                       return 0;
+               if (is_direntry_le_ih(ih)) {
+                       if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) {
+                               reiserfs_warning(NULL, "reiserfs-5093",
+                                                "item entry count seems wrong %h",
+                                                ih);
+                               return 0;
+                       }
+                       return has_valid_deh_location(bh, ih);
                }
                prev_location = ih_location(ih);
        }
index 3ffafc7..58481f8 100644 (file)
@@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                unlock_new_inode(root_inode);
        }
 
+       if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) ||
+           !root_inode->i_size) {
+               SWARN(silent, s, "", "corrupt root inode, run fsck");
+               iput(root_inode);
+               errval = -EUCLEAN;
+               goto error;
+       }
+
        s->s_root = d_make_root(root_inode);
        if (!s->s_root)
                goto error;
index e2bc163..6b54982 100644 (file)
@@ -141,6 +141,7 @@ extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
 extern int vfs_parse_fs_param_source(struct fs_context *fc,
                                     struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
 
 /*
  * sget() wrappers to be called from the ->get_tree() op.
index e33997b..edc346a 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
 #ifndef _USR_IDXD_H_
 #define _USR_IDXD_H_
index 26b638a..a7085e0 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
 /*
  * Copyright (c) 2006 - 2021 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
index 8d6bf56..de2c432 100644 (file)
@@ -1221,9 +1221,7 @@ int cgroup1_get_tree(struct fs_context *fc)
                ret = cgroup_do_get_tree(fc);
 
        if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-               struct super_block *sb = fc->root->d_sb;
-               dput(fc->root);
-               deactivate_locked_super(sb);
+               fc_drop_locked(fc);
                ret = 1;
        }
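
fc_drop_locked() is the helper form of exactly the sequence deleted here; judging from those removed lines, its body is essentially the following (a sketch reconstructed from this hunk, not quoted from fs/fs_context.c):

    void fc_drop_locked(struct fs_context *fc)
    {
            struct super_block *sb = fc->root->d_sb;

            dput(fc->root);
            fc->root = NULL;
            deactivate_locked_super(sb);
    }
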
 
index 50142fc..f148eac 100644 (file)
@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
                                                  unbound_release_work);
        struct workqueue_struct *wq = pwq->wq;
        struct worker_pool *pool = pwq->pool;
-       bool is_last;
+       bool is_last = false;
 
-       if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-               return;
+       /*
+        * When @pwq is not linked, it doesn't hold any reference to the
+        * @wq, so @wq may no longer be valid and must not be accessed.
+        */
+       if (!list_empty(&pwq->pwqs_node)) {
+               if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+                       return;
 
-       mutex_lock(&wq->mutex);
-       list_del_rcu(&pwq->pwqs_node);
-       is_last = list_empty(&wq->pwqs);
-       mutex_unlock(&wq->mutex);
+               mutex_lock(&wq->mutex);
+               list_del_rcu(&pwq->pwqs_node);
+               is_last = list_empty(&wq->pwqs);
+               mutex_unlock(&wq->mutex);
+       }
 
        mutex_lock(&wq_pool_mutex);
        put_unbound_pool(pool);
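
For list_empty() to serve as the "was this pwq ever linked?" test above, the list node has to start out self-linked, which the pwq init path is assumed to do in the usual way:

    INIT_LIST_HEAD(&pwq->pwqs_node);   /* self-linked, so list_empty() reads true */
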
index 23c92ad..ba7ced9 100644 (file)
@@ -1526,6 +1526,53 @@ out:
        return err;
 }
 
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+       scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+       /*
+        * Garbage collection of unix sockets starts by selecting a set of
+        * candidate sockets which have reference only from being in flight
+        * (total_refs == inflight_refs).  This condition is checked once during
+        * the candidate collection phase, and candidates are marked as such, so
+        * that non-candidates can later be ignored.  While inflight_refs is
+        * protected by unix_gc_lock, total_refs (file count) is not, hence this
+        * is an instantaneous decision.
+        *
+        * Once a candidate, however, the socket must not be reinstalled into a
+        * file descriptor while the garbage collection is in progress.
+        *
+        * If the above conditions are met, then the directed graph of
+        * candidates (*) does not change while unix_gc_lock is held.
+        *
+        * Any operation that changes the file count through file descriptors
+        * (dup, close, sendmsg) does not change the graph, since candidates
+        * are not installed in fds.
+        *
+        * Dequeueing a candidate via recvmsg would install it into an fd, but
+        * that takes unix_gc_lock to decrement the inflight count, so it's
+        * serialized with garbage collection.
+        *
+        * MSG_PEEK is special in that it does not change the inflight count,
+        * yet does install the socket into an fd.  The following lock/unlock
+        * pair is to ensure serialization with garbage collection.  It must be
+        * done between incrementing the file count and installing the file into
+        * an fd.
+        *
+        * If garbage collection starts after the barrier provided by the
+        * lock/unlock, then it will see the elevated refcount and not mark this
+        * as a candidate.  If a garbage collection is already in progress
+        * before the file count was incremented, then the lock/unlock pair will
+        * ensure that garbage collection is finished before progressing to
+        * installing the fd.
+        *
+        * (*) A -> B where B is on the queue of A or B is on the queue of C
+        * which is on the queue of listening socket A.
+        */
+       spin_lock(&unix_gc_lock);
+       spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
        int err = 0;
@@ -2175,7 +2222,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                sk_peek_offset_fwd(sk, size);
 
                if (UNIXCB(skb).fp)
-                       scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+                       unix_peek_fds(&scm, skb);
        }
        err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2418,7 +2465,7 @@ unlock:
                        /* It is questionable, see note in unix_dgram_recvmsg.
                         */
                        if (UNIXCB(skb).fp)
-                               scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+                               unix_peek_fds(&scm, skb);
 
                        sk_peek_offset_fwd(sk, chunk);
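
The lock/unlock pair in unix_peek_fds() is the "empty critical section as barrier" idiom: acquiring and immediately releasing unix_gc_lock cannot be reordered before the preceding file-count increment, so any garbage collection already in flight finishes first. Sketched generically, with illustrative names:

    atomic_inc(&obj->file_count);   /* 1: a GC starting after this sees the ref */
    spin_lock(&gc_lock);            /* 2: wait out any GC already running... */
    spin_unlock(&gc_lock);          /*    ...no work is done under the lock */
    install_fd(obj);                /* 3: now safe to expose through an fd */
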
 
index 06a351b..0709af0 100644 (file)
@@ -38,6 +38,7 @@
 /x86_64/xen_vmcall_test
 /x86_64/xss_msr_test
 /x86_64/vmx_pmu_msrs_test
+/access_tracking_perf_test
 /demand_paging_test
 /dirty_log_test
 /dirty_log_perf_test
index b853be2..5832f51 100644 (file)
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
index a16c8f0..cc89818 100644 (file)
@@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = {
 #define VREGS_SUBLIST \
        { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
 #define PMU_SUBLIST \
-       { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+       { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
+         .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
 #define SVE_SUBLIST \
        { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
          .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
new file mode 100644 (file)
index 0000000..e2baa18
--- /dev/null
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * access_tracking_perf_test
+ *
+ * Copyright (C) 2021, Google, Inc.
+ *
+ * This test measures the performance effects of KVM's access tracking.
+ * Access tracking is driven by the MMU notifiers test_young, clear_young, and
+ * clear_flush_young. These notifiers do not have a direct userspace API;
+ * however, the clear_young notifier can be triggered by marking pages as idle
+ * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
+ * enable access tracking on guest memory.
+ *
+ * To measure performance this test runs a VM with a configurable number of
+ * vCPUs that each touch every page in disjoint regions of memory. Performance
+ * is measured in the time it takes all vCPUs to finish touching their
+ * predefined region.
+ *
+ * Note that a deterministic correctness test of access tracking is not possible
+ * by using page_idle as it exists today. This is for a few reasons:
+ *
+ * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
+ *    means subsequent guest accesses are not guaranteed to see page table
+ *    updates made by KVM until some time in the future.
+ *
+ * 2. page_idle only operates on LRU pages. Newly allocated pages are not
+ *    immediately added to LRU lists. Instead they are held in a "pagevec",
+ *    which is drained to LRU lists some time in the future. There is no
+ *    userspace API to force this drain to occur.
+ *
+ * These limitations are worked around in this test by using a large enough
+ * region of memory for each vCPU such that the number of translations cached in
+ * the TLB and the number of pages held in pagevecs are a small fraction of the
+ * overall workload. If either of those conditions is not true, this test
+ * will fail rather than silently passing.
+ */
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
+
+/* Global variable used to synchronize all of the vCPU threads. */
+static int iteration = -1;
+
+/* Defines what vCPU threads should do during a given iteration. */
+static enum {
+       /* Run the vCPU to access all its memory. */
+       ITERATION_ACCESS_MEMORY,
+       /* Mark the vCPU's memory idle in page_idle. */
+       ITERATION_MARK_IDLE,
+} iteration_work;
+
+/* Set to true when vCPU threads should exit. */
+static bool done;
+
+/* The iteration that was last completed by each vCPU. */
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+/* Whether to overlap the regions of memory vCPUs access. */
+static bool overlap_memory_access;
+
+struct test_params {
+       /* The backing source for the region of memory. */
+       enum vm_mem_backing_src_type backing_src;
+
+       /* The amount of memory to allocate for each vCPU. */
+       uint64_t vcpu_memory_bytes;
+
+       /* The number of vCPUs to create in the VM. */
+       int vcpus;
+};
+
+static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+{
+       uint64_t value;
+       off_t offset = index * sizeof(value);
+
+       TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
+                   "pread from %s offset 0x%" PRIx64 " failed!",
+                   filename, offset);
+
+       return value;
+}
+
+#define PAGEMAP_PRESENT (1ULL << 63)
+#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
+
+static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+{
+       uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
+       uint64_t entry;
+       uint64_t pfn;
+
+       entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+       if (!(entry & PAGEMAP_PRESENT))
+               return 0;
+
+       pfn = entry & PAGEMAP_PFN_MASK;
+       if (!pfn) {
+               print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
+               exit(KSFT_SKIP);
+       }
+
+       return pfn;
+}
+
+static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+{
+       uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+
+       return !!((bits >> (pfn % 64)) & 1);
+}
+
+static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+{
+       uint64_t bits = 1ULL << (pfn % 64);
+
+       TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
+                   "Set page_idle bits for PFN 0x%" PRIx64, pfn);
+}
+
+static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+{
+       uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
+       uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+       uint64_t page;
+       uint64_t still_idle = 0;
+       uint64_t no_pfn = 0;
+       int page_idle_fd;
+       int pagemap_fd;
+
+       /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
+       if (overlap_memory_access && vcpu_id)
+               return;
+
+       page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+       TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");
+
+       pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+       TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
+
+       for (page = 0; page < pages; page++) {
+               uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+               uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+
+               if (!pfn) {
+                       no_pfn++;
+                       continue;
+               }
+
+               if (is_page_idle(page_idle_fd, pfn)) {
+                       still_idle++;
+                       continue;
+               }
+
+               mark_page_idle(page_idle_fd, pfn);
+       }
+
+       /*
+        * Assumption: Less than 1% of pages are going to be swapped out from
+        * under us during this test.
+        */
+       TEST_ASSERT(no_pfn < pages / 100,
+                   "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
+                   vcpu_id, no_pfn, pages);
+
+       /*
+        * Test that at least 90% of memory has been marked idle (the rest might
+        * not be marked idle because the pages have not yet made it to an LRU
+        * list or the translations are still cached in the TLB). 90% is
+        * arbitrary; high enough that we ensure most memory access went through
+        * access tracking but low enough as to not make the test too brittle
+        * over time and across architectures.
+        */
+       TEST_ASSERT(still_idle < pages / 10,
+                   "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
+                   PRIu64 ").\n",
+                   vcpu_id, still_idle, pages);
+
+       close(page_idle_fd);
+       close(pagemap_fd);
+}
+
+static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
+                        uint64_t expected_ucall)
+{
+       struct ucall uc;
+       uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+
+       TEST_ASSERT(expected_ucall == actual_ucall,
+                   "Guest exited unexpectedly (expected ucall %" PRIu64
+                   ", got %" PRIu64 ")",
+                   expected_ucall, actual_ucall);
+}
+
+static bool spin_wait_for_next_iteration(int *current_iteration)
+{
+       int last_iteration = *current_iteration;
+
+       do {
+               if (READ_ONCE(done))
+                       return false;
+
+               *current_iteration = READ_ONCE(iteration);
+       } while (last_iteration == *current_iteration);
+
+       return true;
+}
+
+static void *vcpu_thread_main(void *arg)
+{
+       struct perf_test_vcpu_args *vcpu_args = arg;
+       struct kvm_vm *vm = perf_test_args.vm;
+       int vcpu_id = vcpu_args->vcpu_id;
+       int current_iteration = -1;
+
+       vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+
+       while (spin_wait_for_next_iteration(&current_iteration)) {
+               switch (READ_ONCE(iteration_work)) {
+               case ITERATION_ACCESS_MEMORY:
+                       vcpu_run(vm, vcpu_id);
+                       assert_ucall(vm, vcpu_id, UCALL_SYNC);
+                       break;
+               case ITERATION_MARK_IDLE:
+                       mark_vcpu_memory_idle(vm, vcpu_id);
+                       break;
+               }
+
+               vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+       }
+
+       return NULL;
+}
+
+static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+{
+       while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+              target_iteration) {
+               continue;
+       }
+}
+
+/* The type of memory accesses to perform in the VM. */
+enum access_type {
+       ACCESS_READ,
+       ACCESS_WRITE,
+};
+
+static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+{
+       struct timespec ts_start;
+       struct timespec ts_elapsed;
+       int next_iteration;
+       int vcpu_id;
+
+       /* Kick off the vCPUs by incrementing iteration. */
+       next_iteration = ++iteration;
+
+       clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+       /* Wait for all vCPUs to finish the iteration. */
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
+               spin_wait_for_vcpu(vcpu_id, next_iteration);
+
+       ts_elapsed = timespec_elapsed(ts_start);
+       pr_info("%-30s: %ld.%09lds\n",
+               description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
+}
+
+static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
+                         const char *description)
+{
+       perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
+       sync_global_to_guest(vm, perf_test_args);
+       iteration_work = ITERATION_ACCESS_MEMORY;
+       run_iteration(vm, vcpus, description);
+}
+
+static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+{
+       /*
+        * Even though this parallelizes the work across vCPUs, this is still a
+        * very slow operation because page_idle forces the test to mark one pfn
+        * at a time and the clear_young notifier serializes on the KVM MMU
+        * lock.
+        */
+       pr_debug("Marking VM memory idle (slow)...\n");
+       iteration_work = ITERATION_MARK_IDLE;
+       run_iteration(vm, vcpus, "Mark memory idle");
+}
+
+static pthread_t *create_vcpu_threads(int vcpus)
+{
+       pthread_t *vcpu_threads;
+       int i;
+
+       vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
+       TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
+
+       for (i = 0; i < vcpus; i++) {
+               vcpu_last_completed_iteration[i] = iteration;
+               pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
+                              &perf_test_args.vcpu_args[i]);
+       }
+
+       return vcpu_threads;
+}
+
+static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
+{
+       int i;
+
+       /* Set done to signal the vCPU threads to exit */
+       done = true;
+
+       for (i = 0; i < vcpus; i++)
+               pthread_join(vcpu_threads[i], NULL);
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+       struct test_params *params = arg;
+       struct kvm_vm *vm;
+       pthread_t *vcpu_threads;
+       int vcpus = params->vcpus;
+
+       vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes,
+                                params->backing_src);
+
+       perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
+                             !overlap_memory_access);
+
+       vcpu_threads = create_vcpu_threads(vcpus);
+
+       pr_info("\n");
+       access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+
+       /* As a control, read and write to the populated memory first. */
+       access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
+       access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+
+       /* Repeat on memory that has been marked as idle. */
+       mark_memory_idle(vm, vcpus);
+       access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
+       mark_memory_idle(vm, vcpus);
+       access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+
+       terminate_vcpu_threads(vcpu_threads, vcpus);
+       free(vcpu_threads);
+       perf_test_destroy_vm(vm);
+}
+
+static void help(char *name)
+{
+       puts("");
+       printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o]  [-s mem_type]\n",
+              name);
+       puts("");
+       printf(" -h: Display this help message.");
+       guest_modes_help();
+       printf(" -b: specify the size of the memory region which should be\n"
+              "     dirtied by each vCPU. e.g. 10M or 3G.\n"
+              "     (default: 1G)\n");
+       printf(" -v: specify the number of vCPUs to run.\n");
+       printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+              "     them into a separate region of memory for each vCPU.\n");
+       printf(" -s: specify the type of memory that should be used to\n"
+              "     back the guest data region.\n\n");
+       backing_src_help();
+       puts("");
+       exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+       struct test_params params = {
+               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
+               .vcpus = 1,
+       };
+       int page_idle_fd;
+       int opt;
+
+       guest_modes_append_default();
+
+       while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
+               switch (opt) {
+               case 'm':
+                       guest_modes_cmdline(optarg);
+                       break;
+               case 'b':
+                       params.vcpu_memory_bytes = parse_size(optarg);
+                       break;
+               case 'v':
+                       params.vcpus = atoi(optarg);
+                       break;
+               case 'o':
+                       overlap_memory_access = true;
+                       break;
+               case 's':
+                       params.backing_src = parse_backing_src_type(optarg);
+                       break;
+               case 'h':
+               default:
+                       help(argv[0]);
+                       break;
+               }
+       }
+
+       page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+       if (page_idle_fd < 0) {
+               print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
+               exit(KSFT_SKIP);
+       }
+       close(page_idle_fd);
+
+       for_each_guest_mode(run_test, &params);
+
+       return 0;
+}
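
The new selftest is driven entirely from the command line; for example, ./access_tracking_perf_test -v 4 -b 1G would time four vCPUs touching 1 GiB each across the populated and idle phases. Its pagemap handling follows the documented bit layout (Documentation/admin-guide/mm/pagemap.rst), which the masks above encode:

    /* bit 63: page present; bits 0-54: PFN (reads as 0 without CAP_SYS_ADMIN) */
    bool present = entry & (1ULL << 63);
    uint64_t pfn = entry & ((1ULL << 55) - 1);
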
index 04a2641..80cbd3a 100644 (file)
@@ -312,6 +312,7 @@ int main(int argc, char *argv[])
                        break;
                case 'o':
                        p.partition_vcpu_memory_access = false;
+                       break;
                case 's':
                        p.backing_src = parse_backing_src_type(optarg);
                        break;
index b0031f2..ecec308 100644 (file)
@@ -320,7 +320,7 @@ int main(int ac, char **av)
                run_delay = get_run_delay();
                pthread_create(&thread, &attr, do_steal_time, NULL);
                do
-                       pthread_yield();
+                       sched_yield();
                while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
                pthread_join(thread, NULL);
                run_delay = get_run_delay() - run_delay;
index 9869598..d20fba0 100644 (file)
@@ -3110,6 +3110,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                        ++vcpu->stat.generic.halt_poll_invalid;
                                goto out;
                        }
+                       cpu_relax();
                        poll_end = cur = ktime_get();
                } while (kvm_vcpu_can_poll(cur, stop));
        }
@@ -4390,6 +4391,16 @@ struct compat_kvm_dirty_log {
        };
 };
 
+struct compat_kvm_clear_dirty_log {
+       __u32 slot;
+       __u32 num_pages;
+       __u64 first_page;
+       union {
+               compat_uptr_t dirty_bitmap; /* one bit per page */
+               __u64 padding2;
+       };
+};
+
 static long kvm_vm_compat_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -4399,6 +4410,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+       case KVM_CLEAR_DIRTY_LOG: {
+               struct compat_kvm_clear_dirty_log compat_log;
+               struct kvm_clear_dirty_log log;
+
+               if (copy_from_user(&compat_log, (void __user *)arg,
+                                  sizeof(compat_log)))
+                       return -EFAULT;
+               log.slot         = compat_log.slot;
+               log.num_pages    = compat_log.num_pages;
+               log.first_page   = compat_log.first_page;
+               log.padding2     = compat_log.padding2;
+               log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+               r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+               break;
+       }
+#endif
        case KVM_GET_DIRTY_LOG: {
                struct compat_kvm_dirty_log compat_log;
                struct kvm_dirty_log log;
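
The separate compat struct exists because 32-bit userspace stores dirty_bitmap as a 32-bit compat_uptr_t inside the union, so the pointer cannot simply be reinterpreted from the native layout; the log is therefore copied field by field, the one real conversion being:

    /* widen the 32-bit user pointer into a native void __user * */
    log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
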