Merge tag 'sh-for-5.9-part2' of git://git.libc.org/linux-sh
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 18 Sep 2020 18:59:15 +0000 (11:59 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 18 Sep 2020 18:59:15 +0000 (11:59 -0700)
Pull arch/sh fixes from Rich Felker:
 "Fixes for build and function regression"

* tag 'sh-for-5.9-part2' of git://git.libc.org/linux-sh:
  sh: fix syscall tracing
  sh: remove spurious circular inclusion from asm/smp.h

121 files changed:
Documentation/features/debug/debug-vm-pgtable/arch-support.txt
MAINTAINERS
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/paravirt.c
arch/arm64/net/bpf_jit_comp.c
arch/ia64/include/asm/acpi.h
arch/mips/Kconfig
arch/mips/sni/a20r.c
arch/powerpc/Kconfig
arch/powerpc/configs/pasemi_defconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/vdso32/Makefile
arch/powerpc/kernel/vdso32/vdso32.lds.S
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/kernel/vdso64/vdso64.lds.S
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/mm/init_64.c
arch/powerpc/platforms/pseries/papr_scm.c
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/include/asm/acpi.h
drivers/acpi/processor_idle.c
drivers/clk/bcm/Kconfig
drivers/clk/davinci/pll.c
drivers/clk/qcom/lpasscorecc-sc7180.c
drivers/clk/rockchip/clk-rk3228.c
drivers/clk/versatile/clk-impd1.c
drivers/cpuidle/cpuidle-pseries.c
drivers/cpuidle/cpuidle.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_sw_fence.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/hv/channel_mgmt.c
drivers/hv/vmbus_drv.c
drivers/iommu/amd/iommu.c
drivers/mtd/spi-nor/core.c
drivers/mtd/spi-nor/core.h
drivers/powercap/intel_rapl_common.c
drivers/scsi/libsas/sas_discover.c
drivers/video/console/Kconfig
drivers/video/console/vgacon.c
drivers/video/fbdev/core/bitblit.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbcon.h
drivers/video/fbdev/core/fbcon_ccw.c
drivers/video/fbdev/core/fbcon_cw.c
drivers/video/fbdev/core/fbcon_ud.c
drivers/video/fbdev/core/tileblit.c
fs/btrfs/ioctl.c
include/linux/cpuhotplug.h
include/linux/cpuidle.h
include/linux/mm.h
include/linux/wait.h
include/sound/soc.h
kernel/sysctl.c
mm/filemap.c
mm/percpu.c
security/device_cgroup.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/pcm3168a.c
sound/soc/codecs/rt1308-sdw.c
sound/soc/codecs/rt700-sdw.c
sound/soc/codecs/rt711-sdw.c
sound/soc/codecs/rt715-sdw.c
sound/soc/codecs/tlv320adcx140.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm_hubs.c
sound/soc/codecs/wm_hubs.h
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/skl_hda_dsp_generic.c
sound/soc/intel/boards/sof_maxim_common.c
sound/soc/intel/haswell/sst-haswell-dsp.c
sound/soc/meson/axg-toddr.c
sound/soc/qcom/apq8016_sbc.c
sound/soc/qcom/apq8096.c
sound/soc/qcom/common.c
sound/soc/qcom/sdm845.c
sound/soc/qcom/storm.c
sound/soc/soc-core.c
sound/soc/soc-dai.c
sound/soc/soc-pcm.c
sound/soc/ti/ams-delta.c
tools/include/uapi/linux/in.h
tools/include/uapi/linux/kvm.h
tools/perf/bench/sched-messaging.c
tools/perf/pmu-events/arch/x86/amdzen1/core.json
tools/perf/pmu-events/arch/x86/amdzen2/core.json
tools/perf/tests/attr/README
tools/perf/tests/attr/test-record-group2 [new file with mode: 0644]
tools/perf/tests/bp_signal.c
tools/perf/tests/parse-metric.c
tools/perf/tests/pmu-events.c
tools/perf/tests/pmu.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/metricgroup.c
tools/perf/util/parse-events.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/record.c
tools/perf/util/stat-shadow.c
tools/testing/selftests/powerpc/mm/prot_sao.c

diff --git a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
index 53da483..1c49723 100644
@@ -22,7 +22,7 @@
     |       nios2: | TODO |
     |    openrisc: | TODO |
     |      parisc: | TODO |
-    |     powerpc: |  ok  |
+    |     powerpc: | TODO |
     |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: | TODO |
diff --git a/MAINTAINERS b/MAINTAINERS
index 0d0862b..d746519 100644
@@ -6180,7 +6180,7 @@ F:        Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
 F:     drivers/edac/aspeed_edac.c
 
 EDAC-BLUEFIELD
-M:     Shravan Kumar Ramani <sramani@nvidia.com>
+M:     Shravan Kumar Ramani <shravankr@nvidia.com>
 S:     Supported
 F:     drivers/edac/bluefield_edac.c
 
@@ -9251,7 +9251,7 @@ F:        drivers/firmware/iscsi_ibft*
 
 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M:     Sagi Grimberg <sagi@grimberg.me>
-M:     Max Gurtovoy <maxg@nvidia.com>
+M:     Max Gurtovoy <mgurtovoy@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.openfabrics.org
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index c332d49..560ba69 100644
@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
-               .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
-                        ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
+               /*
+                * We need to allow affected CPUs to come in late, but
+                * also need the non-affected CPUs to be able to come
+                * in at any point in time. Wonderful.
+                */
+               .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
        },
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 295d664..c07d7a0 100644
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
        struct pv_time_stolen_time_region *reg;
 
        reg = per_cpu_ptr(&stolen_time_region, cpu);
-       if (!reg->kaddr) {
-               pr_warn_once("stolen time enabled but not configured for cpu %d\n",
-                            cpu);
+
+       /*
+        * paravirt_steal_clock() may be called before the CPU
+        * online notification callback runs. Until the callback
+        * has run we just return zero.
+        */
+       if (!reg->kaddr)
                return 0;
-       }
 
        return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
 }
 
-static int stolen_time_dying_cpu(unsigned int cpu)
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
 {
        struct pv_time_stolen_time_region *reg;
 
@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
        return 0;
 }
 
-static int init_stolen_time_cpu(unsigned int cpu)
+static int stolen_time_cpu_online(unsigned int cpu)
 {
        struct pv_time_stolen_time_region *reg;
        struct arm_smccc_res res;
@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
        return 0;
 }
 
-static int pv_time_init_stolen_time(void)
+static int __init pv_time_init_stolen_time(void)
 {
        int ret;
 
-       ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
-                               "hypervisor/arm/pvtime:starting",
-                               init_stolen_time_cpu, stolen_time_dying_cpu);
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                               "hypervisor/arm/pvtime:online",
+                               stolen_time_cpu_online,
+                               stolen_time_cpu_down_prepare);
        if (ret < 0)
                return ret;
        return 0;
 }
 
-static bool has_pv_steal_clock(void)
+static bool __init has_pv_steal_clock(void)
 {
        struct arm_smccc_res res;
 
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f8912e4..ef9f1d5 100644
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
        }
 }
 
-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+static inline int bpf2a64_offset(int bpf_insn, int off,
                                 const struct jit_ctx *ctx)
 {
-       int to = ctx->offset[bpf_to];
-       /* -1 to account for the Branch instruction */
-       int from = ctx->offset[bpf_from] - 1;
-
-       return to - from;
+       /* BPF JMP offset is relative to the next instruction */
+       bpf_insn++;
+       /*
+        * Whereas arm64 branch instructions encode the offset
+        * from the branch itself, so we must subtract 1 from the
+        * instruction offset.
+        */
+       return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
 }
 
 static void jit_fill_hole(void *area, unsigned int size)
@@ -642,7 +645,7 @@ emit_bswap_uxt:
 
        /* JUMP off */
        case BPF_JMP | BPF_JA:
-               jmp_offset = bpf2a64_offset(i + off, i, ctx);
+               jmp_offset = bpf2a64_offset(i, off, ctx);
                check_imm26(jmp_offset);
                emit(A64_B(jmp_offset), ctx);
                break;
@@ -669,7 +672,7 @@ emit_bswap_uxt:
        case BPF_JMP32 | BPF_JSLE | BPF_X:
                emit(A64_CMP(is64, dst, src), ctx);
 emit_cond_jmp:
-               jmp_offset = bpf2a64_offset(i + off, i, ctx);
+               jmp_offset = bpf2a64_offset(i, off, ctx);
                check_imm19(jmp_offset);
                switch (BPF_OP(code)) {
                case BPF_JEQ:
@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
        const struct bpf_prog *prog = ctx->prog;
        int i;
 
+       /*
+        * - offset[0] offset of the end of prologue,
+        *   start of the 1st instruction.
+        * - offset[1] - offset of the end of 1st instruction,
+        *   start of the 2nd instruction
+        * [....]
+        * - offset[3] - offset of the end of 3rd instruction,
+        *   start of 4th instruction
+        */
        for (i = 0; i < prog->len; i++) {
                const struct bpf_insn *insn = &prog->insnsi[i];
                int ret;
 
+               if (ctx->image == NULL)
+                       ctx->offset[i] = ctx->idx;
                ret = build_insn(insn, ctx, extra_pass);
                if (ret > 0) {
                        i++;
@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
                                ctx->offset[i] = ctx->idx;
                        continue;
                }
-               if (ctx->image == NULL)
-                       ctx->offset[i] = ctx->idx;
                if (ret)
                        return ret;
        }
+       /*
+        * offset is allocated with prog->len + 1 so fill in
+        * the last element with the offset after the last
+        * instruction (end of program)
+        */
+       if (ctx->image == NULL)
+               ctx->offset[i] = ctx->idx;
 
        return 0;
 }
@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;
 
-       ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+       ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
        if (ctx.offset == NULL) {
                prog = orig_prog;
                goto out_off;
@@ -1089,7 +1108,7 @@ skip_init_ctx:
        prog->jited_len = prog_size;
 
        if (!prog->is_func || extra_pass) {
-               bpf_prog_fill_jited_linfo(prog, ctx.offset);
+               bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
                kfree(ctx.offset);
                kfree(jit_data);
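
For reference, a standalone sketch (not part of the patch) of the branch-offset arithmetic the hunks above introduce: ctx->offset[] now has prog->len + 1 entries, offset[i] being the index of the first arm64 instruction emitted for BPF instruction i, with the extra entry marking the end of the program. The sample offsets below are hypothetical.

    #include <stdio.h>

    /*
     * Mirrors the fixed bpf2a64_offset(): BPF jump offsets are relative to
     * the next BPF instruction, while the arm64 branch (the last arm64
     * insn emitted for the jump) encodes a displacement from itself,
     * hence the "- 1".
     */
    static int branch_offset(const int *offset, int bpf_insn, int off)
    {
            bpf_insn++;     /* BPF offset counts from the next insn */
            return offset[bpf_insn + off] - (offset[bpf_insn] - 1);
    }

    int main(void)
    {
            /* hypothetical arm64 offsets for a 4-insn BPF program (len + 1 entries) */
            const int offset[] = { 0, 2, 3, 5, 6 };

            /* BPF_JA at insn 1 with off = +1 skips insn 2 and lands on insn 3 */
            printf("arm64 branch displacement: %d\n", branch_offset(offset, 1, 1)); /* 3 */
            return 0;
    }
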
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index b66ba90..87927eb 100644
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
        buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }
 
-#define acpi_unlazy_tlb(x)
-
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)  \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c95fa3a..8f32829 100644
@@ -877,6 +877,7 @@ config SNI_RM
        select I8253
        select I8259
        select ISA
+       select MIPS_L1_CACHE_SHIFT_6
        select SWAP_IO_SPACE if CPU_BIG_ENDIAN
        select SYS_HAS_CPU_R4X00
        select SYS_HAS_CPU_R5000
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index b09dc84..eeeec18 100644
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
        },
 };
 
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
 {
        u32 status = read_c0_status();
 
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
        int irq;
 
        clear_c0_status(IE_IRQ0);
-       status = a20r_ack_hwint();
+       status = a20r_update_cause_ip();
        cause = read_c0_cause();
 
        irq = ffs(((cause & status) >> 8) & 0xf8);
        if (likely(irq > 0))
                do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+       a20r_update_cause_ip();
        set_c0_status(IE_IRQ0);
 }
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 65bed1f..787e829 100644
@@ -116,7 +116,6 @@ config PPC
        #
        select ARCH_32BIT_OFF_T if PPC32
        select ARCH_HAS_DEBUG_VIRTUAL
-       select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FORTIFY_SOURCE
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index af9af03..15ed8d0 100644
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
 CONFIG_FB_NVIDIA_I2C=y
 CONFIG_FB_RADEON=y
 # CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 5e6f92b..66e9a0f 100644
@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
 CONFIG_FB_SM501=m
 CONFIG_FB_IBM_GXT4500=y
 CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_LOGO=y
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 55442d4..b392384 100644
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)
 
 extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                         phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-                                        phys_addr_t first_memblock_size);
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                              phys_addr_t first_memblock_size)
 {
-       if (early_radix_enabled())
-               return radix__setup_initial_memory_limit(first_memblock_base,
-                                                  first_memblock_size);
+       /*
+        * Hash has more strict restrictions. At this point we don't
+        * know which translations we will pick. Hence go with hash
+        * restrictions.
+        */
        return hash__setup_initial_memory_limit(first_memblock_base,
                                           first_memblock_size);
 }
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 569fecd..9053fc9 100644
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
        if (!tbl)
                return 0;
 
-       mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+       mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+                       tbl->it_page_shift - 1);
        mask += mask - 1;
 
        return mask;
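
A minimal sketch (not from the kernel tree) of what the corrected dma_iommu_get_required_mask() computation above evaluates to; the old line used '<' (a comparison) instead of '<<' and ignored the IOMMU page size. The table geometry below is hypothetical, and fls_long() is re-modelled locally for the example (assumes a 64-bit long).

    #include <stdio.h>

    /* stand-in for the kernel's fls_long(): 1-based index of the highest set bit */
    static unsigned int fls_long(unsigned long x)
    {
            return x ? 8 * sizeof(long) - __builtin_clzl(x) : 0;
    }

    int main(void)
    {
            unsigned long it_offset = 0, it_size = 1UL << 20; /* 1M TCE entries (hypothetical) */
            unsigned int it_page_shift = 12;                  /* 4K IOMMU pages (hypothetical) */
            unsigned long long mask;

            /* corrected computation: a real shift, plus the IOMMU page shift */
            mask = 1ULL << (fls_long(it_offset + it_size) + it_page_shift - 1);
            mask += mask - 1;

            printf("required DMA mask: 0x%llx\n", mask);      /* 0x1ffffffff here */
            return 0;
    }
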
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 87ab115..e147bbd 100644
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
 
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 4c98546..5206c2e 100644
@@ -111,7 +111,6 @@ SECTIONS
                *(.note.GNU-stack)
                *(.data .data.* .gnu.linkonce.d.* .sdata*)
                *(.bss .sbss .dynbss .dynsbss)
-               *(.glink .iplt .plt .rela*)
        }
 }
 
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 38c317f..32ebb35 100644
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4e3a8d4..256fb97 100644
@@ -30,7 +30,7 @@ SECTIONS
        . = ALIGN(16);
        .text           : {
                *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
-               *(.sfpr)
+               *(.sfpr .glink)
        }                                               :text
        PROVIDE(__etext = .);
        PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
                *(.branch_lt)
                *(.data .data.* .gnu.linkonce.d.* .sdata*)
                *(.bss .sbss .dynbss .dynsbss)
-               *(.glink .iplt .plt .rela*)
        }
 }
 
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 28c7849..d5f0c10 100644
@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
        }
 }
 
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-                               phys_addr_t first_memblock_size)
-{
-       /*
-        * We don't currently support the first MEMBLOCK not mapping 0
-        * physical on those processors
-        */
-       BUG_ON(first_memblock_base != 0);
-
-       /*
-        * Radix mode is not limited by RMA / VRMA addressing.
-        */
-       ppc64_rma_size = ULONG_MAX;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 02e127f..8459056 100644
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
        if (!(mfmsr() & MSR_HV))
                early_check_vec5();
 
-       if (early_radix_enabled())
+       if (early_radix_enabled()) {
                radix__early_init_devtree();
-       else
+               /*
+                * We have finalized the translation we are going to use by now.
+                * Radix mode is not limited by RMA / VRMA addressing.
+                * Hence don't limit memblock allocations.
+                */
+               ppc64_rma_size = ULONG_MAX;
+               memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+       } else
                hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f439f0d..a88a707 100644
@@ -822,7 +822,7 @@ free_stats:
        kfree(stats);
        return rc ? rc : seq_buf_used(&s);
 }
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);
 
 static ssize_t flags_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index d7577fe..f556827 100644
@@ -186,7 +186,6 @@ CONFIG_DRM_I915=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index f856001..9936528 100644
@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index ca09764..6d2df1e 100644
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
-#define acpi_unlazy_tlb(x)     leave_mm(x)
-
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 71a30b0..7ecb90e 100644
@@ -161,18 +161,10 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
 }
 
 /* Power(C) State timer broadcast control */
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
-                                      struct acpi_processor_cx *cx,
-                                      int broadcast)
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+                                       struct acpi_processor_cx *cx)
 {
-       int state = cx - pr->power.states;
-
-       if (state >= pr->power.timer_broadcast_on_state) {
-               if (broadcast)
-                       tick_broadcast_enter();
-               else
-                       tick_broadcast_exit();
-       }
+       return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
 }
 
 #else
@@ -180,9 +172,9 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cstate) { }
 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
-                                      struct acpi_processor_cx *cx,
-                                      int broadcast)
+
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+                                       struct acpi_processor_cx *cx)
 {
 }
 
@@ -566,32 +558,43 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @drv: cpuidle driver
  * @pr: Target processor
  * @cx: Target state context
- * @timer_bc: Whether or not to change timer mode to broadcast
+ * @index: index of target state
  */
-static void acpi_idle_enter_bm(struct acpi_processor *pr,
-                              struct acpi_processor_cx *cx, bool timer_bc)
+static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
+                              struct acpi_processor *pr,
+                              struct acpi_processor_cx *cx,
+                              int index)
 {
-       acpi_unlazy_tlb(smp_processor_id());
-
-       /*
-        * Must be done before busmaster disable as we might need to
-        * access HPET !
-        */
-       if (timer_bc)
-               lapic_timer_state_broadcast(pr, cx, 1);
+       static struct acpi_processor_cx safe_cx = {
+               .entry_method = ACPI_CSTATE_HALT,
+       };
 
        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * bm_control implies whether we can do ARB_DIS
         *
-        * That leaves a case where bm_check is set and bm_control is
-        * not set. In that case we cannot do much, we enter C3
-        * without doing anything.
+        * That leaves a case where bm_check is set and bm_control is not set.
+        * In that case we cannot do much, we enter C3 without doing anything.
         */
-       if (pr->flags.bm_control) {
+       bool dis_bm = pr->flags.bm_control;
+
+       /* If we can skip BM, demote to a safe state. */
+       if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
+               dis_bm = false;
+               index = drv->safe_state_index;
+               if (index >= 0) {
+                       cx = this_cpu_read(acpi_cstate[index]);
+               } else {
+                       cx = &safe_cx;
+                       index = -EBUSY;
+               }
+       }
+
+       if (dis_bm) {
                raw_spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
@@ -600,18 +603,21 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
                raw_spin_unlock(&c3_lock);
        }
 
+       rcu_idle_enter();
+
        acpi_idle_do_entry(cx);
 
+       rcu_idle_exit();
+
        /* Re-enable bus master arbitration */
-       if (pr->flags.bm_control) {
+       if (dis_bm) {
                raw_spin_lock(&c3_lock);
                acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                raw_spin_unlock(&c3_lock);
        }
 
-       if (timer_bc)
-               lapic_timer_state_broadcast(pr, cx, 0);
+       return index;
 }
 
 static int acpi_idle_enter(struct cpuidle_device *dev,
@@ -625,32 +631,21 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
                return -EINVAL;
 
        if (cx->type != ACPI_STATE_C1) {
+               if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
+                       return acpi_idle_enter_bm(drv, pr, cx, index);
+
+               /* C2 to C1 demotion. */
                if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
                        index = ACPI_IDLE_STATE_START;
                        cx = per_cpu(acpi_cstate[index], dev->cpu);
-               } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
-                       if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-                               acpi_idle_enter_bm(pr, cx, true);
-                               return index;
-                       } else if (drv->safe_state_index >= 0) {
-                               index = drv->safe_state_index;
-                               cx = per_cpu(acpi_cstate[index], dev->cpu);
-                       } else {
-                               acpi_safe_halt();
-                               return -EBUSY;
-                       }
                }
        }
 
-       lapic_timer_state_broadcast(pr, cx, 1);
-
        if (cx->type == ACPI_STATE_C3)
                ACPI_FLUSH_CPU_CACHE();
 
        acpi_idle_do_entry(cx);
 
-       lapic_timer_state_broadcast(pr, cx, 0);
-
        return index;
 }
 
@@ -666,7 +661,13 @@ static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
                        return 0;
 
                if (pr->flags.bm_check) {
-                       acpi_idle_enter_bm(pr, cx, false);
+                       u8 bm_sts_skip = cx->bm_sts_skip;
+
+                       /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
+                       cx->bm_sts_skip = 1;
+                       acpi_idle_enter_bm(drv, pr, cx, index);
+                       cx->bm_sts_skip = bm_sts_skip;
+
                        return 0;
                } else {
                        ACPI_FLUSH_CPU_CACHE();
@@ -682,11 +683,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 {
        int i, count = ACPI_IDLE_STATE_START;
        struct acpi_processor_cx *cx;
+       struct cpuidle_state *state;
 
        if (max_cstate == 0)
                max_cstate = 1;
 
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+               state = &acpi_idle_driver.states[count];
                cx = &pr->power.states[i];
 
                if (!cx->valid)
@@ -694,6 +697,15 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 
                per_cpu(acpi_cstate[count], dev->cpu) = cx;
 
+               if (lapic_timer_needs_broadcast(pr, cx))
+                       state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+               if (cx->type == ACPI_STATE_C3) {
+                       state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+                       if (pr->flags.bm_check)
+                               state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+               }
+
                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 784f12c..ec738f7 100644
@@ -5,6 +5,7 @@ config CLK_BCM2711_DVP
        depends on ARCH_BCM2835 ||COMPILE_TEST
        depends on COMMON_CLK
        default ARCH_BCM2835
+       select RESET_CONTROLLER
        select RESET_SIMPLE
        help
          Enable common clock framework support for the Broadcom BCM2711
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 6c35e4b..0d75043 100644
@@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev,
                parent_name = postdiv_name;
        }
 
-       pllen = kzalloc(sizeof(*pllout), GFP_KERNEL);
+       pllen = kzalloc(sizeof(*pllen), GFP_KERNEL);
        if (!pllen) {
                ret = -ENOMEM;
                goto err_unregister_postdiv;
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index d4c1864..228d08f 100644
@@ -420,17 +420,18 @@ static int lpass_core_sc7180_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        ret = pm_clk_create(&pdev->dev);
        if (ret)
-               return ret;
+               goto disable_pm_runtime;
 
        ret = pm_clk_add(&pdev->dev, "iface");
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to acquire iface clock\n");
-               goto disable_pm_runtime;
+               goto destroy_pm_clk;
        }
 
+       ret = -EINVAL;
        clk_probe = of_device_get_match_data(&pdev->dev);
        if (!clk_probe)
-               return -EINVAL;
+               goto destroy_pm_clk;
 
        ret = clk_probe(pdev);
        if (ret)
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index d7243c0..47d6482 100644
@@ -137,7 +137,7 @@ PNAME(mux_usb480m_p)                = { "usb480m_phy", "xin24m" };
 PNAME(mux_hdmiphy_p)           = { "hdmiphy_phy", "xin24m" };
 PNAME(mux_aclk_cpu_src_p)      = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" };
 
-PNAME(mux_pll_src_4plls_p)     = { "cpll", "gpll", "hdmiphy" "usb480m" };
+PNAME(mux_pll_src_4plls_p)     = { "cpll", "gpll", "hdmiphy", "usb480m" };
 PNAME(mux_pll_src_3plls_p)     = { "cpll", "gpll", "hdmiphy" };
 PNAME(mux_pll_src_2plls_p)     = { "cpll", "gpll" };
 PNAME(mux_sclk_hdmi_cec_p)     = { "cpll", "gpll", "xin24m" };
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index ca79824..85c395d 100644
@@ -109,8 +109,10 @@ static int integrator_impd1_clk_probe(struct platform_device *pdev)
 
        for_each_available_child_of_node(np, child) {
                ret = integrator_impd1_clk_spawn(dev, np, child);
-               if (ret)
+               if (ret) {
+                       of_node_put(child);
                        break;
+               }
        }
 
        return ret;
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index ff6d99e..a2b5c6f 100644
@@ -361,7 +361,10 @@ static void __init fixup_cede0_latency(void)
        for (i = 0; i < nr_xcede_records; i++) {
                struct xcede_latency_record *record = &payload->records[i];
                u64 latency_tb = be64_to_cpu(record->latency_ticks);
-               u64 latency_us = tb_to_ns(latency_tb) / NSEC_PER_USEC;
+               u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC);
+
+               if (latency_us == 0)
+                       pr_warn("cpuidle: xcede record %d has an unrealistic latency of 0us.\n", i);
 
                if (latency_us < min_latency_us)
                        min_latency_us = latency_us;
@@ -378,10 +381,14 @@ static void __init fixup_cede0_latency(void)
         * Perform the fix-up.
         */
        if (min_latency_us < dedicated_states[1].exit_latency) {
-               u64 cede0_latency = min_latency_us - 1;
+               /*
+                * We set a minimum of 1us wakeup latency for cede0 to
+                * distinguish it from snooze
+                */
+               u64 cede0_latency = 1;
 
-               if (cede0_latency <= 0)
-                       cede0_latency = min_latency_us;
+               if (min_latency_us > cede0_latency)
+                       cede0_latency = min_latency_us - 1;
 
                dedicated_states[1].exit_latency = cede0_latency;
                dedicated_states[1].target_residency = 10 * (cede0_latency);
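
A small illustration (not part of the patch) of why the rounding change above matters: with truncating division a firmware-reported wakeup latency below 1us collapses to 0us, making CEDE(0) look as cheap as snooze; rounding up plus the 1us floor keeps the two states distinguishable. The 0.9us figure is invented.

    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL

    /* round-up division, as DIV_ROUND_UP_ULL() does */
    static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
    {
            return (n + d - 1) / d;
    }

    int main(void)
    {
            unsigned long long latency_ns = 900;    /* hypothetical: 0.9us from firmware */

            unsigned long long truncated = latency_ns / NSEC_PER_USEC;                   /* 0us */
            unsigned long long min_latency_us = div_round_up(latency_ns, NSEC_PER_USEC); /* 1us */

            /* the patch also enforces a 1us minimum exit latency for CEDE(0) */
            unsigned long long cede0_latency = 1;
            if (min_latency_us > cede0_latency)
                    cede0_latency = min_latency_us - 1;

            printf("truncated=%lluus rounded=%lluus cede0=%lluus\n",
                   truncated, min_latency_us, cede0_latency);
            return 0;
    }
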
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 04becd7..6c7e562 100644
@@ -138,6 +138,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
 {
        ktime_t time_start, time_end;
+       struct cpuidle_state *target_state = &drv->states[index];
 
        time_start = ns_to_ktime(local_clock());
 
@@ -153,8 +154,9 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
         * suspended is generally unsafe.
         */
        stop_critical_timings();
-       rcu_idle_enter();
-       drv->states[index].enter_s2idle(dev, drv, index);
+       if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+               rcu_idle_enter();
+       target_state->enter_s2idle(dev, drv, index);
        if (WARN_ON_ONCE(!irqs_disabled()))
                local_irq_disable();
        /*
@@ -162,7 +164,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
         * first CPU executing it calls functions containing RCU read-side
         * critical sections, so tell RCU about that.
         */
-       rcu_idle_exit();
+       if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+               rcu_idle_exit();
        tick_unfreeze();
        start_critical_timings();
 
@@ -239,9 +242,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        time_start = ns_to_ktime(local_clock());
 
        stop_critical_timings();
-       rcu_idle_enter();
+       if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+               rcu_idle_enter();
        entered_state = target_state->enter(dev, drv, index);
-       rcu_idle_exit();
+       if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+               rcu_idle_exit();
        start_critical_timings();
 
        sched_clock_idle_wakeup_event();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d8c6520..0675768 100644
@@ -178,7 +178,7 @@ static int psp_sw_init(void *handle)
                return ret;
        }
 
-       if (adev->asic_type == CHIP_NAVI10) {
+       if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
                ret= psp_sysfs_init(adev);
                if (ret) {
                        return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index e16874f..6c5d961 100644
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS          0x3010024
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e0e60b0..0f4508b 100644
@@ -1216,6 +1216,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
        dqm->sched_running = false;
        dqm_unlock(dqm);
 
+       pm_release_ib(&dqm->packets);
+
        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
        pm_uninit(&dqm->packets, hanging);
 
@@ -1326,7 +1328,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        if (q->properties.is_active) {
                increment_queue_count(dqm, q->properties.type);
 
-               retval = execute_queues_cpsch(dqm,
+               execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b51c527..4ba8b54 100644
@@ -5278,19 +5278,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
 {
 }
 
-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
-{
-       struct drm_device *dev = new_crtc_state->crtc->dev;
-       struct drm_plane *plane;
-
-       drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
-               if (plane->type == DRM_PLANE_TYPE_CURSOR)
-                       return true;
-       }
-
-       return false;
-}
-
 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
 {
        struct drm_atomic_state *state = new_crtc_state->state;
@@ -5354,19 +5341,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                return ret;
        }
 
-       /* In some use cases, like reset, no stream is attached */
-       if (!dm_crtc_state->stream)
-               return 0;
-
        /*
-        * We want at least one hardware plane enabled to use
-        * the stream with a cursor enabled.
+        * We require the primary plane to be enabled whenever the CRTC is, otherwise
+        * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
+        * planes are disabled, which is not supported by the hardware. And there is legacy
+        * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
         */
-       if (state->enable && state->active &&
-           does_crtc_have_active_cursor(state) &&
-           dm_crtc_state->active_planes == 0)
+       if (state->enable &&
+           !(state->plane_mask & drm_plane_mask(crtc->primary)))
                return -EINVAL;
 
+       /* In some use cases, like reset, no stream is attached */
+       if (!dm_crtc_state->stream)
+               return 0;
+
        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 9140b3f..f31f48d 100644
@@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
                        },
                },
        .num_states = 5,
-       .sr_exit_time_us = 8.6,
-       .sr_enter_plus_exit_time_us = 10.9,
+       .sr_exit_time_us = 11.6,
+       .sr_enter_plus_exit_time_us = 13.9,
        .urgent_latency_us = 4.0,
        .urgent_latency_pixel_data_only_us = 4.0,
        .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index d3192b9..47f8ee2 100644
@@ -27,7 +27,7 @@
 #define MOD_HDCP_LOG_H_
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index fb1161d..3a367a5 100644
@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!psp->dtm_context.dtm_initialized) {
-               DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+               DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
                display->state = MOD_HDCP_DISPLAY_INACTIVE;
                return MOD_HDCP_STATUS_FAILURE;
        }
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 0826625..63f945f 100644
@@ -1126,7 +1126,7 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
-           (adev->asic_type <= CHIP_NAVI12))
+           (adev->asic_type <= CHIP_NAVY_FLOUNDER))
                return 0;
 
        /*
@@ -1211,7 +1211,9 @@ static int smu_hw_fini(void *handle)
 int smu_reset(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
-       int ret = 0;
+       int ret;
+
+       amdgpu_gfx_off_ctrl(smu->adev, false);
 
        ret = smu_hw_fini(adev);
        if (ret)
@@ -1222,8 +1224,12 @@ int smu_reset(struct smu_context *smu)
                return ret;
 
        ret = smu_late_init(adev);
+       if (ret)
+               return ret;
 
-       return ret;
+       amdgpu_gfx_off_ctrl(smu->adev, true);
+
+       return 0;
 }
 
 static int smu_suspend(void *handle)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index d0bdb6d..ef755dd 100644
@@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
        return __reset_engine(engine);
 }
 
-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+static bool
+__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 {
        struct intel_engine_cs *engine, *locked;
+       bool ret = false;
 
        /*
         * Serialise with __i915_request_submit() so that it sees
         * is-banned?, or we know the request is already inflight.
+        *
+        * Note that rq->engine is unstable, and so we double
+        * check that we have acquired the lock on the final engine.
         */
        locked = READ_ONCE(rq->engine);
        spin_lock_irq(&locked->active.lock);
        while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
                spin_unlock(&locked->active.lock);
-               spin_lock(&engine->active.lock);
                locked = engine;
+               spin_lock(&locked->active.lock);
        }
 
-       engine = NULL;
-       if (i915_request_is_active(rq) && rq->fence.error != -EIO)
-               engine = rq->engine;
+       if (!i915_request_completed(rq)) {
+               if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+                       *active = locked;
+               ret = true;
+       }
 
        spin_unlock_irq(&locked->active.lock);
 
-       return engine;
+       return ret;
 }
 
 static struct intel_engine_cs *active_engine(struct intel_context *ce)
@@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
        if (!ce->timeline)
                return NULL;
 
-       mutex_lock(&ce->timeline->mutex);
-       list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
-               if (i915_request_completed(rq))
-                       break;
+       rcu_read_lock();
+       list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
+               if (i915_request_is_active(rq) && i915_request_completed(rq))
+                       continue;
 
                /* Check with the backend if the request is inflight */
-               engine = __active_engine(rq);
-               if (engine)
+               if (__active_engine(rq, &engine))
                        break;
        }
-       mutex_unlock(&ce->timeline->mutex);
+       rcu_read_unlock();
 
        return engine;
 }
@@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915)
        ctx->i915 = i915;
        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
        mutex_init(&ctx->mutex);
+       INIT_LIST_HEAD(&ctx->link);
 
        spin_lock_init(&ctx->stale.lock);
        INIT_LIST_HEAD(&ctx->stale.engines);
@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
-       spin_lock(&i915->gem.contexts.lock);
-       list_add_tail(&ctx->link, &i915->gem.contexts.list);
-       spin_unlock(&i915->gem.contexts.lock);
-
        return ctx;
 
 err_free:
@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
                                struct drm_i915_file_private *fpriv,
                                u32 *id)
 {
+       struct drm_i915_private *i915 = ctx->i915;
        struct i915_address_space *vm;
        int ret;
 
@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
        /* And finally expose ourselves to userspace via the idr */
        ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
        if (ret)
-               put_pid(fetch_and_zero(&ctx->pid));
+               goto err_pid;
+
+       spin_lock(&i915->gem.contexts.lock);
+       list_add_tail(&ctx->link, &i915->gem.contexts.list);
+       spin_unlock(&i915->gem.contexts.lock);
+
+       return 0;
 
+err_pid:
+       put_pid(fetch_and_zero(&ctx->pid));
        return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 24322ef..9eeaca9 100644
@@ -2060,6 +2060,14 @@ static inline void clear_ports(struct i915_request **ports, int count)
        memset_p((void **)ports, NULL, count);
 }
 
+static inline void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+       /* A memcpy_p() would be very useful here! */
+       while (count--)
+               WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2648,10 +2656,9 @@ static void process_csb(struct intel_engine_cs *engine)
 
                        /* switch pending to inflight */
                        GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-                       memcpy(execlists->inflight,
-                              execlists->pending,
-                              execlists_num_ports(execlists) *
-                              sizeof(*execlists->pending));
+                       copy_ports(execlists->inflight,
+                                  execlists->pending,
+                                  execlists_num_ports(execlists));
                        smp_wmb(); /* complete the seqlock */
                        WRITE_ONCE(execlists->active, execlists->inflight);
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0b2fe55..781a678 100644
@@ -388,17 +388,38 @@ static bool __request_in_flight(const struct i915_request *signal)
         * As we know that there are always preemption points between
         * requests, we know that only the currently executing request
         * may be still active even though we have cleared the flag.
-        * However, we can't rely on our tracking of ELSP[0] to known
+        * However, we can't rely on our tracking of ELSP[0] to know
         * which request is currently active and so maybe stuck, as
         * the tracking maybe an event behind. Instead assume that
         * if the context is still inflight, then it is still active
         * even if the active flag has been cleared.
+        *
+        * To further complicate matters, if there a pending promotion, the HW
+        * may either perform a context switch to the second inflight execlists,
+        * or it may switch to the pending set of execlists. In the case of the
+        * latter, it may send the ACK and we process the event copying the
+        * pending[] over top of inflight[], _overwriting_ our *active. Since
+        * this implies the HW is arbitrating and not struck in *active, we do
+        * not worry about complete accuracy, but we do require no read/write
+        * tearing of the pointer [the read of the pointer must be valid, even
+        * as the array is being overwritten, for which we require the writes
+        * to avoid tearing.]
+        *
+        * Note that the read of *execlists->active may race with the promotion
+        * of execlists->pending[] to execlists->inflight[], overwritting
+        * the value at *execlists->active. This is fine. The promotion implies
+        * that we received an ACK from the HW, and so the context is not
+        * stuck -- if we do not see ourselves in *active, the inflight status
+        * is valid. If instead we see ourselves being copied into *active,
+        * we are inflight and may signal the callback.
         */
        if (!intel_context_inflight(signal->context))
                return false;
 
        rcu_read_lock();
-       for (port = __engine_active(signal->engine); (rq = *port); port++) {
+       for (port = __engine_active(signal->engine);
+            (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
+            port++) {
                if (rq->context == signal->context) {
                        inflight = i915_seqno_passed(rq->fence.seqno,
                                                     signal->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 295b982..4cd2038 100644
@@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 
                do {
                        list_for_each_entry_safe(pos, next, &x->head, entry) {
-                               pos->func(pos,
-                                         TASK_NORMAL, fence->error,
-                                         &extra);
+                               int wake_flags;
+
+                               wake_flags = fence->error;
+                               if (pos->func == autoremove_wake_function)
+                                       wake_flags = 0;
+
+                               pos->func(pos, TASK_NORMAL, wake_flags, &extra);
                        }
 
                        if (list_empty(&extra))
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 3fc5511..4d29568 100644
@@ -831,13 +831,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                        drm_crtc_index(&mtk_crtc->base));
                mtk_crtc->cmdq_client = NULL;
        }
-       ret = of_property_read_u32_index(priv->mutex_node,
-                                        "mediatek,gce-events",
-                                        drm_crtc_index(&mtk_crtc->base),
-                                        &mtk_crtc->cmdq_event);
-       if (ret)
-               dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
-                       drm_crtc_index(&mtk_crtc->base));
+
+       if (mtk_crtc->cmdq_client) {
+               ret = of_property_read_u32_index(priv->mutex_node,
+                                                "mediatek,gce-events",
+                                                drm_crtc_index(&mtk_crtc->base),
+                                                &mtk_crtc->cmdq_event);
+               if (ret) {
+                       dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+                               drm_crtc_index(&mtk_crtc->base));
+                       cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+                       mtk_crtc->cmdq_client = NULL;
+               }
+       }
 #endif
        return 0;
 }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 57c88de..5266488 100644
@@ -496,6 +496,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
        if (of_address_to_resource(node, 0, &res) != 0) {
                dev_err(dev, "Missing reg in %s node\n", node->full_name);
+               put_device(&larb_pdev->dev);
                return -EINVAL;
        }
        comp->regs_pa = res.start;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 040a8f3..2d98274 100644
@@ -27,7 +27,6 @@
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp.h"
-#include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_drv.h"
 #include "mtk_drm_gem.h"
@@ -165,7 +164,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 
        ret = drmm_mode_config_init(drm);
        if (ret)
-               return ret;
+               goto put_mutex_dev;
 
        drm->mode_config.min_width = 64;
        drm->mode_config.min_height = 64;
@@ -182,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 
        ret = component_bind_all(drm->dev, drm);
        if (ret)
-               return ret;
+               goto put_mutex_dev;
 
        /*
         * We currently support two fixed data streams, each optional,
@@ -229,7 +228,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        }
        if (!dma_dev->dma_parms) {
                ret = -ENOMEM;
-               goto err_component_unbind;
+               goto put_dma_dev;
        }
 
        ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
@@ -256,9 +255,12 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 err_unset_dma_parms:
        if (private->dma_parms_allocated)
                dma_dev->dma_parms = NULL;
+put_dma_dev:
+       put_device(private->dma_dev);
 err_component_unbind:
        component_unbind_all(drm->dev, drm);
-
+put_mutex_dev:
+       put_device(private->mutex_dev);
        return ret;
 }
 
@@ -544,8 +546,13 @@ err_pm:
        pm_runtime_disable(dev);
 err_node:
        of_node_put(private->mutex_node);
-       for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+       for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
                of_node_put(private->comp_node[i]);
+               if (private->ddp_comp[i]) {
+                       put_device(private->ddp_comp[i]->larb_dev);
+                       private->ddp_comp[i] = NULL;
+               }
+       }
        return ret;
 }
 
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 16fd99d..80b7a08 100644
@@ -466,14 +466,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
        horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
 
        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
-               horizontal_backporch_byte =
-                       (vm->hback_porch * dsi_tmp_buf_bpp - 10);
+               horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
        else
-               horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
-                       dsi_tmp_buf_bpp - 10);
+               horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+                                           dsi_tmp_buf_bpp;
 
        data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-                         timing->da_hs_zero + timing->da_hs_exit + 3;
+                         timing->da_hs_zero + timing->da_hs_exit;
 
        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
                if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index f2e9b42..a977256 100644
@@ -1507,25 +1507,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
                dev_err(dev,
                        "Failed to get system configuration registers: %d\n",
                        ret);
-               return ret;
+               goto put_device;
        }
        hdmi->sys_regmap = regmap;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hdmi->regs = devm_ioremap_resource(dev, mem);
-       if (IS_ERR(hdmi->regs))
-               return PTR_ERR(hdmi->regs);
+       if (IS_ERR(hdmi->regs)) {
+               ret = PTR_ERR(hdmi->regs);
+               goto put_device;
+       }
 
        remote = of_graph_get_remote_node(np, 1, 0);
-       if (!remote)
-               return -EINVAL;
+       if (!remote) {
+               ret = -EINVAL;
+               goto put_device;
+       }
 
        if (!of_device_is_compatible(remote, "hdmi-connector")) {
                hdmi->next_bridge = of_drm_find_bridge(remote);
                if (!hdmi->next_bridge) {
                        dev_err(dev, "Waiting for external bridge\n");
                        of_node_put(remote);
-                       return -EPROBE_DEFER;
+                       ret = -EPROBE_DEFER;
+                       goto put_device;
                }
        }
 
@@ -1534,7 +1539,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
                dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
                        remote);
                of_node_put(remote);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto put_device;
        }
        of_node_put(remote);
 
@@ -1542,10 +1548,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        of_node_put(i2c_np);
        if (!hdmi->ddc_adpt) {
                dev_err(dev, "Failed to get ddc i2c adapter by node\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto put_device;
        }
 
        return 0;
+put_device:
+       put_device(hdmi->cec_dev);
+       return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7b69d6d..e0ae911 100644
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 
        /* get matching reference and feedback divider */
        *ref_div = min(max(den/post_div, 1u), ref_div_max);
-       *fb_div = max(nom * *ref_div * post_div / den, 1u);
+       *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
        /* limit fb divider to its maximum */
        if (*fb_div > fb_div_max) {
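
For context, a toy comparison (not from the driver) of truncating versus round-to-nearest division when deriving the feedback divider, as in the DIV_ROUND_CLOSEST() change above; the nom/den/divider values are invented.

    #include <stdio.h>

    /* round-to-nearest division for positive values, as DIV_ROUND_CLOSEST() does */
    static unsigned int div_round_closest(unsigned int n, unsigned int d)
    {
            return (n + d / 2) / d;
    }

    int main(void)
    {
            /* hypothetical PLL request: the ideal feedback divider is 399.6 */
            unsigned int nom = 99900, den = 10000, ref_div = 4, post_div = 10;

            unsigned int truncated = nom * ref_div * post_div / den;                   /* 399 */
            unsigned int closest   = div_round_closest(nom * ref_div * post_div, den); /* 400 */

            printf("truncated=%u closest=%u\n", truncated, closest);
            return 0;
    }
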
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 591106c..1d44bb6 100644
@@ -731,7 +731,7 @@ static void vmbus_wait_for_unload(void)
        void *page_addr;
        struct hv_message *msg;
        struct vmbus_channel_message_header *hdr;
-       u32 message_type;
+       u32 message_type, i;
 
        /*
         * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
@@ -741,8 +741,11 @@ static void vmbus_wait_for_unload(void)
         * functional and vmbus_unload_response() will complete
         * vmbus_connection.unload_event. If not, the last thing we can do is
         * read message pages for all CPUs directly.
+        *
+        * Wait no more than 10 seconds so that the panic path can't get
+        * hung forever in case the response message isn't seen.
         */
-       while (1) {
+       for (i = 0; i < 1000; i++) {
                if (completion_done(&vmbus_connection.unload_event))
                        break;
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 910b6e9..946d0ab 100644
@@ -2382,7 +2382,10 @@ static int vmbus_bus_suspend(struct device *dev)
        if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
                wait_for_completion(&vmbus_connection.ready_for_suspend_event);
 
-       WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
+       if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+               pr_err("Can not suspend due to a previous failed resuming\n");
+               return -EBUSY;
+       }
 
        mutex_lock(&vmbus_connection.channel_mutex);
 
@@ -2456,7 +2459,9 @@ static int vmbus_bus_resume(struct device *dev)
 
        vmbus_request_offers();
 
-       wait_for_completion(&vmbus_connection.ready_for_resume_event);
+       if (wait_for_completion_timeout(
+               &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+               pr_err("Some vmbus device is missing after suspending?\n");
 
        /* Reset the event for the next suspend. */
        reinit_completion(&vmbus_connection.ready_for_suspend_event);
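
The vmbus_drv.c hunks replace assert-and-continue behaviour with reported failures: suspend now returns -EBUSY when a previous resume left channel fixups pending, and resume waits at most 10 seconds before logging instead of blocking forever. A short C illustration of turning a WARN_ON-style check into an error the caller can act on; the counter and messages are placeholders, not the driver's actual state.

#include <assert.h>
#include <errno.h>
#include <stdio.h>

static int pending_fixups;      /* stand-in for nr_chan_fixup_on_resume */

/* Old style: assert (or warn) and keep going even when the precondition
 * does not hold. */
static void suspend_assert(void)
{
        assert(pending_fixups == 0);
}

/* New style: report the broken precondition to the caller. */
static int suspend_checked(void)
{
        if (pending_fixups != 0) {
                fprintf(stderr, "cannot suspend: %d fixup(s) still pending\n",
                        pending_fixups);
                return -EBUSY;
        }
        return 0;
}

int main(void)
{
        suspend_assert();               /* fine while nothing is pending */

        pending_fixups = 3;             /* a failed resume left work behind */
        printf("checked suspend returned %d\n", suspend_checked());
        return 0;
}
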
index 07ae8b9..10e4200 100644
@@ -3840,14 +3840,18 @@ int amd_iommu_activate_guest_mode(void *data)
 {
        struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
        struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+       u64 valid;
 
        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
            !entry || entry->lo.fields_vapic.guest_mode)
                return 0;
 
+       valid = entry->lo.fields_vapic.valid;
+
        entry->lo.val = 0;
        entry->hi.val = 0;
 
+       entry->lo.fields_vapic.valid       = valid;
        entry->lo.fields_vapic.guest_mode  = 1;
        entry->lo.fields_vapic.ga_log_intr = 1;
        entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
@@ -3864,12 +3868,14 @@ int amd_iommu_deactivate_guest_mode(void *data)
        struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
        struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
        struct irq_cfg *cfg = ir_data->cfg;
-       u64 valid = entry->lo.fields_remap.valid;
+       u64 valid;
 
        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
            !entry || !entry->lo.fields_vapic.guest_mode)
                return 0;
 
+       valid = entry->lo.fields_remap.valid;
+
        entry->lo.val = 0;
        entry->hi.val = 0;
 
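
Both AMD IOMMU hunks are about keeping the IRTE's valid bit intact: the activate path saves it before the entry is zeroed and restores it afterwards, and the deactivate path now reads it only after the early-return checks, so a not-yet-valid entry cannot be reported as valid. A compact C model of the save, rewrite, restore step; the bitfield layout is purely illustrative and not the real IRTE format.

#include <stdio.h>

/* Toy interrupt-remapping entry: only the shape of the operation matters,
 * this is not the real AMD IRTE layout. */
struct toy_irte {
        unsigned int valid      : 1;
        unsigned int guest_mode : 1;
        unsigned int vector     : 8;
};

static void activate_guest_mode(struct toy_irte *e, unsigned int vector)
{
        /* Capture the state that must survive the rewrite... */
        unsigned int valid = e->valid;

        /* ...rebuild the entry from scratch... */
        *e = (struct toy_irte){ 0 };
        e->guest_mode = 1;
        e->vector = vector;

        /* ...then put the preserved bit back instead of losing it. */
        e->valid = valid;
}

int main(void)
{
        struct toy_irte e = { .valid = 1, .vector = 0x20 };

        activate_guest_mode(&e, 0x31);
        printf("valid=%u guest_mode=%u vector=0x%x\n",
               (unsigned)e.valid, (unsigned)e.guest_mode,
               (unsigned)e.vector);
        return 0;
}
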
index 65eff4c..0369d98 100644
@@ -1907,16 +1907,15 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 }
 
 /**
- * spi_nor_sr1_bit6_quad_enable() - Set/Unset the Quad Enable BIT(6) in the
- *                                  Status Register 1.
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
  * @nor:       pointer to a 'struct spi_nor'
- * @enable:    true to enable Quad mode, false to disable Quad mode.
  *
  * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
  *
  * Return: 0 on success, -errno otherwise.
  */
-int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
 {
        int ret;
 
@@ -1924,56 +1923,45 @@ int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
        if (ret)
                return ret;
 
-       if ((enable && (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)) ||
-           (!enable && !(nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)))
+       if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
                return 0;
 
-       if (enable)
-               nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
-       else
-               nor->bouncebuf[0] &= ~SR1_QUAD_EN_BIT6;
+       nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
 
        return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
 }
 
 /**
- * spi_nor_sr2_bit1_quad_enable() - set/unset the Quad Enable BIT(1) in the
- *                                  Status Register 2.
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
  * @nor:       pointer to a 'struct spi_nor'.
- * @enable:    true to enable Quad mode, false to disable Quad mode.
  *
  * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
  *
  * Return: 0 on success, -errno otherwise.
  */
-int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
 {
        int ret;
 
        if (nor->flags & SNOR_F_NO_READ_CR)
-               return spi_nor_write_16bit_cr_and_check(nor,
-                                               enable ? SR2_QUAD_EN_BIT1 : 0);
+               return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
 
        ret = spi_nor_read_cr(nor, nor->bouncebuf);
        if (ret)
                return ret;
 
-       if ((enable && (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)) ||
-           (!enable && !(nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)))
+       if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
                return 0;
 
-       if (enable)
-               nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
-       else
-               nor->bouncebuf[0] &= ~SR2_QUAD_EN_BIT1;
+       nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
 
        return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
 }
 
 /**
- * spi_nor_sr2_bit7_quad_enable() - set/unset QE bit in Status Register 2.
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
  * @nor:       pointer to a 'struct spi_nor'
- * @enable:    true to enable Quad mode, false to disable Quad mode.
  *
  * Set the Quad Enable (QE) bit in the Status Register 2.
  *
@@ -1983,7 +1971,7 @@ int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
  *
  * Return: 0 on success, -errno otherwise.
  */
-int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
 {
        u8 *sr2 = nor->bouncebuf;
        int ret;
@@ -1993,15 +1981,11 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
        ret = spi_nor_read_sr2(nor, sr2);
        if (ret)
                return ret;
-       if ((enable && (*sr2 & SR2_QUAD_EN_BIT7)) ||
-           (!enable && !(*sr2 & SR2_QUAD_EN_BIT7)))
+       if (*sr2 & SR2_QUAD_EN_BIT7)
                return 0;
 
        /* Update the Quad Enable bit. */
-       if (enable)
-               *sr2 |= SR2_QUAD_EN_BIT7;
-       else
-               *sr2 &= ~SR2_QUAD_EN_BIT7;
+       *sr2 |= SR2_QUAD_EN_BIT7;
 
        ret = spi_nor_write_sr2(nor, sr2);
        if (ret)
@@ -2914,13 +2898,12 @@ static int spi_nor_init_params(struct spi_nor *nor)
 }
 
 /**
- * spi_nor_quad_enable() - enable/disable Quad I/O if needed.
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
  * @nor:                pointer to a 'struct spi_nor'
- * @enable:             true to enable Quad mode. false to disable Quad mode.
  *
  * Return: 0 on success, -errno otherwise.
  */
-static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
+static int spi_nor_quad_enable(struct spi_nor *nor)
 {
        if (!nor->params->quad_enable)
                return 0;
@@ -2929,7 +2912,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
              spi_nor_get_protocol_width(nor->write_proto) == 4))
                return 0;
 
-       return nor->params->quad_enable(nor, enable);
+       return nor->params->quad_enable(nor);
 }
 
 /**
@@ -2953,7 +2936,7 @@ static int spi_nor_init(struct spi_nor *nor)
 {
        int err;
 
-       err = spi_nor_quad_enable(nor, true);
+       err = spi_nor_quad_enable(nor);
        if (err) {
                dev_dbg(nor->dev, "quad mode not supported\n");
                return err;
@@ -3000,8 +2983,6 @@ void spi_nor_restore(struct spi_nor *nor)
        if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
            nor->flags & SNOR_F_BROKEN_RESET)
                nor->params->set_4byte_addr_mode(nor, false);
-
-       spi_nor_quad_enable(nor, false);
 }
 EXPORT_SYMBOL_GPL(spi_nor_restore);
 
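
With the disable half of spi_nor_restore() gone, the Quad Enable helpers only ever need to set the QE bit, so the bool enable parameter disappears and each helper returns early when the bit is already set, avoiding a pointless register write. A small C sketch of that set-only, write-only-when-needed shape against a fake status register; the read/write helpers here are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SR1_QUAD_EN_BIT6 (1u << 6)

static uint8_t fake_sr1 = 0x00;         /* pretend status register */
static int writes;                      /* count register writes */

static uint8_t read_sr1(void)    { return fake_sr1; }
static void write_sr1(uint8_t v) { fake_sr1 = v; writes++; }

/* Set-only quad enable: no 'bool enable' parameter, and no write at all
 * when the bit is already set, mirroring the simplified kernel helpers. */
static int quad_enable(void)
{
        uint8_t sr = read_sr1();

        if (sr & SR1_QUAD_EN_BIT6)
                return 0;               /* already enabled, nothing to do */

        write_sr1(sr | SR1_QUAD_EN_BIT6);
        return 0;
}

int main(void)
{
        quad_enable();
        quad_enable();                  /* second call is a no-op */
        printf("SR1=0x%02x after %d write(s)\n", fake_sr1, writes);
        return 0;
}
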
index 95aa32f..6f2f6b2 100644
@@ -198,7 +198,7 @@ struct spi_nor_locking_ops {
  *                      higher index in the array, the higher priority.
  * @erase_map:         the erase map parsed from the SFDP Sector Map Parameter
  *                      Table.
- * @quad_enable:       enables/disables SPI NOR Quad mode.
+ * @quad_enable:       enables SPI NOR quad mode.
  * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
  * @convert_addr:      converts an absolute address into something the flash
  *                      will understand. Particularly useful when pagesize is
@@ -219,7 +219,7 @@ struct spi_nor_flash_parameter {
 
        struct spi_nor_erase_map        erase_map;
 
-       int (*quad_enable)(struct spi_nor *nor, bool enable);
+       int (*quad_enable)(struct spi_nor *nor);
        int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
        u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
        int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
@@ -406,9 +406,9 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
 int spi_nor_wait_till_ready(struct spi_nor *nor);
 int spi_nor_lock_and_prep(struct spi_nor *nor);
 void spi_nor_unlock_and_unprep(struct spi_nor *nor);
-int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable);
-int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable);
-int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
 
 int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
index 25c7649..983d75b 100644
@@ -1039,6 +1039,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &rapl_defaults_core),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &rapl_defaults_core),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &rapl_defaults_spr_server),
+       X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD,           &rapl_defaults_core),
 
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,     &rapl_defaults_byt),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,        &rapl_defaults_cht),
index cd7c7d2..d0f9e90 100644
@@ -182,10 +182,11 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
                pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
                        dev_name(sas_ha->dev),
                        SAS_ADDR(dev->sas_addr), res);
+               return res;
        }
        set_bit(SAS_DEV_FOUND, &dev->state);
        kref_get(&dev->kref);
-       return res;
+       return 0;
 }
 
 
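
The libsas fix propagates the LLDD's error immediately, so SAS_DEV_FOUND is set and the extra kref is taken only when the driver actually accepted the device, and the success path explicitly returns 0. A tiny C illustration of deferring side effects until the fallible call has succeeded; the refcount and flag here are stand-ins, not libsas structures.

#include <errno.h>
#include <stdio.h>

static int refcount = 1;                /* pretend kref */
static unsigned long state;             /* pretend device state flags */
#define DEV_FOUND (1ul << 0)

/* Stand-in for the LLDD callback, which may reject the device. */
static int lldd_dev_found(int accept)
{
        return accept ? 0 : -ENODEV;
}

static int notify_dev_found(int accept)
{
        int res = lldd_dev_found(accept);

        if (res) {
                fprintf(stderr, "driver rejected device: %d\n", res);
                return res;             /* bail out before taking references */
        }

        state |= DEV_FOUND;             /* side effects only on success */
        refcount++;
        return 0;
}

int main(void)
{
        notify_dev_found(0);            /* rejected: no flag, no reference */
        notify_dev_found(1);            /* accepted: flag set, ref taken */
        printf("refcount=%d found=%lu\n", refcount, state & DEV_FOUND);
        return 0;
}
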
index 5e850cc..39deb22 100644
@@ -22,52 +22,6 @@ config VGA_CONSOLE
 
          Say Y.
 
-config VGACON_SOFT_SCROLLBACK
-       bool "Enable Scrollback Buffer in System RAM"
-       depends on VGA_CONSOLE
-       default n
-       help
-        The scrollback buffer of the standard VGA console is located in
-        the VGA RAM.  The size of this RAM is fixed and is quite small.
-        If you require a larger scrollback buffer, this can be placed in
-        System RAM which is dynamically allocated during initialization.
-        Placing the scrollback buffer in System RAM will slightly slow
-        down the console.
-
-        If you want this feature, say 'Y' here and enter the amount of
-        RAM to allocate for this buffer.  If unsure, say 'N'.
-
-config VGACON_SOFT_SCROLLBACK_SIZE
-       int "Scrollback Buffer Size (in KB)"
-       depends on VGACON_SOFT_SCROLLBACK
-       range 1 1024
-       default "64"
-       help
-         Enter the amount of System RAM to allocate for scrollback
-         buffers of VGA consoles. Each 64KB will give you approximately
-         16 80x25 screenfuls of scrollback buffer.
-
-config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT
-       bool "Persistent Scrollback History for each console by default"
-       depends on VGACON_SOFT_SCROLLBACK
-       default n
-       help
-         Say Y here if the scrollback history should persist by default when
-         switching between consoles. Otherwise, the scrollback history will be
-         flushed each time the console is switched. This feature can also be
-         enabled using the boot command line parameter
-         'vgacon.scrollback_persistent=1'.
-
-         This feature might break your tool of choice to flush the scrollback
-         buffer, e.g. clear(1) will work fine but Debian's clear_console(1)
-         will be broken, which might cause security issues.
-         You can use the escape sequence \e[3J instead if this feature is
-         activated.
-
-         Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each
-         created tty device.
-         So if you use a RAM-constrained system, say N here.
-
 config MDA_CONSOLE
        depends on !M68K && !PARISC && ISA
        tristate "MDA text console (dual-headed)"
index a52bb37..17876f0 100644
@@ -165,214 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c)
        write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2);
 }
 
-#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-/* software scrollback */
-struct vgacon_scrollback_info {
-       void *data;
-       int tail;
-       int size;
-       int rows;
-       int cnt;
-       int cur;
-       int save;
-       int restore;
-};
-
-static struct vgacon_scrollback_info *vgacon_scrollback_cur;
-static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES];
-static bool scrollback_persistent = \
-       IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT);
-module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000);
-MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles");
-
-static void vgacon_scrollback_reset(int vc_num, size_t reset_size)
-{
-       struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num];
-
-       if (scrollback->data && reset_size > 0)
-               memset(scrollback->data, 0, reset_size);
-
-       scrollback->cnt  = 0;
-       scrollback->tail = 0;
-       scrollback->cur  = 0;
-}
-
-static void vgacon_scrollback_init(int vc_num)
-{
-       int pitch = vga_video_num_columns * 2;
-       size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-       int rows = size / pitch;
-       void *data;
-
-       data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024,
-                            GFP_NOWAIT);
-
-       vgacon_scrollbacks[vc_num].data = data;
-       vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
-
-       vgacon_scrollback_cur->rows = rows - 1;
-       vgacon_scrollback_cur->size = rows * pitch;
-
-       vgacon_scrollback_reset(vc_num, size);
-}
-
-static void vgacon_scrollback_switch(int vc_num)
-{
-       if (!scrollback_persistent)
-               vc_num = 0;
-
-       if (!vgacon_scrollbacks[vc_num].data) {
-               vgacon_scrollback_init(vc_num);
-       } else {
-               if (scrollback_persistent) {
-                       vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
-               } else {
-                       size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-
-                       vgacon_scrollback_reset(vc_num, size);
-               }
-       }
-}
-
-static void vgacon_scrollback_startup(void)
-{
-       vgacon_scrollback_cur = &vgacon_scrollbacks[0];
-       vgacon_scrollback_init(0);
-}
-
-static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
-{
-       void *p;
-
-       if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size ||
-           c->vc_num != fg_console)
-               return;
-
-       p = (void *) (c->vc_origin + t * c->vc_size_row);
-
-       while (count--) {
-               if ((vgacon_scrollback_cur->tail + c->vc_size_row) >
-                   vgacon_scrollback_cur->size)
-                       vgacon_scrollback_cur->tail = 0;
-
-               scr_memcpyw(vgacon_scrollback_cur->data +
-                           vgacon_scrollback_cur->tail,
-                           p, c->vc_size_row);
-
-               vgacon_scrollback_cur->cnt++;
-               p += c->vc_size_row;
-               vgacon_scrollback_cur->tail += c->vc_size_row;
-
-               if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size)
-                       vgacon_scrollback_cur->tail = 0;
-
-               if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows)
-                       vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows;
-
-               vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
-       }
-}
-
-static void vgacon_restore_screen(struct vc_data *c)
-{
-       c->vc_origin = c->vc_visible_origin;
-       vgacon_scrollback_cur->save = 0;
-
-       if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
-               scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
-                           c->vc_screenbuf_size > vga_vram_size ?
-                           vga_vram_size : c->vc_screenbuf_size);
-               vgacon_scrollback_cur->restore = 1;
-               vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
-       }
-}
-
-static void vgacon_scrolldelta(struct vc_data *c, int lines)
-{
-       int start, end, count, soff;
-
-       if (!lines) {
-               vgacon_restore_screen(c);
-               return;
-       }
-
-       if (!vgacon_scrollback_cur->data)
-               return;
-
-       if (!vgacon_scrollback_cur->save) {
-               vgacon_cursor(c, CM_ERASE);
-               vgacon_save_screen(c);
-               c->vc_origin = (unsigned long)c->vc_screenbuf;
-               vgacon_scrollback_cur->save = 1;
-       }
-
-       vgacon_scrollback_cur->restore = 0;
-       start = vgacon_scrollback_cur->cur + lines;
-       end = start + abs(lines);
-
-       if (start < 0)
-               start = 0;
-
-       if (start > vgacon_scrollback_cur->cnt)
-               start = vgacon_scrollback_cur->cnt;
-
-       if (end < 0)
-               end = 0;
-
-       if (end > vgacon_scrollback_cur->cnt)
-               end = vgacon_scrollback_cur->cnt;
-
-       vgacon_scrollback_cur->cur = start;
-       count = end - start;
-       soff = vgacon_scrollback_cur->tail -
-               ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row);
-       soff -= count * c->vc_size_row;
-
-       if (soff < 0)
-               soff += vgacon_scrollback_cur->size;
-
-       count = vgacon_scrollback_cur->cnt - start;
-
-       if (count > c->vc_rows)
-               count = c->vc_rows;
-
-       if (count) {
-               int copysize;
-
-               int diff = c->vc_rows - count;
-               void *d = (void *) c->vc_visible_origin;
-               void *s = (void *) c->vc_screenbuf;
-
-               count *= c->vc_size_row;
-               /* how much memory to end of buffer left? */
-               copysize = min(count, vgacon_scrollback_cur->size - soff);
-               scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize);
-               d += copysize;
-               count -= copysize;
-
-               if (count) {
-                       scr_memcpyw(d, vgacon_scrollback_cur->data, count);
-                       d += count;
-               }
-
-               if (diff)
-                       scr_memcpyw(d, s, diff * c->vc_size_row);
-       } else
-               vgacon_cursor(c, CM_MOVE);
-}
-
-static void vgacon_flush_scrollback(struct vc_data *c)
-{
-       size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-
-       vgacon_scrollback_reset(c->vc_num, size);
-}
-#else
-#define vgacon_scrollback_startup(...) do { } while (0)
-#define vgacon_scrollback_init(...)    do { } while (0)
-#define vgacon_scrollback_update(...)  do { } while (0)
-#define vgacon_scrollback_switch(...)  do { } while (0)
-
 static void vgacon_restore_screen(struct vc_data *c)
 {
        if (c->vc_origin != c->vc_visible_origin)
@@ -386,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
        vga_set_mem_top(c);
 }
 
-static void vgacon_flush_scrollback(struct vc_data *c)
-{
-}
-#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
-
 static const char *vgacon_startup(void)
 {
        const char *display_desc = NULL;
@@ -573,10 +360,7 @@ static const char *vgacon_startup(void)
        vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH;
        vgacon_yres = vga_scan_lines;
 
-       if (!vga_init_done) {
-               vgacon_scrollback_startup();
-               vga_init_done = true;
-       }
+       vga_init_done = true;
 
        return display_desc;
 }
@@ -869,7 +653,6 @@ static int vgacon_switch(struct vc_data *c)
                        vgacon_doresize(c, c->vc_cols, c->vc_rows);
        }
 
-       vgacon_scrollback_switch(c->vc_num);
        return 0;               /* Redrawing not needed */
 }
 
@@ -1386,7 +1169,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
        oldo = c->vc_origin;
        delta = lines * c->vc_size_row;
        if (dir == SM_UP) {
-               vgacon_scrollback_update(c, t, lines);
                if (c->vc_scr_end + delta >= vga_vram_end) {
                        scr_memcpyw((u16 *) vga_vram_base,
                                    (u16 *) (oldo + delta),
@@ -1450,7 +1232,6 @@ const struct consw vga_con = {
        .con_save_screen = vgacon_save_screen,
        .con_build_attr = vgacon_build_attr,
        .con_invert_region = vgacon_invert_region,
-       .con_flush_scrollback = vgacon_flush_scrollback,
 };
 EXPORT_SYMBOL(vga_con);
 
index 4e6cbc2..9725ecd 100644
@@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
 }
 
 static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
-                      int softback_lines, int fg, int bg)
+                      int fg, int bg)
 {
        struct fb_cursor cursor;
        struct fbcon_ops *ops = info->fbcon_par;
@@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
 
        cursor.set = 0;
 
-       if (softback_lines) {
-               if (y + softback_lines >= vc->vc_rows) {
-                       mode = CM_ERASE;
-                       ops->cursor_flash = 0;
-                       return;
-               } else
-                       y += softback_lines;
-       }
-
        c = scr_readw((u16 *) vc->vc_pos);
        attribute = get_attribute(info, c);
        src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
index 6616783..0b49b0f 100644
@@ -122,12 +122,6 @@ static int logo_lines;
 /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
    enums.  */
 static int logo_shown = FBCON_LOGO_CANSHOW;
-/* Software scrollback */
-static int fbcon_softback_size = 32768;
-static unsigned long softback_buf, softback_curr;
-static unsigned long softback_in;
-static unsigned long softback_top, softback_end;
-static int softback_lines;
 /* console mappings */
 static int first_fb_vc;
 static int last_fb_vc = MAX_NR_CONSOLES - 1;
@@ -167,8 +161,6 @@ static int margin_color;
 
 static const struct consw fb_con;
 
-#define CM_SOFTBACK    (8)
-
 #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
 
 static int fbcon_set_origin(struct vc_data *);
@@ -373,18 +365,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
        return color;
 }
 
-static void fbcon_update_softback(struct vc_data *vc)
-{
-       int l = fbcon_softback_size / vc->vc_size_row;
-
-       if (l > 5)
-               softback_end = softback_buf + l * vc->vc_size_row;
-       else
-               /* Smaller scrollback makes no sense, and 0 would screw
-                  the operation totally */
-               softback_top = 0;
-}
-
 static void fb_flashcursor(struct work_struct *work)
 {
        struct fb_info *info = container_of(work, struct fb_info, queue);
@@ -414,7 +394,7 @@ static void fb_flashcursor(struct work_struct *work)
        c = scr_readw((u16 *) vc->vc_pos);
        mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
                CM_ERASE : CM_DRAW;
-       ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
+       ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
                    get_color(vc, info, c, 0));
        console_unlock();
 }
@@ -471,13 +451,7 @@ static int __init fb_console_setup(char *this_opt)
                }
                
                if (!strncmp(options, "scrollback:", 11)) {
-                       options += 11;
-                       if (*options) {
-                               fbcon_softback_size = simple_strtoul(options, &options, 0);
-                               if (*options == 'k' || *options == 'K') {
-                                       fbcon_softback_size *= 1024;
-                               }
-                       }
+                       pr_warn("Ignoring scrollback size option\n");
                        continue;
                }
                
@@ -1022,31 +996,6 @@ static const char *fbcon_startup(void)
 
        set_blitting_type(vc, info);
 
-       if (info->fix.type != FB_TYPE_TEXT) {
-               if (fbcon_softback_size) {
-                       if (!softback_buf) {
-                               softback_buf =
-                                   (unsigned long)
-                                   kvmalloc(fbcon_softback_size,
-                                           GFP_KERNEL);
-                               if (!softback_buf) {
-                                       fbcon_softback_size = 0;
-                                       softback_top = 0;
-                               }
-                       }
-               } else {
-                       if (softback_buf) {
-                               kvfree((void *) softback_buf);
-                               softback_buf = 0;
-                               softback_top = 0;
-                       }
-               }
-               if (softback_buf)
-                       softback_in = softback_top = softback_curr =
-                           softback_buf;
-               softback_lines = 0;
-       }
-
        /* Setup default font */
        if (!p->fontdata && !vc->vc_font.data) {
                if (!fontname[0] || !(font = find_font(fontname)))
@@ -1220,9 +1169,6 @@ static void fbcon_init(struct vc_data *vc, int init)
        if (logo)
                fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
 
-       if (vc == svc && softback_buf)
-               fbcon_update_softback(vc);
-
        if (ops->rotate_font && ops->rotate_font(info, vc)) {
                ops->rotate = FB_ROTATE_UR;
                set_blitting_type(vc, info);
@@ -1385,7 +1331,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
 {
        struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
        struct fbcon_ops *ops = info->fbcon_par;
-       int y;
        int c = scr_readw((u16 *) vc->vc_pos);
 
        ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
@@ -1399,16 +1344,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
                fbcon_add_cursor_timer(info);
 
        ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
-       if (mode & CM_SOFTBACK) {
-               mode &= ~CM_SOFTBACK;
-               y = softback_lines;
-       } else {
-               if (softback_lines)
-                       fbcon_set_origin(vc);
-               y = 0;
-       }
 
-       ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
+       ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
                    get_color(vc, info, c, 0));
 }
 
@@ -1479,8 +1416,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
 
        if (con_is_visible(vc)) {
                update_screen(vc);
-               if (softback_buf)
-                       fbcon_update_softback(vc);
        }
 }
 
@@ -1618,99 +1553,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
        scrollback_current = 0;
 }
 
-static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
-                                 long delta)
-{
-       int count = vc->vc_rows;
-       unsigned short *d, *s;
-       unsigned long n;
-       int line = 0;
-
-       d = (u16 *) softback_curr;
-       if (d == (u16 *) softback_in)
-               d = (u16 *) vc->vc_origin;
-       n = softback_curr + delta * vc->vc_size_row;
-       softback_lines -= delta;
-       if (delta < 0) {
-               if (softback_curr < softback_top && n < softback_buf) {
-                       n += softback_end - softback_buf;
-                       if (n < softback_top) {
-                               softback_lines -=
-                                   (softback_top - n) / vc->vc_size_row;
-                               n = softback_top;
-                       }
-               } else if (softback_curr >= softback_top
-                          && n < softback_top) {
-                       softback_lines -=
-                           (softback_top - n) / vc->vc_size_row;
-                       n = softback_top;
-               }
-       } else {
-               if (softback_curr > softback_in && n >= softback_end) {
-                       n += softback_buf - softback_end;
-                       if (n > softback_in) {
-                               n = softback_in;
-                               softback_lines = 0;
-                       }
-               } else if (softback_curr <= softback_in && n > softback_in) {
-                       n = softback_in;
-                       softback_lines = 0;
-               }
-       }
-       if (n == softback_curr)
-               return;
-       softback_curr = n;
-       s = (u16 *) softback_curr;
-       if (s == (u16 *) softback_in)
-               s = (u16 *) vc->vc_origin;
-       while (count--) {
-               unsigned short *start;
-               unsigned short *le;
-               unsigned short c;
-               int x = 0;
-               unsigned short attr = 1;
-
-               start = s;
-               le = advance_row(s, 1);
-               do {
-                       c = scr_readw(s);
-                       if (attr != (c & 0xff00)) {
-                               attr = c & 0xff00;
-                               if (s > start) {
-                                       fbcon_putcs(vc, start, s - start,
-                                                   line, x);
-                                       x += s - start;
-                                       start = s;
-                               }
-                       }
-                       if (c == scr_readw(d)) {
-                               if (s > start) {
-                                       fbcon_putcs(vc, start, s - start,
-                                                   line, x);
-                                       x += s - start + 1;
-                                       start = s + 1;
-                               } else {
-                                       x++;
-                                       start++;
-                               }
-                       }
-                       s++;
-                       d++;
-               } while (s < le);
-               if (s > start)
-                       fbcon_putcs(vc, start, s - start, line, x);
-               line++;
-               if (d == (u16 *) softback_end)
-                       d = (u16 *) softback_buf;
-               if (d == (u16 *) softback_in)
-                       d = (u16 *) vc->vc_origin;
-               if (s == (u16 *) softback_end)
-                       s = (u16 *) softback_buf;
-               if (s == (u16 *) softback_in)
-                       s = (u16 *) vc->vc_origin;
-       }
-}
-
 static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
                              int line, int count, int dy)
 {
@@ -1850,31 +1692,6 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
        }
 }
 
-static inline void fbcon_softback_note(struct vc_data *vc, int t,
-                                      int count)
-{
-       unsigned short *p;
-
-       if (vc->vc_num != fg_console)
-               return;
-       p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
-
-       while (count) {
-               scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
-               count--;
-               p = advance_row(p, 1);
-               softback_in += vc->vc_size_row;
-               if (softback_in == softback_end)
-                       softback_in = softback_buf;
-               if (softback_in == softback_top) {
-                       softback_top += vc->vc_size_row;
-                       if (softback_top == softback_end)
-                               softback_top = softback_buf;
-               }
-       }
-       softback_curr = softback_in;
-}
-
 static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
                enum con_scroll dir, unsigned int count)
 {
@@ -1897,8 +1714,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
        case SM_UP:
                if (count > vc->vc_rows)        /* Maximum realistic size */
                        count = vc->vc_rows;
-               if (softback_top)
-                       fbcon_softback_note(vc, t, count);
                if (logo_shown >= 0)
                        goto redraw_up;
                switch (p->scrollmode) {
@@ -2269,14 +2084,6 @@ static int fbcon_switch(struct vc_data *vc)
        info = registered_fb[con2fb_map[vc->vc_num]];
        ops = info->fbcon_par;
 
-       if (softback_top) {
-               if (softback_lines)
-                       fbcon_set_origin(vc);
-               softback_top = softback_curr = softback_in = softback_buf;
-               softback_lines = 0;
-               fbcon_update_softback(vc);
-       }
-
        if (logo_shown >= 0) {
                struct vc_data *conp2 = vc_cons[logo_shown].d;
 
@@ -2600,9 +2407,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
        int cnt;
        char *old_data = NULL;
 
-       if (con_is_visible(vc) && softback_lines)
-               fbcon_set_origin(vc);
-
        resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
        if (p->userfont)
                old_data = vc->vc_font.data;
@@ -2628,8 +2432,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
                cols /= w;
                rows /= h;
                vc_resize(vc, cols, rows);
-               if (con_is_visible(vc) && softback_buf)
-                       fbcon_update_softback(vc);
        } else if (con_is_visible(vc)
                   && vc->vc_mode == KD_TEXT) {
                fbcon_clear_margins(vc, 0);
@@ -2788,19 +2590,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
 
 static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
 {
-       unsigned long p;
-       int line;
-       
-       if (vc->vc_num != fg_console || !softback_lines)
-               return (u16 *) (vc->vc_origin + offset);
-       line = offset / vc->vc_size_row;
-       if (line >= softback_lines)
-               return (u16 *) (vc->vc_origin + offset -
-                               softback_lines * vc->vc_size_row);
-       p = softback_curr + offset;
-       if (p >= softback_end)
-               p += softback_buf - softback_end;
-       return (u16 *) p;
+       return (u16 *) (vc->vc_origin + offset);
 }
 
 static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
@@ -2814,22 +2604,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
 
                x = offset % vc->vc_cols;
                y = offset / vc->vc_cols;
-               if (vc->vc_num == fg_console)
-                       y += softback_lines;
                ret = pos + (vc->vc_cols - x) * 2;
-       } else if (vc->vc_num == fg_console && softback_lines) {
-               unsigned long offset = pos - softback_curr;
-
-               if (pos < softback_curr)
-                       offset += softback_end - softback_buf;
-               offset /= 2;
-               x = offset % vc->vc_cols;
-               y = offset / vc->vc_cols;
-               ret = pos + (vc->vc_cols - x) * 2;
-               if (ret == softback_end)
-                       ret = softback_buf;
-               if (ret == softback_in)
-                       ret = vc->vc_origin;
        } else {
                /* Should not happen */
                x = y = 0;
@@ -2857,106 +2632,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
                        a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
                            (((a) & 0x0700) << 4);
                scr_writew(a, p++);
-               if (p == (u16 *) softback_end)
-                       p = (u16 *) softback_buf;
-               if (p == (u16 *) softback_in)
-                       p = (u16 *) vc->vc_origin;
-       }
-}
-
-static void fbcon_scrolldelta(struct vc_data *vc, int lines)
-{
-       struct fb_info *info = registered_fb[con2fb_map[fg_console]];
-       struct fbcon_ops *ops = info->fbcon_par;
-       struct fbcon_display *disp = &fb_display[fg_console];
-       int offset, limit, scrollback_old;
-
-       if (softback_top) {
-               if (vc->vc_num != fg_console)
-                       return;
-               if (vc->vc_mode != KD_TEXT || !lines)
-                       return;
-               if (logo_shown >= 0) {
-                       struct vc_data *conp2 = vc_cons[logo_shown].d;
-
-                       if (conp2->vc_top == logo_lines
-                           && conp2->vc_bottom == conp2->vc_rows)
-                               conp2->vc_top = 0;
-                       if (logo_shown == vc->vc_num) {
-                               unsigned long p, q;
-                               int i;
-
-                               p = softback_in;
-                               q = vc->vc_origin +
-                                   logo_lines * vc->vc_size_row;
-                               for (i = 0; i < logo_lines; i++) {
-                                       if (p == softback_top)
-                                               break;
-                                       if (p == softback_buf)
-                                               p = softback_end;
-                                       p -= vc->vc_size_row;
-                                       q -= vc->vc_size_row;
-                                       scr_memcpyw((u16 *) q, (u16 *) p,
-                                                   vc->vc_size_row);
-                               }
-                               softback_in = softback_curr = p;
-                               update_region(vc, vc->vc_origin,
-                                             logo_lines * vc->vc_cols);
-                       }
-                       logo_shown = FBCON_LOGO_CANSHOW;
-               }
-               fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
-               fbcon_redraw_softback(vc, disp, lines);
-               fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
-               return;
        }
-
-       if (!scrollback_phys_max)
-               return;
-
-       scrollback_old = scrollback_current;
-       scrollback_current -= lines;
-       if (scrollback_current < 0)
-               scrollback_current = 0;
-       else if (scrollback_current > scrollback_max)
-               scrollback_current = scrollback_max;
-       if (scrollback_current == scrollback_old)
-               return;
-
-       if (fbcon_is_inactive(vc, info))
-               return;
-
-       fbcon_cursor(vc, CM_ERASE);
-
-       offset = disp->yscroll - scrollback_current;
-       limit = disp->vrows;
-       switch (disp->scrollmode) {
-       case SCROLL_WRAP_MOVE:
-               info->var.vmode |= FB_VMODE_YWRAP;
-               break;
-       case SCROLL_PAN_MOVE:
-       case SCROLL_PAN_REDRAW:
-               limit -= vc->vc_rows;
-               info->var.vmode &= ~FB_VMODE_YWRAP;
-               break;
-       }
-       if (offset < 0)
-               offset += limit;
-       else if (offset >= limit)
-               offset -= limit;
-
-       ops->var.xoffset = 0;
-       ops->var.yoffset = offset * vc->vc_font.height;
-       ops->update_start(info);
-
-       if (!scrollback_current)
-               fbcon_cursor(vc, CM_DRAW);
 }
 
 static int fbcon_set_origin(struct vc_data *vc)
 {
-       if (softback_lines)
-               fbcon_scrolldelta(vc, softback_lines);
        return 0;
 }
 
@@ -3020,8 +2700,6 @@ static void fbcon_modechanged(struct fb_info *info)
 
                fbcon_set_palette(vc, color_table);
                update_screen(vc);
-               if (softback_buf)
-                       fbcon_update_softback(vc);
        }
 }
 
@@ -3432,7 +3110,6 @@ static const struct consw fb_con = {
        .con_font_default       = fbcon_set_def_font,
        .con_font_copy          = fbcon_copy_font,
        .con_set_palette        = fbcon_set_palette,
-       .con_scrolldelta        = fbcon_scrolldelta,
        .con_set_origin         = fbcon_set_origin,
        .con_invert_region      = fbcon_invert_region,
        .con_screen_pos         = fbcon_screen_pos,
@@ -3667,9 +3344,6 @@ static void fbcon_exit(void)
        }
 #endif
 
-       kvfree((void *)softback_buf);
-       softback_buf = 0UL;
-
        for_each_registered_fb(i) {
                int pending = 0;
 
index 20dea85..78bb14c 100644
@@ -62,7 +62,7 @@ struct fbcon_ops {
        void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
                              int color, int bottom_only);
        void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
-                      int softback_lines, int fg, int bg);
+                      int fg, int bg);
        int  (*update_start)(struct fb_info *info);
        int  (*rotate_font)(struct fb_info *info, struct vc_data *vc);
        struct fb_var_screeninfo var;  /* copy of the current fb_var_screeninfo */
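
Because the software scrollback is removed, the cursor callback in struct fbcon_ops loses its softback_lines parameter, and the bitblit, ccw, cw, ud and tileblit implementations in the following hunks are all updated to the narrower signature. A minimal C sketch of an ops table with a callback narrowed this way; the struct and callback are illustrative only.

#include <stdio.h>

/* Narrowed callback: the scrollback-specific parameter is gone, so every
 * implementation and every call site uses the same shorter argument list. */
struct toy_ops {
        void (*cursor)(int mode, int fg, int bg);
};

static void draw_cursor(int mode, int fg, int bg)
{
        printf("cursor: mode=%d fg=%d bg=%d\n", mode, fg, bg);
}

int main(void)
{
        struct toy_ops ops = { .cursor = draw_cursor };

        /* Callers pass only what the callback still needs. */
        ops.cursor(1, 7, 0);
        return 0;
}
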
index 5b17713..bbd869e 100644
@@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
 }
 
 static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
-                      int softback_lines, int fg, int bg)
+                      int fg, int bg)
 {
        struct fb_cursor cursor;
        struct fbcon_ops *ops = info->fbcon_par;
@@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
 
        cursor.set = 0;
 
-       if (softback_lines) {
-               if (y + softback_lines >= vc->vc_rows) {
-                       mode = CM_ERASE;
-                       ops->cursor_flash = 0;
-                       return;
-               } else
-                       y += softback_lines;
-       }
-
        c = scr_readw((u16 *) vc->vc_pos);
        attribute = get_attribute(info, c);
        src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
index 894d01a..a34cbe8 100644
@@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
 }
 
 static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
-                     int softback_lines, int fg, int bg)
+                     int fg, int bg)
 {
        struct fb_cursor cursor;
        struct fbcon_ops *ops = info->fbcon_par;
@@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
 
        cursor.set = 0;
 
-       if (softback_lines) {
-               if (y + softback_lines >= vc->vc_rows) {
-                       mode = CM_ERASE;
-                       ops->cursor_flash = 0;
-                       return;
-               } else
-                       y += softback_lines;
-       }
-
        c = scr_readw((u16 *) vc->vc_pos);
        attribute = get_attribute(info, c);
        src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
index 01b87f2..199cbc7 100644
@@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
 }
 
 static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
-                     int softback_lines, int fg, int bg)
+                     int fg, int bg)
 {
        struct fb_cursor cursor;
        struct fbcon_ops *ops = info->fbcon_par;
@@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
 
        cursor.set = 0;
 
-       if (softback_lines) {
-               if (y + softback_lines >= vc->vc_rows) {
-                       mode = CM_ERASE;
-                       ops->cursor_flash = 0;
-                       return;
-               } else
-                       y += softback_lines;
-       }
-
        c = scr_readw((u16 *) vc->vc_pos);
        attribute = get_attribute(info, c);
        src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
index 1dfaff0..31b85b7 100644
@@ -80,7 +80,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
 }
 
 static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
-                       int softback_lines, int fg, int bg)
+                       int fg, int bg)
 {
        struct fb_tilecursor cursor;
        int use_sw = vc->vc_cursor_type & CUR_SW;
index ac45f02..2d9109d 100644
@@ -2193,7 +2193,8 @@ static noinline int search_ioctl(struct inode *inode,
        key.offset = sk->min_offset;
 
        while (1) {
-               ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+               ret = fault_in_pages_writeable(ubuf + sk_offset,
+                                              *buf_size - sk_offset);
                if (ret)
                        break;
 
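
The search_ioctl() fix is about which part of the user buffer gets prefaulted: the length was already the remaining *buf_size - sk_offset, but the base pointer must also be advanced by sk_offset, otherwise the region that is about to be written is never touched. A short C demonstration of keeping the pointer and the remaining length in step; plain memset() stands in for fault_in_pages_writeable().

#include <stdio.h>
#include <string.h>

/* Stand-in for "make this user range writable before copying into it":
 * here we just mark the bytes so the covered region is visible. */
static void touch_range(char *p, size_t len)
{
        memset(p, 'X', len);
}

int main(void)
{
        char buf[16];
        size_t buf_size = sizeof(buf), offset = 10;

        memset(buf, '.', sizeof(buf));
        /* Buggy form: right length, wrong base, so bytes 0..5 are covered
         * while the region that will actually be written is not. */
        touch_range(buf, buf_size - offset);
        printf("buggy: %.16s\n", buf);

        memset(buf, '.', sizeof(buf));
        /* Fixed form: advance the base by the bytes already consumed, so
         * the remaining region 10..15 is the one that gets covered. */
        touch_range(buf + offset, buf_size - offset);
        printf("fixed: %.16s\n", buf);
        return 0;
}
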
index 3215023..bf9181c 100644
@@ -142,7 +142,6 @@ enum cpuhp_state {
        /* Must be the last timer callback */
        CPUHP_AP_DUMMY_TIMER_STARTING,
        CPUHP_AP_ARM_XEN_STARTING,
-       CPUHP_AP_ARM_KVMPV_STARTING,
        CPUHP_AP_ARM_CORESIGHT_STARTING,
        CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
        CPUHP_AP_ARM64_ISNDEP_STARTING,
index 75895e6..6175c77 100644
@@ -82,6 +82,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_UNUSABLE          BIT(3) /* avoid using this state */
 #define CPUIDLE_FLAG_OFF               BIT(4) /* disable this state by default */
 #define CPUIDLE_FLAG_TLB_FLUSHED       BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE          BIT(6) /* idle-state takes care of RCU */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
index ca6e6a8..b2f370f 100644
@@ -41,6 +41,8 @@ struct writeback_control;
 struct bdi_writeback;
 struct pt_regs;
 
+extern int sysctl_page_lock_unfairness;
+
 void init_mm_internals(void);
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES     /* Don't use mapnrs, do it properly */
index 898c890..27fb99c 100644
@@ -21,6 +21,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
 #define WQ_FLAG_WOKEN          0x02
 #define WQ_FLAG_BOOKMARK       0x04
 #define WQ_FLAG_CUSTOM         0x08
+#define WQ_FLAG_DONE           0x10
 
 /*
  * A single wait-queue entry structure:
index 5e3919f..fc4fcac 100644
@@ -1193,6 +1193,8 @@ struct snd_soc_pcm_runtime {
             ((i) < (rtd)->num_cpus + (rtd)->num_codecs) &&             \
                     ((dai) = (rtd)->dais[i]);                          \
             (i)++)
+#define for_each_rtd_dais_rollback(rtd, i, dai)                \
+       for (; (--(i) >= 0) && ((dai) = (rtd)->dais[i]);)
 
 void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
 
@@ -1361,6 +1363,8 @@ void snd_soc_unregister_dai(struct snd_soc_dai *dai);
 
 struct snd_soc_dai *snd_soc_find_dai(
        const struct snd_soc_dai_link_component *dlc);
+struct snd_soc_dai *snd_soc_find_dai_with_mutex(
+       const struct snd_soc_dai_link_component *dlc);
 
 #include <sound/soc-dai.h>
 
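
The new for_each_rtd_dais_rollback() helper walks the DAI array backwards from the index where a forward loop stopped, which is the usual way to undo partially completed initialization. A standalone C version of that forward-init plus reverse-rollback pairing, using a plain array and invented prepare/unprepare functions instead of the rtd/DAI structures:

#include <stdio.h>

#define N 5

static int prepare(int i)    { return i == 3 ? -1 : 0; } /* item 3 fails */
static void unprepare(int i) { printf("rollback %d\n", i); }

/* Reverse iteration from the point of failure, mirroring the shape of
 * for_each_rtd_dais_rollback(): "for (; (--(i) >= 0) && ...;)" */
#define for_each_item_rollback(i) for (; --(i) >= 0; )

int main(void)
{
        int i;

        for (i = 0; i < N; i++) {
                if (prepare(i) < 0) {
                        fprintf(stderr, "prepare %d failed\n", i);
                        /* Undo items 0..i-1 in reverse order. */
                        for_each_item_rollback(i)
                                unprepare(i);
                        return 1;
                }
        }
        return 0;
}
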
index 09e70ee..afad085 100644
@@ -2912,6 +2912,14 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = percpu_pagelist_fraction_sysctl_handler,
                .extra1         = SYSCTL_ZERO,
        },
+       {
+               .procname       = "page_lock_unfairness",
+               .data           = &sysctl_page_lock_unfairness,
+               .maxlen         = sizeof(sysctl_page_lock_unfairness),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = SYSCTL_ZERO,
+       },
 #ifdef CONFIG_MMU
        {
                .procname       = "max_map_count",
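
The new vm_table entry exposes sysctl_page_lock_unfairness (defined with a default of 5 in the mm/filemap.c hunk below) as a runtime tunable; given its placement it should surface as /proc/sys/vm/page_lock_unfairness. A small C snippet that reads the current value from procfs, assuming that path:

#include <stdio.h>

int main(void)
{
        /* Path inferred from the vm_table placement of the new entry. */
        const char *path = "/proc/sys/vm/page_lock_unfairness";
        FILE *f = fopen(path, "r");
        int value;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%d", &value) == 1)
                printf("page_lock_unfairness = %d\n", value);
        fclose(f);
        return 0;
}
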
index 1aaea26..6aa08e7 100644
@@ -988,9 +988,43 @@ void __init pagecache_init(void)
        page_writeback_init();
 }
 
+/*
+ * The page wait code treats the "wait->flags" somewhat unusually, because
+ * we have multiple different kinds of waits, not just the usual "exclusive"
+ * one.
+ *
+ * We have:
+ *
+ *  (a) no special bits set:
+ *
+ *     We're just waiting for the bit to be released, and when a waker
+ *     calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
+ *     and remove it from the wait queue.
+ *
+ *     Simple and straightforward.
+ *
+ *  (b) WQ_FLAG_EXCLUSIVE:
+ *
+ *     The waiter is waiting to get the lock, and only one waiter should
+ *     be woken up to avoid any thundering herd behavior. We'll set the
+ *     WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
+ *
+ *     This is the traditional exclusive wait.
+ *
+ *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
+ *
+ *     The waiter is waiting to get the bit, and additionally wants the
+ *     lock to be transferred to it for fair lock behavior. If the lock
+ *     cannot be taken, we stop walking the wait queue without waking
+ *     the waiter.
+ *
+ *     This is the "fair lock handoff" case, and in addition to setting
+ *     WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
+ *     that it now has the lock.
+ */
 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
-       int ret;
+       unsigned int flags;
        struct wait_page_key *key = arg;
        struct wait_page_queue *wait_page
                = container_of(wait, struct wait_page_queue, wait);
@@ -999,35 +1033,44 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
                return 0;
 
        /*
-        * If it's an exclusive wait, we get the bit for it, and
-        * stop walking if we can't.
-        *
-        * If it's a non-exclusive wait, then the fact that this
-        * wake function was called means that the bit already
-        * was cleared, and we don't care if somebody then
-        * re-took it.
+        * If it's a lock handoff wait, we get the bit for it, and
+        * stop walking (and do not wake it up) if we can't.
         */
-       ret = 0;
-       if (wait->flags & WQ_FLAG_EXCLUSIVE) {
-               if (test_and_set_bit(key->bit_nr, &key->page->flags))
+       flags = wait->flags;
+       if (flags & WQ_FLAG_EXCLUSIVE) {
+               if (test_bit(key->bit_nr, &key->page->flags))
                        return -1;
-               ret = 1;
+               if (flags & WQ_FLAG_CUSTOM) {
+                       if (test_and_set_bit(key->bit_nr, &key->page->flags))
+                               return -1;
+                       flags |= WQ_FLAG_DONE;
+               }
        }
-       wait->flags |= WQ_FLAG_WOKEN;
 
+       /*
+        * We are holding the wait-queue lock, but the waiter that
+        * is waiting for this will be checking the flags without
+        * any locking.
+        *
+        * So update the flags atomically, and wake up the waiter
+        * afterwards to avoid any races. This store-release pairs
+        * with the load-acquire in wait_on_page_bit_common().
+        */
+       smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
        wake_up_state(wait->private, mode);
 
        /*
         * Ok, we have successfully done what we're waiting for,
         * and we can unconditionally remove the wait entry.
         *
-        * Note that this has to be the absolute last thing we do,
-        * since after list_del_init(&wait->entry) the wait entry
+        * Note that this pairs with the "finish_wait()" in the
+        * waiter, and has to be the absolute last thing we do.
+        * After this list_del_init(&wait->entry) the wait entry
         * might be de-allocated and the process might even have
         * exited.
         */
        list_del_init_careful(&wait->entry);
-       return ret;
+       return (flags & WQ_FLAG_EXCLUSIVE) != 0;
 }
 
 static void wake_up_page_bit(struct page *page, int bit_nr)
@@ -1107,8 +1150,8 @@ enum behavior {
 };
 
 /*
- * Attempt to check (or get) the page bit, and mark the
- * waiter woken if successful.
+ * Attempt to check (or get) the page bit, and mark us done
+ * if successful.
  */
 static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
                                        struct wait_queue_entry *wait)
@@ -1119,13 +1162,17 @@ static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
        } else if (test_bit(bit_nr, &page->flags))
                return false;
 
-       wait->flags |= WQ_FLAG_WOKEN;
+       wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
        return true;
 }
 
+/* How many times do we accept lock stealing from under a waiter? */
+int sysctl_page_lock_unfairness = 5;
+
 static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        struct page *page, int bit_nr, int state, enum behavior behavior)
 {
+       int unfairness = sysctl_page_lock_unfairness;
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
        bool thrashing = false;
@@ -1143,11 +1190,18 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        }
 
        init_wait(wait);
-       wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
        wait->func = wake_page_function;
        wait_page.page = page;
        wait_page.bit_nr = bit_nr;
 
+repeat:
+       wait->flags = 0;
+       if (behavior == EXCLUSIVE) {
+               wait->flags = WQ_FLAG_EXCLUSIVE;
+               if (--unfairness < 0)
+                       wait->flags |= WQ_FLAG_CUSTOM;
+       }
+
        /*
         * Do one last check whether we can get the
         * page bit synchronously.
@@ -1170,27 +1224,63 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
        /*
         * From now on, all the logic will be based on
-        * the WQ_FLAG_WOKEN flag, and the and the page
-        * bit testing (and setting) will be - or has
-        * already been - done by the wake function.
+        * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
+        * see whether the page bit testing has already
+        * been done by the wake function.
         *
         * We can drop our reference to the page.
         */
        if (behavior == DROP)
                put_page(page);
 
+       /*
+        * Note that until the "finish_wait()", or until
+        * we see the WQ_FLAG_WOKEN flag, we need to
+        * be very careful with the 'wait->flags', because
+        * we may race with a waker that sets them.
+        */
        for (;;) {
+               unsigned int flags;
+
                set_current_state(state);
 
-               if (signal_pending_state(state, current))
+               /* Loop until we've been woken or interrupted */
+               flags = smp_load_acquire(&wait->flags);
+               if (!(flags & WQ_FLAG_WOKEN)) {
+                       if (signal_pending_state(state, current))
+                               break;
+
+                       io_schedule();
+                       continue;
+               }
+
+               /* If we were non-exclusive, we're done */
+               if (behavior != EXCLUSIVE)
                        break;
 
-               if (wait->flags & WQ_FLAG_WOKEN)
+               /* If the waker got the lock for us, we're done */
+               if (flags & WQ_FLAG_DONE)
                        break;
 
-               io_schedule();
+               /*
+                * Otherwise, if we're getting the lock, we need to
+                * try to get it ourselves.
+                *
+                * And if that fails, we'll have to retry this all.
+                */
+               if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
+                       goto repeat;
+
+               wait->flags |= WQ_FLAG_DONE;
+               break;
        }
 
+       /*
+        * If a signal happened, this 'finish_wait()' may remove the last
+        * waiter from the wait-queues, but the PageWaiters bit will remain
+        * set. That's ok. The next wakeup will take care of it, and trying
+        * to do it here would be difficult and prone to races.
+        */
        finish_wait(q, wait);
 
        if (thrashing) {
@@ -1200,12 +1290,20 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        }
 
        /*
-        * A signal could leave PageWaiters set. Clearing it here if
-        * !waitqueue_active would be possible (by open-coding finish_wait),
-        * but still fail to catch it in the case of wait hash collision. We
-        * already can fail to clear wait hash collision cases, so don't
-        * bother with signals either.
+        * NOTE! The wait->flags weren't stable until we've done the
+        * 'finish_wait()', and we could have exited the loop above due
+        * to a signal, and had a wakeup event happen after the signal
+        * test but before the 'finish_wait()'.
+        *
+        * So only after the finish_wait() can we reliably determine
+        * whether we got woken up, and only then can we figure out the
+        * final return value based on that state without races.
+        *
+        * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
+        * waiter, but an exclusive one requires WQ_FLAG_DONE.
         */
+       if (behavior == EXCLUSIVE)
+               return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
 
        return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 }
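
The filemap.c change above replaces the old "always hand the page lock to the oldest waiter" behaviour with a bounded-unfairness scheme: an exclusive waiter is normally just woken (WQ_FLAG_WOKEN) and must take the lock bit itself, re-queuing via the repeat: label if it loses the race, and only after sysctl_page_lock_unfairness failed attempts does it request a direct handoff (WQ_FLAG_CUSTOM), which the waker acknowledges with WQ_FLAG_DONE. The userspace sketch below illustrates only the waiter side of that wake-versus-handoff protocol with C11 atomics; all names (WOKEN, DONE, trylock_page_bit, ...) are stand-ins rather than the kernel's, and where this sketch spins the kernel would sleep in io_schedule().

    #include <stdatomic.h>
    #include <stdbool.h>

    #define WOKEN 0x01U                     /* a waker has processed us */
    #define DONE  0x02U                     /* the waker handed us the lock */

    struct waiter { _Atomic unsigned int flags; };

    static _Atomic bool page_locked;        /* stand-in for the page lock bit */

    static bool trylock_page_bit(void)      /* mirrors test_and_set_bit() */
    {
            return !atomic_exchange(&page_locked, true);
    }

    static void lock_page_like(struct waiter *w)
    {
    repeat:
            atomic_store(&w->flags, 0);     /* conceptually: (re)queue ourselves */

            if (trylock_page_bit())
                    return;                 /* got the lock synchronously */

            for (;;) {
                    unsigned int f = atomic_load_explicit(&w->flags,
                                                          memory_order_acquire);
                    if (!(f & WOKEN))
                            continue;       /* kernel would io_schedule() here */
                    if (f & DONE)
                            return;         /* direct handoff from the waker */
                    if (!trylock_page_bit())
                            goto repeat;    /* lost the race: back in the queue */
                    return;                 /* woken and won the race ourselves */
            }
    }

In the real code the unfairness counter simply bounds how many times the "goto repeat" path may be taken before the waiter insists on a handoff.
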
index f470962..1ed1a34 100644 (file)
@@ -1316,7 +1316,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 
        /* allocate chunk */
        alloc_size = sizeof(struct pcpu_chunk) +
-               BITS_TO_LONGS(region_size >> PAGE_SHIFT);
+               BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
        chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
        if (!chunk)
                panic("%s: Failed to allocate %zu bytes\n", __func__,
index 43ab0ad..04375df 100644 (file)
@@ -354,7 +354,8 @@ static bool match_exception_partial(struct list_head *exceptions, short type,
 {
        struct dev_exception_item *ex;
 
-       list_for_each_entry_rcu(ex, exceptions, list) {
+       list_for_each_entry_rcu(ex, exceptions, list,
+                               lockdep_is_held(&devcgroup_mutex)) {
                if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
                        continue;
                if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
index c521a1f..85e2071 100644 (file)
@@ -5993,6 +5993,40 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
                snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
 }
 
+
+static void alc294_gx502_toggle_output(struct hda_codec *codec,
+                                       struct hda_jack_callback *cb)
+{
+       /* The Windows driver sets the codec up in a very different way where
+        * it appears to leave 0x10 = 0x8a20 set. For Linux we need to toggle it
+        */
+       if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
+               alc_write_coef_idx(codec, 0x10, 0x8a20);
+       else
+               alc_write_coef_idx(codec, 0x10, 0x0a20);
+}
+
+static void alc294_fixup_gx502_hp(struct hda_codec *codec,
+                                       const struct hda_fixup *fix, int action)
+{
+       /* Pin 0x21: headphones/headset mic */
+       if (!is_jack_detectable(codec, 0x21))
+               return;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               snd_hda_jack_detect_enable_callback(codec, 0x21,
+                               alc294_gx502_toggle_output);
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               /* Make sure we start in the correct state, e.g. if
+                * headphones were already plugged in before the system
+                * powered up.
+                */
+               alc294_gx502_toggle_output(codec, NULL);
+               break;
+       }
+}
+
 static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
 {
@@ -6173,6 +6207,9 @@ enum {
        ALC285_FIXUP_THINKPAD_HEADSET_JACK,
        ALC294_FIXUP_ASUS_HPE,
        ALC294_FIXUP_ASUS_COEF_1B,
+       ALC294_FIXUP_ASUS_GX502_HP,
+       ALC294_FIXUP_ASUS_GX502_PINS,
+       ALC294_FIXUP_ASUS_GX502_VERBS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_MUTE_LED,
@@ -6191,6 +6228,7 @@ enum {
        ALC269_FIXUP_LEMOTE_A1802,
        ALC269_FIXUP_LEMOTE_A190X,
        ALC256_FIXUP_INTEL_NUC8_RUGGED,
+       ALC255_FIXUP_XIAOMI_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7338,6 +7376,33 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
        },
+       [ALC294_FIXUP_ASUS_GX502_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11050 }, /* front HP mic */
+                       { 0x1a, 0x01a11830 }, /* rear external mic */
+                       { 0x21, 0x03211020 }, /* front HP out */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_GX502_VERBS
+       },
+       [ALC294_FIXUP_ASUS_GX502_VERBS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* set 0x15 to HP-OUT ctrl */
+                       { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
+                       /* unmute the 0x15 amp */
+                       { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_GX502_HP
+       },
+       [ALC294_FIXUP_ASUS_GX502_HP] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc294_fixup_gx502_hp,
+       },
        [ALC294_FIXUP_ASUS_COEF_1B] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -7527,6 +7592,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC255_FIXUP_XIAOMI_HEADSET_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC289_FIXUP_ASUS_GA401
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7711,6 +7786,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
        SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -7823,6 +7899,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
        SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+       SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
@@ -8000,6 +8077,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
        {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
        {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+       {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
        {}
 };
 #define ALC225_STANDARD_PINS \
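
The three new GX502 entries in the patch_realtek.c hunks above are daisy-chained: ALC294_FIXUP_ASUS_GX502_PINS remaps the pins, its .chained/.chain_id fields hand control to the VERBS entry, and that in turn chains to the HDA_FIXUP_FUNC entry that installs the jack-detect callback. Roughly, the HDA core walks such a chain as in the simplified sketch below; this is not the exact snd_hda_apply_fixup() implementation and the per-action handling is condensed:

    /* simplified: apply a fixup entry, then follow its chain */
    static void apply_fixup_chain(struct hda_codec *codec,
                                  const struct hda_fixup *table,
                                  int id, int action)
    {
            while (id >= 0) {
                    const struct hda_fixup *fix = &table[id];

                    switch (fix->type) {
                    case HDA_FIXUP_PINS:
                            if (action == HDA_FIXUP_ACT_PRE_PROBE)
                                    snd_hda_apply_pincfgs(codec, fix->v.pins);
                            break;
                    case HDA_FIXUP_VERBS:
                            if (action == HDA_FIXUP_ACT_PROBE)
                                    snd_hda_add_verbs(codec, fix->v.verbs);
                            break;
                    case HDA_FIXUP_FUNC:
                            if (fix->v.func)
                                    fix->v.func(codec, fix, action);
                            break;
                    }

                    id = fix->chained ? fix->chain_id : -1;
            }
    }
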
index 5fe7247..e4675cf 100644 (file)
@@ -838,8 +838,8 @@ static int max98373_sdw_probe(struct sdw_slave *slave,
 
        /* Regmap Initialization */
        regmap = devm_regmap_init_sdw(slave, &max98373_sdw_regmap);
-       if (!regmap)
-               return -EINVAL;
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
 
        return max98373_init(slave, regmap);
 }
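
The SoundWire codec fixes in this pull (max98373, rt1308, rt700/711/715) all correct the same bug: devm_regmap_init_sdw(), like every regmap constructor, reports failure with an ERR_PTR()-encoded pointer and never returns NULL, so the old "!regmap" check could not trigger and the encoded error was later treated as a valid regmap. The canonical check looks like the hedged probe fragment below, where my_sdw_regmap_config and my_init() are placeholders for the driver specifics:

    #include <linux/err.h>
    #include <linux/regmap.h>
    #include <linux/soundwire/sdw.h>

    static int my_sdw_probe(struct sdw_slave *slave,
                            const struct sdw_device_id *id)
    {
            struct regmap *regmap;

            regmap = devm_regmap_init_sdw(slave, &my_sdw_regmap_config);
            if (IS_ERR(regmap))
                    return PTR_ERR(regmap);         /* never NULL on failure */

            return my_init(slave, regmap);          /* driver-specific setup */
    }
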
index 5e445fe..821e739 100644 (file)
@@ -306,6 +306,13 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
        struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(dai->component);
        int ret;
 
+       /*
+        * Some sound cards pass 0 Hz to request a reset,
+        * but 0 Hz cannot actually be set. Ignore it here.
+        */
+       if (freq == 0)
+               return 0;
+
        if (freq > PCM3168A_MAX_SYSCLK)
                return -EINVAL;
 
index b0ba0d2..56e952a 100644 (file)
@@ -684,8 +684,8 @@ static int rt1308_sdw_probe(struct sdw_slave *slave,
 
        /* Regmap Initialization */
        regmap = devm_regmap_init_sdw(slave, &rt1308_sdw_regmap);
-       if (!regmap)
-               return -EINVAL;
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
 
        rt1308_sdw_init(&slave->dev, regmap, slave);
 
index 4d14048..1d24bf0 100644 (file)
@@ -452,8 +452,8 @@ static int rt700_sdw_probe(struct sdw_slave *slave,
 
        /* Regmap Initialization */
        sdw_regmap = devm_regmap_init_sdw(slave, &rt700_sdw_regmap);
-       if (!sdw_regmap)
-               return -EINVAL;
+       if (IS_ERR(sdw_regmap))
+               return PTR_ERR(sdw_regmap);
 
        regmap = devm_regmap_init(&slave->dev, NULL,
                &slave->dev, &rt700_regmap);
index 45b9289..7efff13 100644 (file)
@@ -452,8 +452,8 @@ static int rt711_sdw_probe(struct sdw_slave *slave,
 
        /* Regmap Initialization */
        sdw_regmap = devm_regmap_init_sdw(slave, &rt711_sdw_regmap);
-       if (!sdw_regmap)
-               return -EINVAL;
+       if (IS_ERR(sdw_regmap))
+               return PTR_ERR(sdw_regmap);
 
        regmap = devm_regmap_init(&slave->dev, NULL,
                &slave->dev, &rt711_regmap);
index d11b23d..68a3673 100644 (file)
@@ -527,8 +527,8 @@ static int rt715_sdw_probe(struct sdw_slave *slave,
 
        /* Regmap Initialization */
        sdw_regmap = devm_regmap_init_sdw(slave, &rt715_sdw_regmap);
-       if (!sdw_regmap)
-               return -EINVAL;
+       if (IS_ERR(sdw_regmap))
+               return PTR_ERR(sdw_regmap);
 
        regmap = devm_regmap_init(&slave->dev, NULL, &slave->dev,
                &rt715_regmap);
index 5cd50d8..8efe206 100644 (file)
@@ -842,6 +842,18 @@ static int adcx140_codec_probe(struct snd_soc_component *component)
        if (ret)
                goto out;
 
+       if (adcx140->supply_areg == NULL)
+               sleep_cfg_val |= ADCX140_AREG_INTERNAL;
+
+       ret = regmap_write(adcx140->regmap, ADCX140_SLEEP_CFG, sleep_cfg_val);
+       if (ret) {
+               dev_err(adcx140->dev, "setting sleep config failed %d\n", ret);
+               goto out;
+       }
+
+       /* 8.4.3: Wait >= 1ms after entering active mode. */
+       usleep_range(1000, 100000);
+
        pdm_count = device_property_count_u32(adcx140->dev,
                                              "ti,pdm-edge-select");
        if (pdm_count <= ADCX140_NUM_PDM_EDGES && pdm_count > 0) {
@@ -889,18 +901,6 @@ static int adcx140_codec_probe(struct snd_soc_component *component)
        if (ret)
                goto out;
 
-       if (adcx140->supply_areg == NULL)
-               sleep_cfg_val |= ADCX140_AREG_INTERNAL;
-
-       ret = regmap_write(adcx140->regmap, ADCX140_SLEEP_CFG, sleep_cfg_val);
-       if (ret) {
-               dev_err(adcx140->dev, "setting sleep config failed %d\n", ret);
-               goto out;
-       }
-
-       /* 8.4.3: Wait >= 1ms after entering active mode. */
-       usleep_range(1000, 100000);
-
        ret = regmap_update_bits(adcx140->regmap, ADCX140_BIAS_CFG,
                                ADCX140_MIC_BIAS_VAL_MSK |
                                ADCX140_MIC_BIAS_VREF_MSK, bias_cfg);
@@ -980,6 +980,8 @@ static int adcx140_i2c_probe(struct i2c_client *i2c,
        if (!adcx140)
                return -ENOMEM;
 
+       adcx140->dev = &i2c->dev;
+
        adcx140->gpio_reset = devm_gpiod_get_optional(adcx140->dev,
                                                      "reset", GPIOD_OUT_LOW);
        if (IS_ERR(adcx140->gpio_reset))
@@ -1007,7 +1009,7 @@ static int adcx140_i2c_probe(struct i2c_client *i2c,
                        ret);
                return ret;
        }
-       adcx140->dev = &i2c->dev;
+
        i2c_set_clientdata(i2c, adcx140);
 
        return devm_snd_soc_register_component(&i2c->dev,
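
Besides moving the sleep-config write ahead of the PDM/GPI setup, the adcx140 hunks fix an ordering bug in probe: adcx140->dev was only assigned near the end, after devm_gpiod_get_optional() and several error prints had already dereferenced it. The corrected shape, reduced to a hedged skeleton (struct my_priv and the helper names are placeholders; the two-argument I2C probe signature matches the 5.9-era driver above):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/i2c.h>
    #include <linux/slab.h>

    struct my_priv {
            struct device *dev;
            struct gpio_desc *gpio_reset;
    };

    static int my_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
    {
            struct my_priv *priv;

            priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* record the device pointer before anything dereferences it */
            priv->dev = &i2c->dev;

            priv->gpio_reset = devm_gpiod_get_optional(priv->dev, "reset",
                                                       GPIOD_OUT_LOW);
            if (IS_ERR(priv->gpio_reset))
                    return PTR_ERR(priv->gpio_reset);

            i2c_set_clientdata(i2c, priv);
            return 0;
    }
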
index 038be66..fc9ea19 100644 (file)
@@ -3514,6 +3514,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(component->dev);
+
        switch (micbias) {
        case 1:
                micdet = &wm8994->micdet[0];
@@ -3561,6 +3563,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
 
        snd_soc_dapm_sync(dapm);
 
+       pm_runtime_put(component->dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(wm8994_mic_detect);
@@ -3932,6 +3936,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(component->dev);
+
        if (jack) {
                snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
                snd_soc_dapm_sync(dapm);
@@ -4000,6 +4006,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
                snd_soc_dapm_sync(dapm);
        }
 
+       pm_runtime_put(component->dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(wm8958_mic_detect);
@@ -4193,11 +4201,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
                        wm8994->hubs.dcs_readback_mode = 2;
                        break;
                }
+               wm8994->hubs.micd_scthr = true;
                break;
 
        case WM8958:
                wm8994->hubs.dcs_readback_mode = 1;
                wm8994->hubs.hp_startup_mode = 1;
+               wm8994->hubs.micd_scthr = true;
 
                switch (control->revision) {
                case 0:
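
The wm8994 mic-detect paths now take a runtime-PM reference for the duration of the register writes, so the codec cannot autosuspend halfway through configuring the micbias and jack state. The general bracket looks like this hedged sketch, where do_mic_detect_config() stands in for the actual register work:

    #include <linux/pm_runtime.h>

    static int configure_while_awake(struct device *dev)
    {
            int ret;

            pm_runtime_get_sync(dev);        /* resume the device, hold a usage ref */

            ret = do_mic_detect_config(dev); /* placeholder for the real work */

            pm_runtime_put(dev);             /* drop the ref; device may suspend again */
            return ret;
    }
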
index 891effe..0c88184 100644 (file)
@@ -1223,6 +1223,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
                snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
                                    WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
 
+       if (!hubs->micd_scthr)
+               return 0;
+
        snd_soc_component_update_bits(component, WM8993_MICBIAS,
                            WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
                            WM8993_MICB1_LVL | WM8993_MICB2_LVL,
index 4b8e5f0..988b29e 100644 (file)
@@ -27,6 +27,7 @@ struct wm_hubs_data {
        int hp_startup_mode;
        int series_startup;
        int no_series_update;
+       bool micd_scthr;
 
        bool no_cache_dac_hp_direct;
        struct list_head dcs_cache;
index b1cac7a..fba2c79 100644 (file)
@@ -333,6 +333,17 @@ static int sst_media_open(struct snd_pcm_substream *substream,
        if (ret_val < 0)
                goto out_power_up;
 
+       /*
+        * Make sure the period is a multiple of 1 ms to match the
+        * firmware design. Apply the same rule to the buffer size so
+        * that ALSA can always find a valid period size regardless
+        * of the buffer size given by user space.
+        */
+       snd_pcm_hw_constraint_step(substream->runtime, 0,
+                          SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 48);
+       snd_pcm_hw_constraint_step(substream->runtime, 0,
+                          SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 48);
+
        /* Make sure, that the period size is always even */
        snd_pcm_hw_constraint_step(substream->runtime, 0,
                           SNDRV_PCM_HW_PARAM_PERIODS, 2);
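
The Atom SST hunk stacks two more constraints on top of the existing even-period rule: period and buffer sizes must be multiples of 48 frames (1 ms at 48 kHz), so whatever buffer size user space asks for can still be split into firmware-friendly periods. Such constraints are registered from the substream open/startup path, roughly as in this sketch; the 48-frame step is simply carried over from the hunk above:

    #include <sound/pcm.h>

    static int my_pcm_open(struct snd_pcm_substream *substream)
    {
            struct snd_pcm_runtime *runtime = substream->runtime;

            /* period and buffer sizes (in frames) must be multiples of 48 */
            snd_pcm_hw_constraint_step(runtime, 0,
                                       SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 48);
            snd_pcm_hw_constraint_step(runtime, 0,
                                       SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 48);

            /* and the number of periods must stay even */
            return snd_pcm_hw_constraint_step(runtime, 0,
                                              SNDRV_PCM_HW_PARAM_PERIODS, 2);
    }
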
index 479992f..fc20274 100644 (file)
@@ -591,6 +591,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_SSP0_AIF1 |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
+               },
+               .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+                                       BYT_RT5640_MONO_SPEAKER |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {
                /* MPMAN MPWIN895CL */
                .matches = {
index ca49000..bc50eda 100644 (file)
@@ -181,7 +181,7 @@ static void skl_set_hda_codec_autosuspend_delay(struct snd_soc_card *card)
        struct snd_soc_dai *dai;
 
        for_each_card_rtds(card, rtd) {
-               if (!strstr(rtd->dai_link->codecs->name, "ehdaudio"))
+               if (!strstr(rtd->dai_link->codecs->name, "ehdaudio0D0"))
                        continue;
                dai = asoc_rtd_to_codec(rtd, 0);
                hda_pvt = snd_soc_component_get_drvdata(dai->component);
index 1a69615..b6e63ea 100644 (file)
@@ -66,6 +66,10 @@ int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
        int j;
        int ret = 0;
 
+       /* the speaker pin is only controlled for playback streams */
+       if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+               return 0;
+
        for_each_rtd_codec_dais(rtd, j, codec_dai) {
                struct snd_soc_component *component = codec_dai->component;
                struct snd_soc_dapm_context *dapm =
@@ -86,9 +90,6 @@ int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
                case SNDRV_PCM_TRIGGER_STOP:
                case SNDRV_PCM_TRIGGER_SUSPEND:
                case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-                       /* Make sure no streams are active before disable pin */
-                       if (snd_soc_dai_active(codec_dai) != 1)
-                               break;
                        ret = snd_soc_dapm_disable_pin(dapm, pin_name);
                        if (!ret)
                                snd_soc_dapm_sync(dapm);
index de80e19..88c3f63 100644 (file)
@@ -243,92 +243,45 @@ static irqreturn_t hsw_irq(int irq, void *context)
        return ret;
 }
 
-#define CSR_DEFAULT_VALUE 0x8480040E
-#define ISC_DEFAULT_VALUE 0x0
-#define ISD_DEFAULT_VALUE 0x0
-#define IMC_DEFAULT_VALUE 0x7FFF0003
-#define IMD_DEFAULT_VALUE 0x7FFF0003
-#define IPCC_DEFAULT_VALUE 0x0
-#define IPCD_DEFAULT_VALUE 0x0
-#define CLKCTL_DEFAULT_VALUE 0x7FF
-#define CSR2_DEFAULT_VALUE 0x0
-#define LTR_CTRL_DEFAULT_VALUE 0x0
-#define HMD_CTRL_DEFAULT_VALUE 0x0
-
-static void hsw_set_shim_defaults(struct sst_dsp *sst)
-{
-       sst_dsp_shim_write_unlocked(sst, SST_CSR, CSR_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_ISRX, ISC_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_ISRD, ISD_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_IMRX, IMC_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_IMRD, IMD_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_IPCX, IPCC_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_IPCD, IPCD_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_CLKCTL, CLKCTL_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_CSR2, CSR2_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_LTRC, LTR_CTRL_DEFAULT_VALUE);
-       sst_dsp_shim_write_unlocked(sst, SST_HMDC, HMD_CTRL_DEFAULT_VALUE);
-}
-
-/* all clock-gating minus DCLCGE and DTCGE */
-#define SST_VDRTCL2_CG_OTHER   0xB7D
-
 static void hsw_set_dsp_D3(struct sst_dsp *sst)
 {
+       u32 val;
        u32 reg;
 
-       /* disable clock core gating */
+       /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg &= ~(SST_VDRTCL2_DCLCGE);
+       reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
 
-       /* stall, reset and set 24MHz XOSC */
-       sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
-                       SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST,
-                       SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST);
-
-       /* DRAM power gating all */
-       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
-       reg |= SST_VDRTCL0_ISRAMPGE_MASK |
-               SST_VDRTCL0_DSRAMPGE_MASK;
-       reg &= ~(SST_VDRTCL0_D3SRAMPGD);
-       reg |= SST_VDRTCL0_D3PGD;
-       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
-       udelay(50);
+       /* enable power gating and switch off DRAM & IRAM blocks */
+       val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+       val |= SST_VDRTCL0_DSRAMPGE_MASK |
+               SST_VDRTCL0_ISRAMPGE_MASK;
+       val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
+       writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);
 
-       /* PLL shutdown enable */
-       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg |= SST_VDRTCL2_APLLSE_MASK;
-       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+       /* switch off audio PLL */
+       val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+       val |= SST_VDRTCL2_APLLSE_MASK;
+       writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
 
-       /* disable MCLK */
+       /* disable MCLK(clkctl.smos = 0) */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
-                       SST_CLKCTL_MASK, 0);
-
-       /* switch clock gating */
-       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg |= SST_VDRTCL2_CG_OTHER;
-       reg &= ~(SST_VDRTCL2_DTCGE);
-       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
-       /* enable DTCGE separatelly */
-       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg |= SST_VDRTCL2_DTCGE;
-       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+               SST_CLKCTL_MASK, 0);
 
-       /* set shim defaults */
-       hsw_set_shim_defaults(sst);
-
-       /* set D3 */
-       reg = readl(sst->addr.pci_cfg + SST_PMCS);
-       reg |= SST_PMCS_PS_MASK;
-       writel(reg, sst->addr.pci_cfg + SST_PMCS);
+       /* Set D3 state, delay 50 us */
+       val = readl(sst->addr.pci_cfg + SST_PMCS);
+       val |= SST_PMCS_PS_MASK;
+       writel(val, sst->addr.pci_cfg + SST_PMCS);
        udelay(50);
 
-       /* enable clock core gating */
+       /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg |= SST_VDRTCL2_DCLCGE;
+       reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
        udelay(50);
+
 }
 
 static void hsw_reset(struct sst_dsp *sst)
@@ -346,62 +299,75 @@ static void hsw_reset(struct sst_dsp *sst)
                SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
 }
 
-/* recommended CSR state for power-up */
-#define SST_CSR_D0_MASK (0x18A09C0C | SST_CSR_DCS_MASK)
-
 static int hsw_set_dsp_D0(struct sst_dsp *sst)
 {
-       u32 reg;
+       int tries = 10;
+       u32 reg, fw_dump_bit;
 
-       /* disable clock core gating */
+       /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg &= ~(SST_VDRTCL2_DCLCGE);
+       reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
 
-       /* switch clock gating */
-       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg |= SST_VDRTCL2_CG_OTHER;
-       reg &= ~(SST_VDRTCL2_DTCGE);
-       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+       /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+       reg |= SST_VDRTCL0_D3PGD;
+       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
 
-       /* set D0 */
+       /* Set D0 state */
        reg = readl(sst->addr.pci_cfg + SST_PMCS);
-       reg &= ~(SST_PMCS_PS_MASK);
+       reg &= ~SST_PMCS_PS_MASK;
        writel(reg, sst->addr.pci_cfg + SST_PMCS);
 
-       /* DRAM power gating none */
-       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
-       reg &= ~(SST_VDRTCL0_ISRAMPGE_MASK |
-               SST_VDRTCL0_DSRAMPGE_MASK);
-       reg |= SST_VDRTCL0_D3SRAMPGD;
-       reg |= SST_VDRTCL0_D3PGD;
-       writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
-       mdelay(10);
+       /* check that ADSP shim is enabled */
+       while (tries--) {
+               reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
+               if (reg == 0)
+                       goto finish;
+
+               msleep(1);
+       }
+
+       return -ENODEV;
 
-       /* set shim defaults */
-       hsw_set_shim_defaults(sst);
+finish:
+       /* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
+       sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+               SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);
+
+       /* stall DSP core, set clk to 192/96Mhz */
+       sst_dsp_shim_update_bits_unlocked(sst,
+               SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
+               SST_CSR_STALL | SST_CSR_DCS(4));
 
-       /* restore MCLK */
+       /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
-                       SST_CLKCTL_MASK, SST_CLKCTL_MASK);
+               SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
+               SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);
 
-       /* PLL shutdown disable */
+       /* Stall and reset core, set CSR */
+       hsw_reset(sst);
+
+       /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg &= ~(SST_VDRTCL2_APLLSE_MASK);
+       reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
 
-       sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
-                       SST_CSR_D0_MASK, SST_CSR_SBCS0 | SST_CSR_SBCS1 |
-                       SST_CSR_STALL | SST_CSR_DCS(4));
        udelay(50);
 
-       /* enable clock core gating */
+       /* switch on audio PLL */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
-       reg |= SST_VDRTCL2_DCLCGE;
+       reg &= ~SST_VDRTCL2_APLLSE_MASK;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
 
-       /* clear reset */
-       sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_RST, 0);
+       /*
+        * Set the default power gating control: power gating is enabled for
+        * all blocks, so each block must be enabled before it can be accessed.
+        */
+       reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+       reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
+       /* for D0, always enable the block(DSRAM[0]) used for FW dump */
+       fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
+       writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);
+
 
        /* disable DMA finish function for SSP0 & SSP1 */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
@@ -418,6 +384,12 @@ static int hsw_set_dsp_D0(struct sst_dsp *sst)
        sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
                                SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);
 
+       /* clear IPC registers */
+       sst_dsp_shim_write(sst, SST_IPCX, 0x0);
+       sst_dsp_shim_write(sst, SST_IPCD, 0x0);
+       sst_dsp_shim_write(sst, 0x80, 0x6);
+       sst_dsp_shim_write(sst, 0xe0, 0x300a);
+
        return 0;
 }
 
@@ -443,6 +415,11 @@ static void hsw_sleep(struct sst_dsp *sst)
 {
        dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");
 
+       /* put DSP into reset and stall */
+       sst_dsp_shim_update_bits(sst, SST_CSR,
+               SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
+               SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
+
        hsw_set_dsp_D3(sst);
        dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
 }
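
The reworked Haswell D0/D3 paths drop the "restore every shim register to a default" approach and instead bracket each power transition the same way: core/trunk clock gating is switched off through VDRTCTL2, the power state and SRAM power-gate bits are programmed over PCI config space, and gating is switched back on with a short settle delay. Stripped of the chip-specific registers, the bracket is just a read-modify-write pattern like the sketch below (the offset, bit mask and program() callback are placeholders, not the driver's names):

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/types.h>

    static void power_transition(void __iomem *pci_cfg, unsigned int ctl2_off,
                                 u32 gate_bits, void (*program)(void __iomem *))
    {
            u32 reg;

            /* open the window: disable clock gating */
            reg = readl(pci_cfg + ctl2_off);
            reg &= ~gate_bits;
            writel(reg, pci_cfg + ctl2_off);

            /* program the new power state / SRAM gating */
            program(pci_cfg);

            /* close the window: re-enable clock gating and let it settle */
            reg = readl(pci_cfg + ctl2_off);
            reg |= gate_bits;
            writel(reg, pci_cfg + ctl2_off);
            udelay(50);
    }
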
index e711abc..d6adf7e 100644 (file)
@@ -18,6 +18,7 @@
 #define CTRL0_TODDR_SEL_RESAMPLE       BIT(30)
 #define CTRL0_TODDR_EXT_SIGNED         BIT(29)
 #define CTRL0_TODDR_PP_MODE            BIT(28)
+#define CTRL0_TODDR_SYNC_CH            BIT(27)
 #define CTRL0_TODDR_TYPE_MASK          GENMASK(15, 13)
 #define CTRL0_TODDR_TYPE(x)            ((x) << 13)
 #define CTRL0_TODDR_MSB_POS_MASK       GENMASK(12, 8)
@@ -189,10 +190,31 @@ static const struct axg_fifo_match_data axg_toddr_match_data = {
        .dai_drv                = &axg_toddr_dai_drv
 };
 
+static int g12a_toddr_dai_startup(struct snd_pcm_substream *substream,
+                                struct snd_soc_dai *dai)
+{
+       struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+       int ret;
+
+       ret = axg_toddr_dai_startup(substream, dai);
+       if (ret)
+               return ret;
+
+       /*
+        * Make sure the first channel ends up at the beginning of the output.
+        * As weird as it looks, without this the first channel may be misplaced
+        * in memory, with a random shift of 2 channels.
+        */
+       regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SYNC_CH,
+                          CTRL0_TODDR_SYNC_CH);
+
+       return 0;
+}
+
 static const struct snd_soc_dai_ops g12a_toddr_ops = {
        .prepare        = g12a_toddr_dai_prepare,
        .hw_params      = axg_toddr_dai_hw_params,
-       .startup        = axg_toddr_dai_startup,
+       .startup        = g12a_toddr_dai_startup,
        .shutdown       = axg_toddr_dai_shutdown,
 };
 
index 083413a..575e2ae 100644 (file)
@@ -143,6 +143,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
 
        card = &data->card;
        card->dev = dev;
+       card->owner = THIS_MODULE;
        card->dapm_widgets = apq8016_sbc_dapm_widgets;
        card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets);
 
index 2535496..1a69bae 100644 (file)
@@ -114,6 +114,7 @@ static int apq8096_platform_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        card->dev = dev;
+       card->owner = THIS_MODULE;
        dev_set_drvdata(dev, card);
        ret = qcom_snd_parse_of(card);
        if (ret)
index 5194d90..fd69cf8 100644 (file)
@@ -52,8 +52,10 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
 
        for_each_child_of_node(dev->of_node, np) {
                dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL);
-               if (!dlc)
-                       return -ENOMEM;
+               if (!dlc) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
 
                link->cpus      = &dlc[0];
                link->platforms = &dlc[1];
index 0d10fba..ab1bf23 100644 (file)
@@ -555,6 +555,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
        card->dapm_widgets = sdm845_snd_widgets;
        card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets);
        card->dev = dev;
+       card->owner = THIS_MODULE;
        dev_set_drvdata(dev, card);
        ret = qcom_snd_parse_of(card);
        if (ret)
index c0c388d..80c9cf2 100644 (file)
@@ -96,6 +96,7 @@ static int storm_platform_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        card->dev = &pdev->dev;
+       card->owner = THIS_MODULE;
 
        ret = snd_soc_of_parse_card_name(card, "qcom,model");
        if (ret) {
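
The string of one-line qcom machine-driver fixes all set card->owner. That field is handed to snd_card_new() as the owning module, and the ALSA core takes a reference on it whenever one of the card's devices is opened; without it the machine-driver module can be unloaded while a stream is still running. A minimal hedged probe fragment (card name and DAI-link setup omitted):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <sound/soc.h>

    static int my_snd_platform_probe(struct platform_device *pdev)
    {
            struct snd_soc_card *card;

            card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
            if (!card)
                    return -ENOMEM;

            card->dev = &pdev->dev;
            card->owner = THIS_MODULE;      /* pin this module while the card is open */

            /* card->name, dai_link setup, etc. elided */
            return devm_snd_soc_register_card(&pdev->dev, card);
    }
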
index 663e383..0544376 100644 (file)
@@ -834,6 +834,19 @@ struct snd_soc_dai *snd_soc_find_dai(
 }
 EXPORT_SYMBOL_GPL(snd_soc_find_dai);
 
+struct snd_soc_dai *snd_soc_find_dai_with_mutex(
+       const struct snd_soc_dai_link_component *dlc)
+{
+       struct snd_soc_dai *dai;
+
+       mutex_lock(&client_mutex);
+       dai = snd_soc_find_dai(dlc);
+       mutex_unlock(&client_mutex);
+
+       return dai;
+}
+EXPORT_SYMBOL_GPL(snd_soc_find_dai_with_mutex);
+
 static int soc_dai_link_sanity_check(struct snd_soc_card *card,
                                     struct snd_soc_dai_link *link)
 {
index 91a2551..0dbd312 100644 (file)
@@ -412,14 +412,14 @@ void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link)
                supported_codec = false;
 
                for_each_link_cpus(dai_link, i, cpu) {
-                       dai = snd_soc_find_dai(cpu);
+                       dai = snd_soc_find_dai_with_mutex(cpu);
                        if (dai && snd_soc_dai_stream_valid(dai, direction)) {
                                supported_cpu = true;
                                break;
                        }
                }
                for_each_link_codecs(dai_link, i, codec) {
-                       dai = snd_soc_find_dai(codec);
+                       dai = snd_soc_find_dai_with_mutex(codec);
                        if (dai && snd_soc_dai_stream_valid(dai, direction)) {
                                supported_codec = true;
                                break;
index 00ac1cb..4c9d4cd 100644 (file)
@@ -812,7 +812,7 @@ dynamic:
        return 0;
 
 config_err:
-       for_each_rtd_dais(rtd, i, dai)
+       for_each_rtd_dais_rollback(rtd, i, dai)
                snd_soc_dai_shutdown(dai, substream);
 
        snd_soc_link_shutdown(substream);
index 5c47de9..57feb47 100644 (file)
@@ -446,12 +446,12 @@ static const struct snd_soc_dai_ops ams_delta_dai_ops = {
 /* Will be used if the codec ever has its own digital_mute function */
 static int ams_delta_startup(struct snd_pcm_substream *substream)
 {
-       return ams_delta_digital_mute(NULL, 0, substream->stream);
+       return ams_delta_mute(NULL, 0, substream->stream);
 }
 
 static void ams_delta_shutdown(struct snd_pcm_substream *substream)
 {
-       ams_delta_digital_mute(NULL, 1, substream->stream);
+       ams_delta_mute(NULL, 1, substream->stream);
 }
 
 
index 3d0d823..7d66876 100644 (file)
@@ -135,7 +135,7 @@ struct in_addr {
  * this socket to prevent accepting spoofed ones.
  */
 #define IP_PMTUDISC_INTERFACE          4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
  * fragmented if they exeed the interface mtu
  */
 #define IP_PMTUDISC_OMIT               5
index f6d8603..7d8eced 100644 (file)
@@ -790,9 +790,10 @@ struct kvm_ppc_resize_hpt {
 #define KVM_VM_PPC_HV 1
 #define KVM_VM_PPC_PR 2
 
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE         0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO       0
 #define KVM_VM_MIPS_VZ         1
+#define KVM_VM_MIPS_TE         2
 
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
@@ -1035,6 +1036,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_LAST_CPU 184
 #define KVM_CAP_SMALLER_MAXPHYADDR 185
 #define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 71d830d..cecce93 100644 (file)
@@ -66,11 +66,10 @@ static void fdpair(int fds[2])
 /* Block until we're ready to go */
 static void ready(int ready_out, int wakefd)
 {
-       char dummy;
        struct pollfd pollfd = { .fd = wakefd, .events = POLLIN };
 
        /* Tell them we're ready. */
-       if (write(ready_out, &dummy, 1) != 1)
+       if (write(ready_out, "R", 1) != 1)
                err(EXIT_FAILURE, "CLIENT: ready write");
 
        /* Wait for "GO" signal */
@@ -85,6 +84,7 @@ static void *sender(struct sender_context *ctx)
        unsigned int i, j;
 
        ready(ctx->ready_out, ctx->wakefd);
+       memset(data, 'S', sizeof(data));
 
        /* Now pump to every receiver. */
        for (i = 0; i < nr_loops; i++) {
index 7e1aa82..653b11b 100644 (file)
@@ -61,7 +61,7 @@
   {
     "EventName": "ex_ret_brn_ind_misp",
     "EventCode": "0xca",
-    "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+    "BriefDescription": "Retired Indirect Branch Instructions Mispredicted."
   },
   {
     "EventName": "ex_ret_mmx_fp_instr.sse_instr",
index de89e5a..4b75183 100644 (file)
   {
     "EventName": "ex_ret_fus_brnch_inst",
     "EventCode": "0x1d0",
-    "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8.",
+    "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8."
   }
 ]
index 6cd4081..a36f49f 100644 (file)
@@ -49,6 +49,7 @@ Following tests are defined (with perf commands):
   perf record --call-graph fp kill              (test-record-graph-fp)
   perf record --group -e cycles,instructions kill (test-record-group)
   perf record -e '{cycles,instructions}' kill   (test-record-group1)
+  perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
   perf record -D kill                           (test-record-no-delay)
   perf record -i kill                           (test-record-no-inherit)
   perf record -n kill                           (test-record-no-samples)
diff --git a/tools/perf/tests/attr/test-record-group2 b/tools/perf/tests/attr/test-record-group2
new file mode 100644 (file)
index 0000000..6b9f8d1
--- /dev/null
@@ -0,0 +1,29 @@
+[config]
+command = record
+args    = --no-bpf-event -e '{cycles/period=1234000/,instructions/period=6789000/}:S' kill >/dev/null 2>&1
+ret     = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+config=0|1
+sample_period=1234000
+sample_type=87
+read_format=12
+inherit=0
+freq=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+config=0|1
+sample_period=6789000
+sample_type=87
+read_format=12
+disabled=0
+inherit=0
+mmap=0
+comm=0
+freq=0
+enable_on_exec=0
+task=0
index da8ec1e..cc9fbce 100644 (file)
@@ -45,10 +45,13 @@ volatile long the_var;
 #if defined (__x86_64__)
 extern void __test_function(volatile long *ptr);
 asm (
+       ".pushsection .text;"
        ".globl __test_function\n"
+       ".type __test_function, @function;"
        "__test_function:\n"
        "incq (%rdi)\n"
-       "ret\n");
+       "ret\n"
+       ".popsection\n");
 #else
 static void __test_function(volatile long *ptr)
 {
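
The bp_signal fix matters because top-level inline asm is emitted into whatever section the assembler has open at that point, and without a .type directive the symbol is not marked as a function, which can leave the test without a proper function symbol to place a breakpoint on, depending on the toolchain. The fixed pattern, shown as a standalone x86-64 example with a hypothetical bump() function:

    volatile long counter;

    extern void bump(volatile long *ptr);

    /* place the asm-defined function explicitly in .text and type it */
    asm (".pushsection .text;"
         ".globl bump\n"
         ".type bump, @function;"
         "bump:\n"
         "incq (%rdi)\n"
         "ret\n"
         ".popsection\n");

    int main(void)
    {
            bump(&counter);
            return counter == 1 ? 0 : 1;
    }
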
index 23db8ac..cd7331a 100644 (file)
@@ -153,8 +153,10 @@ static int __compute_metric(const char *name, struct value *vals,
                return -ENOMEM;
 
        cpus = perf_cpu_map__new("0");
-       if (!cpus)
+       if (!cpus) {
+               evlist__delete(evlist);
                return -ENOMEM;
+       }
 
        perf_evlist__set_maps(&evlist->core, cpus, NULL);
 
@@ -163,10 +165,11 @@ static int __compute_metric(const char *name, struct value *vals,
                                             false, false,
                                             &metric_events);
        if (err)
-               return err;
+               goto out;
 
-       if (perf_evlist__alloc_stats(evlist, false))
-               return -1;
+       err = perf_evlist__alloc_stats(evlist, false);
+       if (err)
+               goto out;
 
        /* Load the runtime stats with given numbers for events. */
        runtime_stat__init(&st);
@@ -178,13 +181,14 @@ static int __compute_metric(const char *name, struct value *vals,
        if (name2 && ratio2)
                *ratio2 = compute_single(&metric_events, evlist, &st, name2);
 
+out:
        /* ... clenup. */
        metricgroup__rblist_exit(&metric_events);
        runtime_stat__exit(&st);
        perf_evlist__free_stats(evlist);
        perf_cpu_map__put(cpus);
        evlist__delete(evlist);
-       return 0;
+       return err;
 }
 
 static int compute_metric(const char *name, struct value *vals, double *ratio)
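
Several of the perf fixes in this pull (parse-metric, metricgroup, evlist) share one shape: an early return leaked objects that had already been allocated, and the fix routes every failure through a single out: label that releases them in reverse order. A small self-contained sketch of that pattern, where the calloc'd structs stand in for evlists, cpu maps and the like:

    #include <stdio.h>
    #include <stdlib.h>

    struct a { int x; };
    struct b { int y; };

    static int run(void)
    {
            struct a *a = NULL;
            struct b *b = NULL;
            int err = -1;

            a = calloc(1, sizeof(*a));
            if (!a)
                    goto out;

            b = calloc(1, sizeof(*b));
            if (!b)
                    goto out;

            err = 0;            /* the real work would go here */
    out:
            /* single exit: release in reverse order, free(NULL) is a no-op */
            free(b);
            free(a);
            return err;
    }

    int main(void)
    {
            return run();
    }
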
index eb19f9a..d3517a7 100644 (file)
@@ -274,6 +274,7 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
        int res = 0;
        bool use_uncore_table;
        struct pmu_events_map *map = __test_pmu_get_events_map();
+       struct perf_pmu_alias *a, *tmp;
 
        if (!map)
                return -1;
@@ -347,6 +348,10 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
                          pmu_name, alias->name);
        }
 
+       list_for_each_entry_safe(a, tmp, &aliases, list) {
+               list_del(&a->list);
+               perf_pmu_free_alias(a);
+       }
        free(pmu);
        return res;
 }
index 5c11fe2..714e683 100644 (file)
@@ -173,6 +173,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
                ret = 0;
        } while (0);
 
+       perf_pmu__del_formats(&formats);
        test_format_dir_put(format);
        return ret;
 }
index e3fa3bf..c0768c6 100644 (file)
@@ -946,6 +946,10 @@ int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
 
        perf_evlist__set_maps(&evlist->core, cpus, threads);
 
+       /* as evlist now has references, put count here */
+       perf_cpu_map__put(cpus);
+       perf_thread_map__put(threads);
+
        return 0;
 
 out_delete_threads:
@@ -1273,11 +1277,12 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist)
                goto out_put;
 
        perf_evlist__set_maps(&evlist->core, cpus, threads);
-out:
-       return err;
+
+       perf_thread_map__put(threads);
 out_put:
        perf_cpu_map__put(cpus);
-       goto out;
+out:
+       return err;
 }
 
 int evlist__open(struct evlist *evlist)
index fd86500..459b51e 100644 (file)
@@ -976,16 +976,20 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         * We default some events to have a default interval. But keep
         * it a weak assumption overridable by the user.
         */
-       if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
-                                    opts->user_interval != ULLONG_MAX)) {
+       if (!attr->sample_period) {
                if (opts->freq) {
-                       evsel__set_sample_bit(evsel, PERIOD);
                        attr->freq              = 1;
                        attr->sample_freq       = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }
+       /*
+        * If attr->freq was set (here or earlier), ask for period
+        * to be sampled.
+        */
+       if (attr->freq)
+               evsel__set_sample_bit(evsel, PERIOD);
 
        if (opts->no_samples)
                attr->sample_freq = 0;
index 8831b96..ab5030f 100644 (file)
@@ -85,6 +85,7 @@ static void metric_event_delete(struct rblist *rblist __maybe_unused,
 
        list_for_each_entry_safe(expr, tmp, &me->head, nd) {
                free(expr->metric_refs);
+               free(expr->metric_events);
                free(expr);
        }
 
@@ -316,6 +317,7 @@ static int metricgroup__setup_events(struct list_head *groups,
                        if (!metric_refs) {
                                ret = -ENOMEM;
                                free(metric_events);
+                               free(expr);
                                break;
                        }
 
@@ -530,6 +532,9 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
                                                continue;
                                        strlist__add(me->metrics, s);
                                }
+
+                               if (!raw)
+                                       free(s);
                        }
                        free(omg);
                }
@@ -667,7 +672,6 @@ static int __add_metric(struct list_head *metric_list,
                m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
                INIT_LIST_HEAD(&m->metric_refs);
                m->metric_refs_cnt = 0;
-               *mp = m;
 
                parent = expr_ids__alloc(ids);
                if (!parent) {
@@ -680,6 +684,7 @@ static int __add_metric(struct list_head *metric_list,
                        free(m);
                        return -ENOMEM;
                }
+               *mp = m;
        } else {
                /*
                 * We got here for the referenced metric, via the
@@ -714,8 +719,11 @@ static int __add_metric(struct list_head *metric_list,
         * all the metric's IDs and add it to the parent context.
         */
        if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
-               expr__ctx_clear(&m->pctx);
-               free(m);
+               if (m->metric_refs_cnt == 0) {
+                       expr__ctx_clear(&m->pctx);
+                       free(m);
+                       *mp = NULL;
+               }
                return -EINVAL;
        }
 
@@ -934,7 +942,7 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
 
                ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
                if (ret)
-                       return ret;
+                       goto out;
 
                /*
                 * Process any possible referenced metrics
@@ -943,12 +951,14 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
                ret = resolve_metric(metric_no_group,
                                     &list, map, &ids);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        /* End of pmu events. */
-       if (!has_match)
-               return -EINVAL;
+       if (!has_match) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        list_for_each_entry(m, &list, nd) {
                if (events->len > 0)
@@ -963,9 +973,14 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
                }
        }
 
+out:
+       /*
+        * Add the metrics to metric_list so that they can be released
+        * even if the operation failed.
+        */
        list_splice(&list, metric_list);
        expr_ids__exit(&ids);
-       return 0;
+       return ret;
 }
 
 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
@@ -1040,7 +1055,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
        ret = metricgroup__add_metric_list(str, metric_no_group,
                                           &extra_events, &metric_list, map);
        if (ret)
-               return ret;
+               goto out;
        pr_debug("adding %s\n", extra_events.buf);
        bzero(&parse_error, sizeof(parse_error));
        ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
@@ -1048,11 +1063,11 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
                parse_events_print_error(&parse_error, extra_events.buf);
                goto out;
        }
-       strbuf_release(&extra_events);
        ret = metricgroup__setup_events(&metric_list, metric_no_merge,
                                        perf_evlist, metric_events);
 out:
        metricgroup__free_metrics(&metric_list);
+       strbuf_release(&extra_events);
        return ret;
 }
 
index c4d2394..667cbca 100644 (file)
@@ -411,7 +411,7 @@ static int add_event_tool(struct list_head *list, int *idx,
                return -ENOMEM;
        evsel->tool_event = tool_event;
        if (tool_event == PERF_TOOL_DURATION_TIME)
-               evsel->unit = strdup("ns");
+               evsel->unit = "ns";
        return 0;
 }
 
index f1688e1..d41caeb 100644 (file)
@@ -274,7 +274,7 @@ static void perf_pmu_update_alias(struct perf_pmu_alias *old,
 }
 
 /* Delete an alias entry. */
-static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
 {
        zfree(&newalias->name);
        zfree(&newalias->desc);
@@ -1354,6 +1354,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
                set_bit(b, bits);
 }
 
+void perf_pmu__del_formats(struct list_head *formats)
+{
+       struct perf_pmu_format *fmt, *tmp;
+
+       list_for_each_entry_safe(fmt, tmp, formats, list) {
+               list_del(&fmt->list);
+               free(fmt->name);
+               free(fmt);
+       }
+}
+
 static int sub_non_neg(int a, int b)
 {
        if (b > a)
index 44ccbdb..a64e9c9 100644 (file)
@@ -94,6 +94,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
                         int config, unsigned long *bits);
 void perf_pmu__set_format(unsigned long *bits, long from, long to);
 int perf_pmu__format_parse(char *dir, struct list_head *head);
+void perf_pmu__del_formats(struct list_head *formats);
 
 struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
 
@@ -113,6 +114,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
 
 struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
 bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
+void perf_pmu_free_alias(struct perf_pmu_alias *alias);
 
 int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
 
index a4cc115..ea9aa1d 100644 (file)
@@ -2,6 +2,7 @@
 #include "debug.h"
 #include "evlist.h"
 #include "evsel.h"
+#include "evsel_config.h"
 #include "parse-events.h"
 #include <errno.h>
 #include <limits.h>
@@ -33,11 +34,24 @@ static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evl
        return leader;
 }
 
+static u64 evsel__config_term_mask(struct evsel *evsel)
+{
+       struct evsel_config_term *term;
+       struct list_head *config_terms = &evsel->config_terms;
+       u64 term_types = 0;
+
+       list_for_each_entry(term, config_terms, list) {
+               term_types |= 1 << term->type;
+       }
+       return term_types;
+}
+
 static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
 {
        struct perf_event_attr *attr = &evsel->core.attr;
        struct evsel *leader = evsel->leader;
        struct evsel *read_sampler;
+       u64 term_types, freq_mask;
 
        if (!leader->sample_read)
                return;
@@ -47,16 +61,20 @@ static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *ev
        if (evsel == read_sampler)
                return;
 
+       term_types = evsel__config_term_mask(evsel);
        /*
-        * Disable sampling for all group members other than the leader in
-        * case the leader 'leads' the sampling, except when the leader is an
-        * AUX area event, in which case the 2nd event in the group is the one
-        * that 'leads' the sampling.
+        * Disable sampling for all group members except those with explicit
+        * config terms or the leader. In the case of an AUX area event, the 2nd
+        * event in the group is the one that 'leads' the sampling.
         */
-       attr->freq           = 0;
-       attr->sample_freq    = 0;
-       attr->sample_period  = 0;
-       attr->write_backward = 0;
+       freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
+       if ((term_types & freq_mask) == 0) {
+               attr->freq           = 0;
+               attr->sample_freq    = 0;
+               attr->sample_period  = 0;
+       }
+       if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
+               attr->write_backward = 0;
 
        /*
         * We don't get a sample for slave events, we make them when delivering
index e1ba6c1..924b54d 100644 (file)
@@ -517,7 +517,7 @@ static void print_l1_dcache_misses(struct perf_stat_config *config,
 
        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
-       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
+       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
 }
 
 static void print_l1_icache_misses(struct perf_stat_config *config,
@@ -538,7 +538,7 @@ static void print_l1_icache_misses(struct perf_stat_config *config,
                ratio = avg / total * 100.0;
 
        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
+       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
 }
 
 static void print_dtlb_cache_misses(struct perf_stat_config *config,
@@ -558,7 +558,7 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config,
                ratio = avg / total * 100.0;
 
        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
+       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
 }
 
 static void print_itlb_cache_misses(struct perf_stat_config *config,
@@ -578,7 +578,7 @@ static void print_itlb_cache_misses(struct perf_stat_config *config,
                ratio = avg / total * 100.0;
 
        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
+       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
 }
 
 static void print_ll_cache_misses(struct perf_stat_config *config,
@@ -598,7 +598,7 @@ static void print_ll_cache_misses(struct perf_stat_config *config,
                ratio = avg / total * 100.0;
 
        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
+       out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
 }
 
 /*
@@ -853,14 +853,16 @@ static void generic_metric(struct perf_stat_config *config,
 double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
 {
        struct expr_parse_ctx pctx;
-       double ratio;
+       double ratio = 0.0;
 
        if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
-               return 0.;
+               goto out;
 
        if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
-               return 0.;
+               ratio = 0.0;
 
+out:
+       expr__ctx_clear(&pctx);
        return ratio;
 }
 
@@ -918,7 +920,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
                        print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
                else
-                       print_metric(config, ctxp, NULL, NULL, "of all L1-dcache hits", 0);
+                       print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
@@ -928,7 +930,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
                        print_l1_icache_misses(config, cpu, evsel, avg, out, st);
                else
-                       print_metric(config, ctxp, NULL, NULL, "of all L1-icache hits", 0);
+                       print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
@@ -938,7 +940,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
                        print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
                else
-                       print_metric(config, ctxp, NULL, NULL, "of all dTLB cache hits", 0);
+                       print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
@@ -948,7 +950,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
                        print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
                else
-                       print_metric(config, ctxp, NULL, NULL, "of all iTLB cache hits", 0);
+                       print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
@@ -958,7 +960,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
                        print_ll_cache_misses(config, cpu, evsel, avg, out, st);
                else
-                       print_metric(config, ctxp, NULL, NULL, "of all LL-cache hits", 0);
+                       print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
                total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
 
index e0cf8eb..30b71b1 100644 (file)
@@ -7,6 +7,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/mman.h>
+#include <unistd.h>
 
 #include <asm/cputable.h>
 
@@ -18,9 +19,13 @@ int test_prot_sao(void)
 {
        char *p;
 
-       /* SAO was introduced in 2.06 and removed in 3.1 */
+       /*
+        * SAO was introduced in 2.06 and removed in 3.1. It's disabled in
+        * guests/LPARs by default, so also skip if we are running in a guest.
+        */
        SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
-               have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+               have_hwcap2(PPC_FEATURE2_ARCH_3_1) ||
+               access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0);
 
        /*
         * Ensure we can ask for PROT_SAO.