Merge tag 'riscv-for-linus-5.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 3 Apr 2021 18:52:18 +0000 (11:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 3 Apr 2021 18:52:18 +0000 (11:52 -0700)
Pull RISC-V fixes from Palmer Dabbelt:
 "A handful of fixes for 5.12:

   - fix a stack tracing regression related to "const register asm"
     variables, which have unexpected behavior.

   - ensure the value to be written by put_user() is evaluated before
     enabling access to userspace memory (see the sketch after this
     list).

   - align the exception vector table correctly, so we don't rely on the
     firmware's handling of unaligned accesses.

   - build fix to make NUMA depend on MMU, which triggered on some
     randconfigs"
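
As a sketch of the put_user() hazard mentioned above (simplified
stand-ins, not the kernel's real uaccess macros): if the value
expression is only expanded after the user-access window is opened,
any side effects of that expression run with supervisor access to user
memory enabled. Evaluating it into a local first closes that window:

  #include <stdio.h>

  static int user_access_enabled;

  static void enable_user_access(void)  { user_access_enabled = 1; }
  static void disable_user_access(void) { user_access_enabled = 0; }

  /* Stand-in for a value expression with side effects; in the kernel
   * it could itself fault or touch user memory. */
  static int compute_value(void)
  {
      printf("value evaluated with user access %s\n",
             user_access_enabled ? "ENABLED (bug)" : "disabled (ok)");
      return 42;
  }

  /* Broken shape: (x) expands inside the open window. */
  #define PUT_USER_BROKEN(x, ptr) do {  \
      enable_user_access();             \
      *(ptr) = (x);                     \
      disable_user_access();            \
  } while (0)

  /* Fixed shape: evaluate (x) into a local before opening the window. */
  #define PUT_USER_FIXED(x, ptr) do {   \
      __typeof__(*(ptr)) __val = (x);   \
      enable_user_access();             \
      *(ptr) = __val;                   \
      disable_user_access();            \
  } while (0)

  int main(void)
  {
      int dst;

      PUT_USER_BROKEN(compute_value(), &dst);
      PUT_USER_FIXED(compute_value(), &dst);
      return 0;
  }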

* tag 'riscv-for-linus-5.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Make NUMA depend on MMU
  riscv: remove unneeded semicolon
  riscv,entry: fix misaligned base for excp_vect_table
  riscv: evaluate put_user() arg before enabling user access
  riscv: Drop const annotation for sp
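
For reference on "riscv: Drop const annotation for sp": the fix
concerns a GCC local register variable used while walking stack
frames. A rough, riscv64-only sketch follows; the variable name is an
assumption, not the verbatim kernel code:

  /* With const, the compiler may treat the register's value as
   * invariant and cache the first read, which is wrong for the stack
   * pointer; dropping const forces a fresh read at each use. */
  unsigned long read_stack_pointer(void)
  {
      /* Broken shape:
       *   const register unsigned long current_sp __asm__("sp");
       */
      register unsigned long current_sp __asm__("sp");

      return current_sp;
  }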

112 files changed:
MAINTAINERS
arch/arm64/include/asm/kvm_arm.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kvm/debug.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/mips/kernel/setup.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/mobility.c
arch/s390/include/asm/vdso/data.h
arch/s390/kernel/time.c
arch/x86/include/asm/smp.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/Makefile
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/xtensa/kernel/coprocessor.S
arch/xtensa/mm/fault.c
block/bio.c
block/blk-mq-debugfs.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/base/dd.c
drivers/base/power/runtime.c
drivers/block/null_blk/main.c
drivers/block/null_blk/null_blk.h
drivers/block/xen-blkback/blkback.c
drivers/cpufreq/freq_table.c
drivers/extcon/extcon.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/host1x/bus.c
drivers/interconnect/bulk.c
drivers/interconnect/core.c
drivers/interconnect/qcom/msm8939.c
drivers/misc/mei/client.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-microchip-sgpio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sc7280.c
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/scsi/scsi_transport_iscsi.c
drivers/soc/qcom/qcom-geni-se.c
drivers/staging/rtl8192e/rtllib.h
drivers/staging/rtl8192e/rtllib_rx.c
drivers/tty/serial/qcom_geni_serial.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/amd5536udc_pci.c
drivers/usb/host/xhci-mtk.c
drivers/usb/musb/musb_core.c
drivers/usb/usbip/vhci_hcd.c
drivers/vfio/pci/Kconfig
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/hyperv_fb.c
fs/block_dev.c
fs/io-wq.c
fs/io_uring.c
fs/reiserfs/xattr.h
include/linux/acpi.h
include/linux/blkdev.h
include/linux/extcon.h
include/linux/firmware/intel/stratix10-svc-client.h
include/linux/host1x.h
include/linux/qcom-geni-se.h
include/linux/xarray.h
include/scsi/scsi_transport_iscsi.h
include/uapi/linux/blkpg.h
kernel/trace/ftrace.c
kernel/trace/trace.c
lib/test_xarray.c
lib/xarray.c
mm/memory.c
scripts/module.lds.S
security/tomoyo/network.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/kvm/kvm_stat/kvm_stat.service
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux/compiler_types.h [deleted file]
tools/testing/radix-tree/multiorder.c
tools/testing/radix-tree/xarray.c
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/x86_64/hyperv_clock.c

index fb2a363..c80ad73 100644 (file)
@@ -7474,8 +7474,9 @@ F:        include/uapi/asm-generic/
 GENERIC PHY FRAMEWORK
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Vinod Koul <vkoul@kernel.org>
-L:     linux-kernel@vger.kernel.org
+L:     linux-phy@lists.infradead.org
 S:     Supported
+Q:     https://patchwork.kernel.org/project/linux-phy/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
 F:     Documentation/devicetree/bindings/phy/
 F:     drivers/phy/
@@ -15627,8 +15628,8 @@ F:      Documentation/s390/pci.rst
 
 S390 VFIO AP DRIVER
 M:     Tony Krowiak <akrowiak@linux.ibm.com>
-M:     Pierre Morel <pmorel@linux.ibm.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
+M:     Jason Herne <jjherne@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -15640,6 +15641,7 @@ F:      drivers/s390/crypto/vfio_ap_private.h
 S390 VFIO-CCW DRIVER
 M:     Cornelia Huck <cohuck@redhat.com>
 M:     Eric Farman <farman@linux.ibm.com>
+M:     Matthew Rosato <mjrosato@linux.ibm.com>
 R:     Halil Pasic <pasic@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
@@ -15650,6 +15652,7 @@ F:      include/uapi/linux/vfio_ccw.h
 
 S390 VFIO-PCI DRIVER
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
+M:     Eric Farman <farman@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
 S:     Supported
index 4e90c2d..94d4025 100644 (file)
 #define CPTR_EL2_DEFAULT       CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF          (1 << 19)
 #define MDCR_EL2_TPMS          (1 << 14)
 #define MDCR_EL2_E2PB_MASK     (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT    (UL(12))
index 2a5d985..e5281e1 100644 (file)
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
         * of support.
         */
        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
        ARM64_FTR_END,
 };
index 7a7e425..dbc8905 100644 (file)
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
  *  - Debug ROM Address (MDCR_EL2_TDRA)
  *  - OS related registers (MDCR_EL2_TDOSA)
  *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
  *
  * Additionally, KVM only traps guest accesses to the debug registers if
  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
                                MDCR_EL2_TPMS |
+                               MDCR_EL2_TTRF |
                                MDCR_EL2_TPMCR |
                                MDCR_EL2_TDRA |
                                MDCR_EL2_TDOSA);
index ee3682b..39f8f7f 100644 (file)
@@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void)
        if (has_vhe())
                flags = local_daif_save();
 
+       /*
+        * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+        * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+        * interrupt overrides must be set. You've got to love this.
+        */
+       sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+       isb();
        write_gicreg(0, ICC_SRE_EL1);
        isb();
 
@@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void)
 
        write_gicreg(sre, ICC_SRE_EL1);
        isb();
+       sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+       isb();
 
        if (has_vhe())
                local_daif_restore(flags);
index 279be01..23a1403 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/prom.h>
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
index 764170f..3805519 100644 (file)
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 
        want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-       flags = (newpp & 7) | H_AVPN;
+       flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+       flags |= (newpp & HPTE_R_KEY_HI) >> 48;
        if (mmu_has_feature(MMU_FTR_KERNEL_RO))
                /* Move pp0 into bit 8 (IBM 55) */
                flags |= (newpp & HPTE_R_PP0) >> 55;
index ea4d6a6..e83e089 100644 (file)
@@ -452,12 +452,28 @@ static int do_suspend(void)
        return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+       atomic_t counter;
+       bool done;
+};
+
 static int do_join(void *arg)
 {
-       atomic_t *counter = arg;
+       struct pseries_suspend_info *info = arg;
+       atomic_t *counter = &info->counter;
        long hvrc;
        int ret;
 
+retry:
        /* Must ensure MSR.EE off for H_JOIN. */
        hard_irq_disable();
        hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
        case H_SUCCESS:
                /*
                 * The suspend is complete and this cpu has received a
-                * prod.
+                * prod, or we've received a stray prod from unrelated
+                * code (e.g. paravirt spinlocks) and we need to join
+                * again.
+                *
+                * This barrier orders the return from H_JOIN above vs
+                * the load of info->done. It pairs with the barrier
+                * in the wakeup/prod path below.
                 */
+               smp_mb();
+               if (READ_ONCE(info->done) == false) {
+                       pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+                                           smp_processor_id());
+                       goto retry;
+               }
                ret = 0;
                break;
        case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
 
        if (atomic_inc_return(counter) == 1) {
                pr_info("CPU %u waking all threads\n", smp_processor_id());
+               WRITE_ONCE(info->done, true);
+               /*
+                * This barrier orders the store to info->done vs subsequent
+                * H_PRODs to wake the other CPUs. It pairs with the barrier
+                * in the H_SUCCESS case above.
+                */
+               smp_mb();
                prod_others();
        }
        /*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
        int ret;
 
        while (true) {
-               atomic_t counter = ATOMIC_INIT(0);
+               struct pseries_suspend_info info;
                unsigned long vasi_state;
                int vasi_err;
 
-               ret = stop_machine(do_join, &counter, cpu_online_mask);
+               info = (struct pseries_suspend_info) {
+                       .counter = ATOMIC_INIT(0),
+                       .done = false,
+               };
+
+               ret = stop_machine(do_join, &info, cpu_online_mask);
                if (ret == 0)
                        break;
                /*
index 7b3cdb4..73ee891 100644 (file)
@@ -6,7 +6,7 @@
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-       __u64 tod_steering_delta;
+       __s64 tod_steering_delta;
        __u64 tod_steering_end;
 };
 
index 165da96..326cb8f 100644 (file)
@@ -80,10 +80,12 @@ void __init time_early_init(void)
 {
        struct ptff_qto qto;
        struct ptff_qui qui;
+       int cs;
 
        /* Initialize TOD steering parameters */
        tod_steering_end = tod_clock_base.tod;
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++)
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
 
        if (!test_facility(28))
                return;
@@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta)
 {
        unsigned long now, adj;
        struct ptff_qto qto;
+       int cs;
 
        /* Fixup the monotonic sched clock. */
        tod_clock_base.eitod += delta;
@@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta)
                panic("TOD clock sync offset %li is too large to drift\n",
                      tod_steering_delta);
        tod_steering_end = now + (abs(tod_steering_delta) << 15);
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++) {
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+               vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+       }
 
        /* Update LPAR offset. */
        if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
index c0538f8..57ef209 100644 (file)
@@ -132,6 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+bool wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
index 7bdc023..14cd318 100644 (file)
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
        /*
         * Initialize the ACPI boot-time table parser.
         */
-       if (acpi_table_init()) {
+       if (acpi_locate_initial_tables())
                disable_acpi();
-               return;
-       }
+       else
+               acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+       if (acpi_disabled)
+               return 1;
+
+       acpi_table_init_complete();
 
        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
                } else {
                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
                        disable_acpi();
-                       return;
+                       return 1;
                }
        }
-}
-
-int __init early_acpi_boot_init(void)
-{
-       /*
-        * If acpi_disabled, bail out
-        */
-       if (acpi_disabled)
-               return 1;
 
        /*
         * Process the Multiple APIC Description Table (MADT), if present
index d883176..5ecd69a 100644 (file)
@@ -1045,6 +1045,9 @@ void __init setup_arch(char **cmdline_p)
 
        cleanup_highmap();
 
+       /* Look for ACPI tables and reserve memory occupied by them. */
+       acpi_boot_table_init();
+
        memblock_set_current_limit(ISA_END_ADDRESS);
        e820__memblock_setup();
 
@@ -1136,11 +1139,6 @@ void __init setup_arch(char **cmdline_p)
 
        early_platform_quirks();
 
-       /*
-        * Parse the ACPI tables for possible boot-time SMP configuration.
-        */
-       acpi_boot_table_init();
-
        early_acpi_boot_init();
 
        initmem_init();
index 02813a7..f877150 100644 (file)
@@ -1659,7 +1659,7 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
-static bool wakeup_cpu0(void)
+bool wakeup_cpu0(void)
 {
        if (smp_processor_id() == 0 && enable_start_cpu0)
                return true;
index 1b4766f..eafc4d6 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
index d75524b..486aa94 100644 (file)
@@ -5884,6 +5884,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
        struct kvm_mmu_page *sp;
        unsigned int ratio;
        LIST_HEAD(invalid_list);
+       bool flush = false;
        ulong to_zap;
 
        rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5905,19 +5906,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
                                      lpage_disallowed_link);
                WARN_ON_ONCE(!sp->lpage_disallowed);
                if (is_tdp_mmu_page(sp)) {
-                       kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-                               sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+                       flush = kvm_tdp_mmu_zap_sp(kvm, sp);
                } else {
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                        WARN_ON_ONCE(sp->lpage_disallowed);
                }
 
                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
                        cond_resched_rwlock_write(&kvm->mmu_lock);
+                       flush = false;
                }
        }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 
        write_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, rcu_idx);
index 462b1f7..018d82e 100644 (file)
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield);
+                         gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
        list_del(&root->link);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false);
+       zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
@@ -668,20 +668,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup.  Note, in some use cases a flush may be
+ * required by prior actions.  Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield)
+                         gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
        struct tdp_iter iter;
-       bool flush_needed = false;
 
        rcu_read_lock();
 
        tdp_root_for_each_pte(iter, root, start, end) {
                if (can_yield &&
-                   tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-                       flush_needed = false;
+                   tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+                       flush = false;
                        continue;
                }
 
@@ -699,11 +700,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                        continue;
 
                tdp_mmu_set_spte(kvm, &iter, 0);
-               flush_needed = true;
+               flush = true;
        }
 
        rcu_read_unlock();
-       return flush_needed;
+       return flush;
 }
 
 /*
@@ -712,13 +713,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield)
 {
        struct kvm_mmu_page *root;
        bool flush = false;
 
        for_each_tdp_mmu_root_yield_safe(kvm, root)
-               flush |= zap_gfn_range(kvm, root, start, end, true);
+               flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
 
        return flush;
 }
@@ -930,7 +932,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
 {
-       return zap_gfn_range(kvm, root, start, end, false);
+       return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
index 3b761c1..31096ec 100644 (file)
@@ -8,7 +8,29 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+                                            gfn_t end)
+{
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+       /*
+        * Don't allow yielding, as the caller may have a flush pending.  Note,
+        * if mmu_lock is held for write, zapping will never yield in this case,
+        * but explicitly disallow it for safety.  The TDP MMU does not yield
+        * until it has made forward progress (steps sideways), and when zapping
+        * a single shadow page that it's guaranteed to see (thus the mmu_lock
+        * requirement), its "step sideways" will always step beyond the bounds
+        * of the shadow page's gfn range and stop iterating before yielding.
+        */
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 
 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
index 35891d9..fb204ea 100644 (file)
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
        return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
+       /*
+        * FIXME: these should be done after copying the fields,
+        * to avoid TOC/TOU races.  For these save area checks
+        * the possible damage is limited since kvm_set_cr0 and
+        * kvm_set_cr4 handle failure; EFER_SVME is an exception
+        * so it is force-set later in nested_prepare_vmcb_save.
+        */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;
 
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;
 
-       return nested_vmcb_check_controls(&vmcb12->control);
+       return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+       /*
+        * Force-set EFER_SVME even though it is checked earlier on the
+        * VMCB12, because the guest can flip the bit between the check
+        * and now.  Clearing EFER_SVME would call svm_free_nested.
+        */
+       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
 
        svm->nested.vmcb12_gpa = vmcb12_gpa;
-       load_nested_vmcb_control(svm, &vmcb12->control);
        nested_prepare_vmcb_control(svm);
        nested_prepare_vmcb_save(svm, vmcb12);
 
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;
 
-       if (!nested_vmcb_checks(svm, vmcb12)) {
+       load_nested_vmcb_control(svm, &vmcb12->control);
+
+       if (!nested_vmcb_check_save(svm, vmcb12) ||
+           !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
+       if (!(save->efer & EFER_SVME))
+               goto out_free;
 
        /*
         * All checks done, we can enter guest mode.  L1 control fields
index 035da07..fdf587f 100644 (file)
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
 {
+       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
index fe806e8..eca6362 100644 (file)
@@ -271,8 +271,7 @@ static struct kmem_cache *x86_emulator_cache;
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
  */
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
-                                 u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
 {
        const char *op = write ? "wrmsr" : "rdmsr";
 
@@ -1445,7 +1444,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        if (r == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear the output for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        r = 0;
        }
 
@@ -1620,7 +1619,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
 
        if (ret == KVM_MSR_RET_INVALID)
-               if (kvm_msr_ignored_check(vcpu, index, data, true))
+               if (kvm_msr_ignored_check(index, data, true))
                        ret = 0;
 
        return ret;
@@ -1658,7 +1657,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        if (ret == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear *data for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        ret = 0;
        }
 
@@ -2329,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-       spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
        if (!matched) {
                kvm->arch.nr_vcpus_matched_tsc = 0;
        } else if (!already_matched) {
@@ -2337,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        }
 
        kvm_track_tsc_matching(vcpu);
-       spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2559,13 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        int i;
        struct kvm_vcpu *vcpu;
        struct kvm_arch *ka = &kvm->arch;
+       unsigned long flags;
 
        kvm_hv_invalidate_tsc_page(kvm);
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
        kvm_make_mclock_inprogress_request(kvm);
+
        /* no guest entries from this point */
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        pvclock_update_vm_gtod_copy(kvm);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2573,8 +2575,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        /* guest entries allowed */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
 }
 
@@ -2582,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 {
        struct kvm_arch *ka = &kvm->arch;
        struct pvclock_vcpu_time_info hv_clock;
+       unsigned long flags;
        u64 ret;
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        if (!ka->use_master_clock) {
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
                return get_kvmclock_base_ns() + ka->kvmclock_offset;
        }
 
        hv_clock.tsc_timestamp = ka->master_cycle_now;
        hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
@@ -2686,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         * If the host uses TSC clock, then passthrough TSC as stable
         * to the guest.
         */
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        use_master_clock = ka->use_master_clock;
        if (use_master_clock) {
                host_tsc = ka->master_cycle_now;
                kernel_ns = ka->master_kernel_ns;
        }
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
@@ -5726,6 +5727,7 @@ set_pit2_out:
        }
 #endif
        case KVM_SET_CLOCK: {
+               struct kvm_arch *ka = &kvm->arch;
                struct kvm_clock_data user_ns;
                u64 now_ns;
 
@@ -5744,8 +5746,22 @@ set_pit2_out:
                 * pvclock_update_vm_gtod_copy().
                 */
                kvm_gen_update_masterclock(kvm);
-               now_ns = get_kvmclock_ns(kvm);
-               kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+               /*
+                * This pairs with kvm_guest_time_update(): when masterclock is
+                * in use, we use master_kernel_ns + kvmclock_offset to set
+                * unsigned 'system_time' so if we use get_kvmclock_ns() (which
+                * is slightly ahead) here we risk going negative on unsigned
+                * 'system_time' when 'user_ns.clock' is very small.
+                */
+               spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+               if (kvm->arch.use_master_clock)
+                       now_ns = ka->master_kernel_ns;
+               else
+                       now_ns = get_kvmclock_base_ns();
+               ka->kvmclock_offset = user_ns.clock - now_ns;
+               spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
                kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
                break;
        }
@@ -7724,6 +7740,7 @@ static void kvm_hyperv_tsc_notifier(void)
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int cpu;
+       unsigned long flags;
 
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7739,17 +7756,15 @@ static void kvm_hyperv_tsc_notifier(void)
        list_for_each_entry(kvm, &vm_list, vm_list) {
                struct kvm_arch *ka = &kvm->arch;
 
-               spin_lock(&ka->pvclock_gtod_sync_lock);
-
+               spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
                pvclock_update_vm_gtod_copy(kvm);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
        }
        mutex_unlock(&kvm_lock);
 }
index 39eb048..9035e34 100644 (file)
@@ -250,7 +250,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
 
 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
index c426b84..45cc0ae 100644 (file)
        LOAD_CP_REGS_TAB(6)
        LOAD_CP_REGS_TAB(7)
 
-/*
- * coprocessor_flush(struct thread_info*, index)
- *                             a2        a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area 
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
-       /* reserve 4 bytes on stack to save a0 */
-       abi_entry(4)
-
-       s32i    a0, a1, 0
-       movi    a0, .Lsave_cp_regs_jump_table
-       addx8   a3, a3, a0
-       l32i    a4, a3, 4
-       l32i    a3, a3, 0
-       add     a2, a2, a4
-       beqz    a3, 1f
-       callx0  a3
-1:     l32i    a0, a1, 0
-
-       abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
 /*
  * Entry condition:
  *
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
 
 ENDPROC(fast_coprocessor)
 
+       .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                             a2        a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+       /* reserve 4 bytes on stack to save a0 */
+       abi_entry(4)
+
+       s32i    a0, a1, 0
+       movi    a0, .Lsave_cp_regs_jump_table
+       addx8   a3, a3, a0
+       l32i    a4, a3, 4
+       l32i    a3, a3, 0
+       add     a2, a2, a4
+       beqz    a3, 1f
+       callx0  a3
+1:     l32i    a0, a1, 0
+
+       abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
        .data
 
 ENTRY(coprocessor_owner)
index 7666408..95a7489 100644 (file)
@@ -112,8 +112,11 @@ good_area:
         */
        fault = handle_mm_fault(vma, address, flags, regs);
 
-       if (fault_signal_pending(fault, regs))
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       goto bad_page_fault;
                return;
+       }
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
index 963d1d4..50e5790 100644 (file)
@@ -277,7 +277,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
        struct bio *parent = bio->bi_private;
 
-       if (!parent->bi_status)
+       if (bio->bi_status && !parent->bi_status)
                parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
index 9ebb344..271f659 100644 (file)
@@ -302,7 +302,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
-       RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
index d93e400..768a6b4 100644 (file)
@@ -29,6 +29,7 @@
  */
 #ifdef CONFIG_X86
 #include <asm/apic.h>
+#include <asm/cpu.h>
 #endif
 
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
@@ -541,6 +542,12 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
                        wait_for_freeze();
                } else
                        return -ENODEV;
+
+#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+               /* If NMI wants to wake up CPU0, start CPU0. */
+               if (wakeup_cpu0())
+                       start_cpu0();
+#endif
        }
 
        /* Never reached */
index 84bb7c1..6efe7ed 100644 (file)
@@ -1670,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
        acpi_init_coherency(device);
+       /* Assume there are unmet deps to start with. */
+       device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1933,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
 {
        struct acpi_dep_data *dep;
 
+       adev->dep_unmet = 0;
+
        mutex_lock(&acpi_dep_list_lock);
 
        list_for_each_entry(dep, &acpi_dep_list, node) {
@@ -1980,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
                return AE_CTRL_DEPTH;
 
        acpi_scan_init_hotplug(device);
-       if (!check_dep)
+       /*
+        * If check_dep is true at this point, the device has no dependencies,
+        * or the creation of the device object would have been postponed above.
+        */
+       if (check_dep)
+               device->dep_unmet = 0;
+       else
                acpi_scan_dep_init(device);
 
 out:
index e48690a..9d58104 100644 (file)
@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
 }
 
 /*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
  *
  * find RSDP, find and checksum SDT/XSDT.
  * checksum all tables, print SDT/XSDT
@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
  * result: sdt_entry[] is initialized
  */
 
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
 {
        acpi_status status;
 
@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
        status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
        if (ACPI_FAILURE(status))
                return -EINVAL;
-       acpi_table_initrd_scan();
 
+       return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+       int i;
+
+       for (i = 0; i < ACPI_MAX_TABLES; i++) {
+               struct acpi_table_desc *table_desc = &initial_tables[i];
+               u64 start = table_desc->address;
+               u64 size = table_desc->length;
+
+               if (!start || !size)
+                       break;
+
+               pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+                       table_desc->signature.ascii, start, start + size - 1);
+
+               memblock_reserve(start, size);
+       }
+}
+
+void __init acpi_table_init_complete(void)
+{
+       acpi_table_initrd_scan();
        check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+       int ret;
+
+       ret = acpi_locate_initial_tables();
+       if (ret)
+               return ret;
+
+       acpi_table_init_complete();
+
        return 0;
 }
 
index 9179825..e2cf3b2 100644 (file)
@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
 
                get_device(dev);
 
+               kfree(dev->p->deferred_probe_reason);
+               dev->p->deferred_probe_reason = NULL;
+
                /*
                 * Drop the mutex while probing each device; the probe path may
                 * manipulate the deferred list
index d54e540..fe1dad6 100644 (file)
@@ -1690,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
-                       refcount_inc(&link->rpm_active);
                        pm_runtime_get_sync(link->supplier);
+                       refcount_inc(&link->rpm_active);
                }
 
        device_links_read_unlock(idx);
@@ -1704,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
 void pm_runtime_put_suppliers(struct device *dev)
 {
        struct device_link *link;
+       unsigned long flags;
+       bool put;
        int idx;
 
        idx = device_links_read_lock();
@@ -1712,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
-                       if (refcount_dec_not_one(&link->rpm_active))
+                       spin_lock_irqsave(&dev->power.lock, flags);
+                       put = pm_runtime_status_suspended(dev) &&
+                             refcount_dec_not_one(&link->rpm_active);
+                       spin_unlock_irqrestore(&dev->power.lock, flags);
+                       if (put)
                                pm_runtime_put(link->supplier);
                }
 
index d6c821d..51bfd77 100644 (file)
@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
        }
 
        if (dev->zoned)
-               cmd->error = null_process_zoned_cmd(cmd, op,
-                                                   sector, nr_sectors);
+               sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
        else
-               cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+               sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+       /* Do not overwrite errors (e.g. timeout errors) */
+       if (cmd->error == BLK_STS_OK)
+               cmd->error = sts;
 
 out:
        nullb_complete_cmd(cmd);
@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
 
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
+       struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
        pr_info("rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+
+       /*
+        * If the device is marked as blocking (i.e. memory backed or zoned
+        * device), the submission path may be blocked waiting for resources
+        * and cause real timeouts. For these real timeouts, the submission
+        * path will complete the request using blk_mq_complete_request().
+        * Only fake timeouts need to execute blk_mq_complete_request() here.
+        */
+       cmd->error = BLK_STS_TIMEOUT;
+       if (cmd->fake_timeout)
+               blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
 
@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        cmd->rq = bd->rq;
        cmd->error = BLK_STS_OK;
        cmd->nq = nq;
+       cmd->fake_timeout = should_timeout_request(bd->rq);
 
        blk_mq_start_request(bd->rq);
 
@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                        return BLK_STS_OK;
                }
        }
-       if (should_timeout_request(bd->rq))
+       if (cmd->fake_timeout)
                return BLK_STS_OK;
 
        return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
index 83504f3..4876d5a 100644 (file)
@@ -22,6 +22,7 @@ struct nullb_cmd {
        blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
+       bool fake_timeout;
 };
 
 struct nullb_queue {
index 1cdf09f..14e4528 100644 (file)
@@ -891,7 +891,7 @@ next:
 out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
-               if(i >= last_map + segs_to_map)
+               if(i >= map_until)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }
index d3f756f..67e56cf 100644 (file)
@@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs =     \
 __ATTR_RO(_name##_frequencies)
 
 /*
- * show_scaling_available_frequencies - show available normal frequencies for
+ * scaling_available_frequencies_show - show available normal frequencies for
  * the specified CPU
  */
 static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
@@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available);
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
 /*
- * show_available_boost_freqs - show available boost frequencies for
+ * scaling_boost_frequencies_show - show available boost frequencies for
  * the specified CPU
  */
 static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
index 0a6438c..e7a9561 100644 (file)
@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
                                sizeof(*edev->nh), GFP_KERNEL);
        if (!edev->nh) {
                ret = -ENOMEM;
+               device_unregister(&edev->dev);
                goto err_dev;
        }
 
index 64beb33..a4e2cf7 100644 (file)
@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
                        dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
-               dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+               dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-               dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+               dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->cu_active_number = adev->gfx.cu_info.number;
                dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info->ce_ram_size = adev->gfx.ce_ram_size;
index ad91c0c..7d2c8b1 100644 (file)
@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        uint64_t eaddr;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        int r;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                        after->start = eaddr + 1;
                        after->last = tmp->last;
                        after->offset = tmp->offset;
-                       after->offset += after->start - tmp->start;
+                       after->offset += (after->start - tmp->start) << PAGE_SHIFT;
                        after->flags = tmp->flags;
                        after->bo_va = tmp->bo_va;
                        list_add(&after->list, &tmp->bo_va->invalids);
index b258a3d..159add0 100644 (file)
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
        /* Wait till CP writes sync code: */
        status = amdkfd_fence_wait_timeout(
-                       (unsigned int *) rm_state,
+                       rm_state,
                        QUEUESTATE__ACTIVE, 1500);
 
        kfd_gtt_sa_free(dbgdev->dev, mem_obj);
index e686ce2..4598a9a 100644 (file)
@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
        if (retval)
                goto fail_allocate_vidmem;
 
-       dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+       dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
        init_interrupts(dqm);
@@ -1340,8 +1340,8 @@ out:
        return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                               unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                               uint64_t fence_value,
                                unsigned int timeout_ms)
 {
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
index 7351dd1..45f8159 100644 (file)
@@ -192,7 +192,7 @@ struct device_queue_manager {
        uint16_t                vmid_pasid[VMID_NUM];
        uint64_t                pipelines_addr;
        uint64_t                fence_gpu_addr;
-       unsigned int            *fence_addr;
+       uint64_t                *fence_addr;
        struct kfd_mem_obj      *fence_mem;
        bool                    active_runlist;
        int                     sched_policy;
index 5d541e0..f71a7fa 100644 (file)
@@ -347,7 +347,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                       uint32_t fence_value)
+                       uint64_t fence_value)
 {
        uint32_t *buffer, size;
        int retval = 0;
index dfaf771..e3ba0cd 100644 (file)
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index a852e0d..08442e7 100644 (file)
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index 09599ef..f304d1f 100644 (file)
@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
                       u32 *ctl_stack_used_size,
                       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                             unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                             uint64_t fence_value,
                              unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value);
+                       uint64_t fence_address, uint64_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
        /* Packet sizes */
@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                               uint32_t fence_value);
+                               uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter mode,
index 22b6a8e..d0ec838 100644 (file)
@@ -3330,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
                                                !hwmgr->display_config->multi_monitor_in_sync) ||
-                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+                                               (hwmgr->display_config->num_display &&
+                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
        disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
                                         disable_mclk_switching_for_display;
index 7ddbaec..101eaa2 100644 (file)
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
        ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
        if (ret)
index 1f79bc2..1510e4e 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
index d1a9841..e6a88c8 100644 (file)
@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
 
        ret = drmm_mode_config_init(drm);
        if (ret)
-               return ret;
+               goto err_kms;
 
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret)
index dbfe39e..ffdc492 100644 (file)
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
        int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        drm_panel_prepare(imx_ldb_ch->panel);
 
        if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
        u32 bus_format = imx_ldb_ch->bus_format;
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
                         "%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
                if (!channel->ldb)
-                       break;
+                       continue;
 
                ret = imx_ldb_register(drm, channel);
                if (ret)
index 0ae3a02..134986d 100644 (file)
@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                        dev_err(dc->dev,
                                "failed to set clock rate to %lu Hz\n",
                                state->pclk);
+
+               err = clk_set_rate(dc->clk, state->pclk);
+               if (err < 0)
+                       dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+                               dc->clk, state->pclk, err);
        }
 
        DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
                tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
        }
-
-       err = clk_set_rate(dc->clk, state->pclk);
-       if (err < 0)
-               dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
-                       dc->clk, state->pclk, err);
 }
 
 static void tegra_dc_stop(struct tegra_dc *dc)
@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
         * POWER_CONTROL registers during CRTC enabling.
         */
        if (dc->soc->coupled_pm && dc->pipe == 1) {
-               u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
-               struct device_link *link;
-               struct device *partner;
+               struct device *companion;
+               struct tegra_dc *parent;
 
-               partner = driver_find_device(dc->dev->driver, NULL, NULL,
-                                            tegra_dc_match_by_pipe);
-               if (!partner)
+               companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
+                                              tegra_dc_match_by_pipe);
+               if (!companion)
                        return -EPROBE_DEFER;
 
-               link = device_link_add(dc->dev, partner, flags);
-               if (!link) {
-                       dev_err(dc->dev, "failed to link controllers\n");
-                       return -EINVAL;
-               }
+               parent = dev_get_drvdata(companion);
+               dc->client.parent = &parent->client;
 
-               dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+               dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
        }
 
        return 0;
index f02a035..7b88261 100644 (file)
@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
         * kernel is possible.
         */
        if (sor->rst) {
+               err = pm_runtime_resume_and_get(sor->dev);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+                       return err;
+               }
+
                err = reset_control_acquire(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
                }
 
                reset_control_release(sor->rst);
+               pm_runtime_put(sor->dev);
        }
 
        err = clk_prepare_enable(sor->clk_safe);
index 347fb96..68a766f 100644 (file)
@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
  * @client: host1x client
+ * @key: lock class key for the client-specific mutex
  *
  * Registers a host1x client with each host1x controller instance. Note that
  * each client will only match their parent host1x controller and will only be
@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key)
 {
        struct host1x *host1x;
        int err;
 
        INIT_LIST_HEAD(&client->list);
-       mutex_init(&client->lock);
+       __mutex_init(&client->lock, "host1x client lock", key);
        client->usecount = 0;
 
        mutex_lock(&devices_lock);
@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
 
        return 0;
 }
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
 
 /**
  * host1x_client_unregister() - unregister a host1x client
index 73e2c8d..448cc53 100644 (file)
@@ -53,7 +53,7 @@ void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
 EXPORT_SYMBOL_GPL(icc_bulk_put);
 
 /**
- * icc_bulk_set() - set bandwidth to a set of paths
+ * icc_bulk_set_bw() - set bandwidth to a set of paths
  * @num_paths: the number of icc_bulk_data
  * @paths: the icc_bulk_data table containing the paths and bandwidth
  *
index 5ad519c..8a1e70e 100644 (file)
@@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
                       GFP_KERNEL);
        if (new)
                src->links = new;
+       else
+               ret = -ENOMEM;
 
 out:
        mutex_unlock(&icc_lock);
index dfbec30..20f31a1 100644 (file)
@@ -131,7 +131,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_IN
 DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
 DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
 DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC);
 DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
 DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
 DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
@@ -156,14 +156,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC
 DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
 DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
 DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0);
-DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
 DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
@@ -187,20 +187,20 @@ DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
 DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
 DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
 DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV);
-DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
 DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
 DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
 DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
 DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
 DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
index 4378a9b..2cc370a 100644 (file)
@@ -2286,8 +2286,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
        if (buffer_id == 0)
                return -EINVAL;
 
-       if (!mei_cl_is_connected(cl))
-               return -ENODEV;
+       if (mei_cl_is_connected(cl))
+               return -EPROTO;
 
        if (cl->dma_mapped)
                return -EPROTO;
@@ -2327,9 +2327,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
-                          cl->dma_mapped ||
-                          cl->status ||
-                          !mei_cl_is_connected(cl),
+                          cl->dma_mapped || cl->status,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);
 
@@ -2376,8 +2374,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
                return -EOPNOTSUPP;
        }
 
-       if (!mei_cl_is_connected(cl))
-               return -ENODEV;
+       /* do not allow unmap for connected client */
+       if (mei_cl_is_connected(cl))
+               return -EPROTO;
 
        if (!cl->dma_mapped)
                return -EPROTO;
@@ -2405,9 +2404,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
-                          !cl->dma_mapped ||
-                          cl->status ||
-                          !mei_cl_is_connected(cl),
+                          !cl->dma_mapped || cl->status,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);
 
index 8085782..9f3361c 100644 (file)
@@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
                                gpps[i].gpio_base = 0;
                                break;
                        case INTEL_GPIO_BASE_NOMAP:
+                               break;
                        default:
                                break;
                }
@@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
                gpps[i].size = min(gpp_size, npins);
                npins -= gpps[i].size;
 
+               gpps[i].gpio_base = gpps[i].base;
                gpps[i].padown_num = padown_num;
 
                /*
@@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
                if (IS_ERR(regs))
                        return PTR_ERR(regs);
 
-               /* Determine community features based on the revision */
+               /*
+                * Determine community features based on the revision.
+                * A value of all ones means the device is not present.
+                */
                value = readl(regs + REVID);
+               if (value == ~0u)
+                       return -ENODEV;
                if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) {
                        community->features |= PINCTRL_FEATURE_DEBOUNCE;
                        community->features |= PINCTRL_FEATURE_1K_PD;
index f35edb0..c12fa57 100644 (file)
@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
        /* Type value spread over 2 registers sets: low, high bit */
        sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
                         BIT(addr.port), (!!(type & 0x1)) << addr.port);
-       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
+       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
                         BIT(addr.port), (!!(type & 0x2)) << addr.port);
 
        if (type == SGPIO_INT_TRG_LEVEL)
index aa1a1c8..53a0bad 100644 (file)
@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
 static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
 {
        struct rockchip_pinctrl *info = dev_get_drvdata(dev);
-       int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
-                              rk3288_grf_gpio6c_iomux |
-                              GPIO6C6_SEL_WRITE_ENABLE);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (info->ctrl->type == RK3288) {
+               ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+                                  rk3288_grf_gpio6c_iomux |
+                                  GPIO6C6_SEL_WRITE_ENABLE);
+               if (ret)
+                       return ret;
+       }
 
        return pinctrl_force_default(info->pctl_dev);
 }
index 369ee20..2f19ab4 100644 (file)
@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                          unsigned long *configs, unsigned int nconfs)
 {
        struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
-       unsigned int param, arg, pullup, strength;
+       unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
        bool value, output_enabled = false;
        const struct lpi_pingroup *g;
        unsigned long sval;
index 8daccd5..9d41abf 100644 (file)
@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
        [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
        [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
        [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
-       [175] = UFS_RESET(ufs_reset, 0x1be000),
-       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
-       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
-       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
-       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
-       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
-       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
-       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
+       [175] = UFS_RESET(ufs_reset, 0xbe000),
+       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
+       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
+       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
+       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
+       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
+       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
+       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
 };
 
 static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
index 2b5b0e2..5aaf57b 100644 (file)
@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
 
 static const char * const qdss_stm_groups[] = {
        "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
-       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
+       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
        "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
        "gpio63", "gpio64", "gpio65", "gpio66",
 };
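
A note on the fix above: adjacent string literals concatenate in C, so the missing comma silently merged "gpio19" and "gpio20" into the single entry "gpio19gpio20" and left the table one element short. A standalone sketch of the effect (illustrative only, hypothetical table names):

    #include <stdio.h>

    /* Without the comma, "gpio19" "gpio20" is one literal. */
    static const char *const broken[] = { "gpio19" "gpio20", "gpio21" };
    static const char *const fixed[]  = { "gpio19", "gpio20", "gpio21" };

    int main(void)
    {
            /* Prints "2 3": the broken table is one entry short. */
            printf("%zu %zu\n", sizeof(broken) / sizeof(broken[0]),
                   sizeof(fixed) / sizeof(fixed[0]));
            return 0;
    }
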
index 91074fd..f4bf62b 100644 (file)
@@ -2475,6 +2475,7 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
         */
        mutex_lock(&conn_mutex);
        conn->transport->stop_conn(conn, flag);
+       conn->state = ISCSI_CONN_DOWN;
        mutex_unlock(&conn_mutex);
 
 }
@@ -2901,6 +2902,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        default:
-               err = transport->set_param(conn, ev->u.set_param.param,
-                                          data, ev->u.set_param.len);
+               if ((conn->state == ISCSI_CONN_BOUND) ||
+                       (conn->state == ISCSI_CONN_UP)) {
+                       err = transport->set_param(conn, ev->u.set_param.param,
+                                       data, ev->u.set_param.len);
+               } else {
+                       return -ENOTCONN;
+               }
        }
 
        return err;
@@ -2960,6 +2968,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
                mutex_lock(&conn->ep_mutex);
                conn->ep = NULL;
                mutex_unlock(&conn->ep_mutex);
+               conn->state = ISCSI_CONN_DOWN;
        }
 
        transport->ep_disconnect(ep);
@@ -3727,6 +3736,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                ev->r.retcode = transport->bind_conn(session, conn,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
+               if (!ev->r.retcode)
+                       conn->state = ISCSI_CONN_BOUND;
                mutex_unlock(&conn_mutex);
 
                if (ev->r.retcode || !transport->ep_connect)
@@ -3966,7 +3977,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
 static const char *const connection_state_names[] = {
        [ISCSI_CONN_UP] = "up",
        [ISCSI_CONN_DOWN] = "down",
-       [ISCSI_CONN_FAILED] = "failed"
+       [ISCSI_CONN_FAILED] = "failed",
+       [ISCSI_CONN_BOUND] = "bound"
 };
 
 static ssize_t show_conn_state(struct device *dev,
index f42954e..1fd29f9 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/acpi.h>
 #include <linux/clk.h>
-#include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
@@ -92,14 +91,11 @@ struct geni_wrapper {
        struct device *dev;
        void __iomem *base;
        struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
-       struct geni_icc_path to_core;
 };
 
 static const char * const icc_path_names[] = {"qup-core", "qup-config",
                                                "qup-memory"};
 
-static struct geni_wrapper *earlycon_wrapper;
-
 #define QUP_HW_VER_REG                 0x4
 
 /* Common SE registers */
@@ -843,44 +839,11 @@ int geni_icc_disable(struct geni_se *se)
 }
 EXPORT_SYMBOL(geni_icc_disable);
 
-void geni_remove_earlycon_icc_vote(void)
-{
-       struct platform_device *pdev;
-       struct geni_wrapper *wrapper;
-       struct device_node *parent;
-       struct device_node *child;
-
-       if (!earlycon_wrapper)
-               return;
-
-       wrapper = earlycon_wrapper;
-       parent = of_get_next_parent(wrapper->dev->of_node);
-       for_each_child_of_node(parent, child) {
-               if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
-                       continue;
-
-               pdev = of_find_device_by_node(child);
-               if (!pdev)
-                       continue;
-
-               wrapper = platform_get_drvdata(pdev);
-               icc_put(wrapper->to_core.path);
-               wrapper->to_core.path = NULL;
-
-       }
-       of_node_put(parent);
-
-       earlycon_wrapper = NULL;
-}
-EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
-
 static int geni_se_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct geni_wrapper *wrapper;
-       struct console __maybe_unused *bcon;
-       bool __maybe_unused has_earlycon = false;
        int ret;
 
        wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -903,43 +866,6 @@ static int geni_se_probe(struct platform_device *pdev)
                }
        }
 
-#ifdef CONFIG_SERIAL_EARLYCON
-       for_each_console(bcon) {
-               if (!strcmp(bcon->name, "qcom_geni")) {
-                       has_earlycon = true;
-                       break;
-               }
-       }
-       if (!has_earlycon)
-               goto exit;
-
-       wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
-       if (IS_ERR(wrapper->to_core.path))
-               return PTR_ERR(wrapper->to_core.path);
-       /*
-        * Put minmal BW request on core clocks on behalf of early console.
-        * The vote will be removed earlycon exit function.
-        *
-        * Note: We are putting vote on each QUP wrapper instead only to which
-        * earlycon is connected because QUP core clock of different wrapper
-        * share same voltage domain. If core1 is put to 0, then core2 will
-        * also run at 0, if not voted. Default ICC vote will be removed ASA
-        * we touch any of the core clock.
-        * core1 = core2 = max(core1, core2)
-        */
-       ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
-                               GENI_DEFAULT_BW);
-       if (ret) {
-               dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
-                       __func__, ret);
-               return ret;
-       }
-
-       if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
-               earlycon_wrapper = wrapper;
-       of_node_put(pdev->dev.of_node);
-exit:
-#endif
        dev_set_drvdata(dev, wrapper);
        dev_dbg(dev, "GENI SE Driver probed\n");
        return devm_of_platform_populate(dev);
index b84f00b..4cabaf2 100644 (file)
@@ -1105,7 +1105,7 @@ struct rtllib_network {
        bool    bWithAironetIE;
        bool    bCkipSupported;
        bool    bCcxRmEnable;
-       u16     CcxRmState[2];
+       u8      CcxRmState[2];
        bool    bMBssidValid;
        u8      MBssidMask;
        u8      MBssid[ETH_ALEN];
index 66c1353..15bbb63 100644 (file)
@@ -1967,7 +1967,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
            info_element->data[2] == 0x96 &&
            info_element->data[3] == 0x01) {
                if (info_element->len == 6) {
-                       memcpy(network->CcxRmState, &info_element[4], 2);
+                       memcpy(network->CcxRmState, &info_element->data[4], 2);
                        if (network->CcxRmState[0] != 0)
                                network->bCcxRmEnable = true;
                        else
index 291649f..0d85b55 100644 (file)
@@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
                                                      struct console *con) { }
 #endif
 
-static int qcom_geni_serial_earlycon_exit(struct console *con)
-{
-       geni_remove_earlycon_icc_vote();
-       return 0;
-}
-
 static struct qcom_geni_private_data earlycon_private_data;
 
 static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
@@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
        writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
 
        dev->con->write = qcom_geni_serial_earlycon_write;
-       dev->con->exit = qcom_geni_serial_earlycon_exit;
        dev->con->setup = NULL;
        qcom_geni_serial_enable_early_read(&se, dev->con);
 
index 39ddb55..3fda1ec 100644 (file)
@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
 #define acm_send_break(acm, ms) \
        acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
 
-static void acm_kill_urbs(struct acm *acm)
+static void acm_poison_urbs(struct acm *acm)
 {
        int i;
 
-       usb_kill_urb(acm->ctrlurb);
+       usb_poison_urb(acm->ctrlurb);
        for (i = 0; i < ACM_NW; i++)
-               usb_kill_urb(acm->wb[i].urb);
+               usb_poison_urb(acm->wb[i].urb);
        for (i = 0; i < acm->rx_buflimit; i++)
-               usb_kill_urb(acm->read_urbs[i]);
+               usb_poison_urb(acm->read_urbs[i]);
+}
+
+static void acm_unpoison_urbs(struct acm *acm)
+{
+       int i;
+
+       for (i = 0; i < acm->rx_buflimit; i++)
+               usb_unpoison_urb(acm->read_urbs[i]);
+       for (i = 0; i < ACM_NW; i++)
+               usb_unpoison_urb(acm->wb[i].urb);
+       usb_unpoison_urb(acm->ctrlurb);
 }
 
+
 /*
  * Write buffer management.
  * All of these assume proper locks taken by the caller.
@@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
 
        rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
        if (rc < 0) {
-               dev_err(&acm->data->dev,
-                       "%s - usb_submit_urb(write bulk) failed: %d\n",
-                       __func__, rc);
+               if (rc != -EPERM)
+                       dev_err(&acm->data->dev,
+                               "%s - usb_submit_urb(write bulk) failed: %d\n",
+                               __func__, rc);
                acm_write_done(acm, wb);
        }
        return rc;
@@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
                        acm->iocount.dsr++;
                if (difference & ACM_CTRL_DCD)
                        acm->iocount.dcd++;
-               if (newctrl & ACM_CTRL_BRK)
+               if (newctrl & ACM_CTRL_BRK) {
                        acm->iocount.brk++;
+                       tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
+               }
                if (newctrl & ACM_CTRL_RI)
                        acm->iocount.rng++;
                if (newctrl & ACM_CTRL_FRAMING)
@@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb)
        dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
                rb->index, urb->actual_length, status);
 
-       if (!acm->dev) {
-               dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
-               return;
-       }
-
        switch (status) {
        case 0:
                usb_mark_last_busy(acm->dev);
@@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
 
        res = acm_set_control(acm, val);
        if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
-               dev_err(&acm->control->dev, "failed to set dtr/rts\n");
+               /* This is broken in too many devices to spam the logs */
+               dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
 }
 
 static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port)
         * Need to grab write_lock to prevent race with resume, but no need to
         * hold it due to the tty-port initialised flag.
         */
+       acm_poison_urbs(acm);
        spin_lock_irq(&acm->write_lock);
        spin_unlock_irq(&acm->write_lock);
 
@@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port)
                usb_autopm_put_interface_async(acm->control);
        }
 
-       acm_kill_urbs(acm);
+       acm_unpoison_urbs(acm);
+
 }
 
 static void acm_tty_cleanup(struct tty_struct *tty)
@@ -1296,13 +1309,6 @@ skip_normal_probe:
        if (!combined_interfaces && intf != control_interface)
                return -ENODEV;
 
-       if (!combined_interfaces && usb_interface_claimed(data_interface)) {
-               /* valid in this context */
-               dev_dbg(&intf->dev, "The data interface isn't available\n");
-               return -EBUSY;
-       }
-
-
        if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
            control_interface->cur_altsetting->desc.bNumEndpoints == 0)
                return -EINVAL;
@@ -1323,8 +1329,8 @@ made_compressed_probe:
        dev_dbg(&intf->dev, "interfaces are valid\n");
 
        acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
-       if (acm == NULL)
-               goto alloc_fail;
+       if (!acm)
+               return -ENOMEM;
 
        tty_port_init(&acm->port);
        acm->port.ops = &acm_port_ops;
@@ -1341,7 +1347,7 @@ made_compressed_probe:
 
        minor = acm_alloc_minor(acm);
        if (minor < 0)
-               goto alloc_fail1;
+               goto err_put_port;
 
        acm->minor = minor;
        acm->dev = usb_dev;
@@ -1372,15 +1378,15 @@ made_compressed_probe:
 
        buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
        if (!buf)
-               goto alloc_fail1;
+               goto err_put_port;
        acm->ctrl_buffer = buf;
 
        if (acm_write_buffers_alloc(acm) < 0)
-               goto alloc_fail2;
+               goto err_free_ctrl_buffer;
 
        acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
        if (!acm->ctrlurb)
-               goto alloc_fail3;
+               goto err_free_write_buffers;
 
        for (i = 0; i < num_rx_buf; i++) {
                struct acm_rb *rb = &(acm->read_buffers[i]);
@@ -1389,13 +1395,13 @@ made_compressed_probe:
                rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
                                                                &rb->dma);
                if (!rb->base)
-                       goto alloc_fail4;
+                       goto err_free_read_urbs;
                rb->index = i;
                rb->instance = acm;
 
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb)
-                       goto alloc_fail4;
+                       goto err_free_read_urbs;
 
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
                urb->transfer_dma = rb->dma;
@@ -1416,8 +1422,8 @@ made_compressed_probe:
                struct acm_wb *snd = &(acm->wb[i]);
 
                snd->urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (snd->urb == NULL)
-                       goto alloc_fail5;
+               if (!snd->urb)
+                       goto err_free_write_urbs;
 
                if (usb_endpoint_xfer_int(epwrite))
                        usb_fill_int_urb(snd->urb, usb_dev, acm->out,
@@ -1435,7 +1441,7 @@ made_compressed_probe:
 
        i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
        if (i < 0)
-               goto alloc_fail5;
+               goto err_free_write_urbs;
 
        if (h.usb_cdc_country_functional_desc) { /* export the country data */
                struct usb_cdc_country_functional_desc * cfd =
@@ -1480,20 +1486,21 @@ skip_countries:
        acm->nb_index = 0;
        acm->nb_size = 0;
 
-       dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
-
        acm->line.dwDTERate = cpu_to_le32(9600);
        acm->line.bDataBits = 8;
        acm_set_line(acm, &acm->line);
 
-       usb_driver_claim_interface(&acm_driver, data_interface, acm);
-       usb_set_intfdata(data_interface, acm);
+       if (!acm->combined_interfaces) {
+               rv = usb_driver_claim_interface(&acm_driver, data_interface, acm);
+               if (rv)
+                       goto err_remove_files;
+       }
 
        tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
                        &control_interface->dev);
        if (IS_ERR(tty_dev)) {
                rv = PTR_ERR(tty_dev);
-               goto alloc_fail6;
+               goto err_release_data_interface;
        }
 
        if (quirks & CLEAR_HALT_CONDITIONS) {
@@ -1501,32 +1508,39 @@ skip_countries:
                usb_clear_halt(usb_dev, acm->out);
        }
 
+       dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
+
        return 0;
-alloc_fail6:
+
+err_release_data_interface:
+       if (!acm->combined_interfaces) {
+               /* Clear driver data so that disconnect() returns early. */
+               usb_set_intfdata(data_interface, NULL);
+               usb_driver_release_interface(&acm_driver, data_interface);
+       }
+err_remove_files:
        if (acm->country_codes) {
                device_remove_file(&acm->control->dev,
                                &dev_attr_wCountryCodes);
                device_remove_file(&acm->control->dev,
                                &dev_attr_iCountryCodeRelDate);
-               kfree(acm->country_codes);
        }
        device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
-alloc_fail5:
-       usb_set_intfdata(intf, NULL);
+err_free_write_urbs:
        for (i = 0; i < ACM_NW; i++)
                usb_free_urb(acm->wb[i].urb);
-alloc_fail4:
+err_free_read_urbs:
        for (i = 0; i < num_rx_buf; i++)
                usb_free_urb(acm->read_urbs[i]);
        acm_read_buffers_free(acm);
        usb_free_urb(acm->ctrlurb);
-alloc_fail3:
+err_free_write_buffers:
        acm_write_buffers_free(acm);
-alloc_fail2:
+err_free_ctrl_buffer:
        usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
-alloc_fail1:
+err_put_port:
        tty_port_put(&acm->port);
-alloc_fail:
+
        return rv;
 }
 
@@ -1540,8 +1554,14 @@ static void acm_disconnect(struct usb_interface *intf)
        if (!acm)
                return;
 
-       mutex_lock(&acm->mutex);
        acm->disconnected = true;
+       /*
+        * There is a circular dependency: acm_softint() can resubmit
+        * the URBs in error handling, so we need to block any
+        * submission right away.
+        */
+       acm_poison_urbs(acm);
+       mutex_lock(&acm->mutex);
        if (acm->country_codes) {
                device_remove_file(&acm->control->dev,
                                &dev_attr_wCountryCodes);
@@ -1560,7 +1580,6 @@ static void acm_disconnect(struct usb_interface *intf)
                tty_kref_put(tty);
        }
 
-       acm_kill_urbs(acm);
        cancel_delayed_work_sync(&acm->dwork);
 
        tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1602,7 +1621,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
        if (cnt)
                return 0;
 
-       acm_kill_urbs(acm);
+       acm_poison_urbs(acm);
        cancel_delayed_work_sync(&acm->dwork);
        acm->urbs_in_error_delay = 0;
 
@@ -1615,6 +1634,7 @@ static int acm_resume(struct usb_interface *intf)
        struct urb *urb;
        int rv = 0;
 
+       acm_unpoison_urbs(acm);
        spin_lock_irq(&acm->write_lock);
 
        if (--acm->susp_count)
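
The poison/unpoison pairing introduced in this file is the standard remedy when a completion handler may resubmit: usb_poison_urb() cancels the URB and makes further usb_submit_urb() calls on it fail with -EPERM until usb_unpoison_urb() lifts the block, which is also why the write path above stops logging -EPERM. A minimal sketch of the pattern, with a hypothetical driver and field names:

    #include <linux/usb.h>

    struct example_dev {
            struct urb *bulk_urb;
    };

    static int example_suspend(struct usb_interface *intf, pm_message_t message)
    {
            struct example_dev *dev = usb_get_intfdata(intf);

            /* Cancels the URB and blocks resubmission from completions. */
            usb_poison_urb(dev->bulk_urb);
            return 0;
    }

    static int example_resume(struct usb_interface *intf)
    {
            struct example_dev *dev = usb_get_intfdata(intf);

            usb_unpoison_urb(dev->bulk_urb);        /* submission allowed again */
            return usb_submit_urb(dev->bulk_urb, GFP_NOIO);
    }
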
index 6ade3da..76ac5d6 100644 (file)
@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* Fibocom L850-GL LTE Modem */
+       { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index fc3269f..1a9789e 100644 (file)
@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
        if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
                goto unlock;
 
-       if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
+       if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
+           hsotg->flags.b.port_connect_status == 0)
                goto skip_power_saving;
 
        /*
@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hsotg, hprt0, HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
index 3d3918a..4c5c697 100644 (file)
@@ -120,6 +120,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
 static const struct property_entry dwc3_pci_mrfld_properties[] = {
        PROPERTY_ENTRY_STRING("dr_mode", "otg"),
        PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
        PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
        {}
 };
index fcaf044..3de291a 100644 (file)
@@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
        struct device *dev = qcom->dev;
        int ret;
 
+       if (has_acpi_companion(dev))
+               return 0;
+
        qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
        if (IS_ERR(qcom->icc_path_ddr)) {
                dev_err(dev, "failed to get usb-ddr path: %ld\n",
index 4a337f3..c7ef218 100644 (file)
@@ -791,10 +791,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
        reg &= ~DWC3_DALEPENA_EP(dep->number);
        dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
-       dep->stream_capable = false;
-       dep->type = 0;
-       dep->flags = 0;
-
        /* Clear out the ep descriptors for non-ep0 */
        if (dep->number > 1) {
                dep->endpoint.comp_desc = NULL;
@@ -803,6 +799,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 
        dwc3_remove_requests(dwc, dep);
 
+       dep->stream_capable = false;
+       dep->type = 0;
+       dep->flags = 0;
+
        return 0;
 }
 
@@ -2083,7 +2083,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
        u32                     reg;
 
        speed = dwc->gadget_max_speed;
-       if (speed > dwc->maximum_speed)
+       if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
                speed = dwc->maximum_speed;
 
        if (speed == USB_SPEED_SUPER_PLUS &&
@@ -2523,6 +2523,7 @@ static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
        unsigned long           flags;
 
        spin_lock_irqsave(&dwc->lock, flags);
+       dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
        dwc->gadget_ssp_rate = rate;
        spin_unlock_irqrestore(&dwc->lock, flags);
 }
index 8d387e0..c80f9bd 100644 (file)
@@ -153,6 +153,11 @@ static int udc_pci_probe(
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
+       dev->phys_addr = resource;
+       dev->irq = pdev->irq;
+       dev->pdev = pdev;
+       dev->dev = &pdev->dev;
+
        /* init dma pools */
        if (use_dma) {
                retval = init_dma_pools(dev);
@@ -160,11 +165,6 @@ static int udc_pci_probe(
                        goto err_dma;
        }
 
-       dev->phys_addr = resource;
-       dev->irq = pdev->irq;
-       dev->pdev = pdev;
-       dev->dev = &pdev->dev;
-
        /* general probing */
        if (udc_probe(dev)) {
                retval = -ENODEV;
index fe010cc..2f27dc0 100644 (file)
@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
        xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
        if (mtk->lpm_support)
                xhci->quirks |= XHCI_LPM_SUPPORT;
+
+       /*
+        * MTK xHCI 0.96: PSA is 1 by default even if streams are not
+        * supported, and it's 3 when they are.
+        */
+       if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
 }
 
 /* called during probe() after chip reset completes */
@@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
        if (ret)
                goto put_usb3_hcd;
 
-       if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+       if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+           !(xhci->quirks & XHCI_BROKEN_STREAMS))
                xhci->shared_hcd->can_do_streams = 1;
 
        ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
index 1cd8772..fc0457d 100644 (file)
@@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
        case MUSB_QUIRK_B_DISCONNECT_99:
-               musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
-               schedule_delayed_work(&musb->irq_work,
-                                     msecs_to_jiffies(1000));
-               break;
+               if (musb->quirk_retries && !musb->flush_irq_work) {
+                       musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+                       schedule_delayed_work(&musb->irq_work,
+                                             msecs_to_jiffies(1000));
+                       musb->quirk_retries--;
+                       break;
+               }
+               fallthrough;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
index 3209b5d..a20a838 100644 (file)
@@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                pr_err("invalid port number %d\n", wIndex);
                                goto error;
                        }
+                       if (wValue >= 32)
+                               goto error;
                        if (hcd->speed == HCD_USB3) {
                                if ((vhci_hcd->port_status[rhport] &
                                     USB_SS_PORT_STAT_POWER) != 0) {
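
The new wValue bound guards the later `1 << wValue` shifts into vhci's 32-bit port-status word: a feature selector of 32 or more would be an undefined shift. The same guard in isolation, as a hypothetical helper:

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical helper: set one feature bit in a 32-bit status word. */
    static int example_set_port_feature(u32 *port_status, u16 w_value)
    {
            if (w_value >= 32)
                    return -EINVAL; /* 1 << w_value would be undefined */

            *port_status |= BIT(w_value);
            return 0;
    }
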
index ac3c1dd..4abddbe 100644 (file)
@@ -42,6 +42,6 @@ config VFIO_PCI_IGD
 
 config VFIO_PCI_NVLINK2
        def_bool y
-       depends on VFIO_PCI && PPC_POWERNV
+       depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
        help
          VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
index be44440..45cbfd4 100644 (file)
@@ -739,6 +739,12 @@ out:
        ret = vfio_lock_acct(dma, lock_acct, false);
 
 unpin_out:
+       if (batch->size == 1 && !batch->offset) {
+               /* May be a VM_PFNMAP pfn, which the batch can't remember. */
+               put_pfn(pfn, dma->prot);
+               batch->size = 0;
+       }
+
        if (ret < 0) {
                if (pinned && !rsvd) {
                        for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
index 44a5cd2..3406067 100644 (file)
@@ -1333,6 +1333,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
 
        ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
 
+       if (!ops->cursor)
+               return;
+
        ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
                    get_color(vc, info, c, 0));
 }
index c8b0ae6..4dc9077 100644 (file)
@@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
                        PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
                if (!pdev) {
                        pr_err("Unable to find PCI Hyper-V video\n");
-                       kfree(info->apertures);
                        return -ENODEV;
                }
 
@@ -1129,7 +1128,6 @@ getmem_done:
        } else {
                pci_dev_put(pdev);
        }
-       kfree(info->apertures);
 
        return 0;
 
@@ -1141,7 +1139,6 @@ err2:
 err1:
        if (!gen2vm)
                pci_dev_put(pdev);
-       kfree(info->apertures);
 
        return -ENOMEM;
 }
index 28d583f..09d6f72 100644 (file)
@@ -275,6 +275,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
+       if (iocb->ki_flags & IOCB_NOWAIT)
+               bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);
 
@@ -428,6 +430,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       bio->bi_opf |= REQ_NOWAIT;
 
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;
index 7434eb4..433c4d3 100644 (file)
@@ -484,7 +484,7 @@ static int io_wqe_worker(void *data)
        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
        io_wqe_inc_running(worker);
 
-       sprintf(buf, "iou-wrk-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
@@ -711,7 +711,7 @@ static int io_wq_manager(void *data)
        char buf[TASK_COMM_LEN];
        int node;
 
-       sprintf(buf, "iou-mgr-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        do {
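
The thread-naming hunks in this file swap sprintf() for snprintf() so the formatted name can never overrun the TASK_COMM_LEN stack buffer; snprintf() truncates instead of writing past the end. A standalone userspace illustration (TASK_COMM_LEN of 16 assumed):

    #include <stdio.h>

    #define TASK_COMM_LEN 16        /* assumed kernel value */

    int main(void)
    {
            char buf[TASK_COMM_LEN];

            /* Writes at most sizeof(buf) bytes, NUL terminator included. */
            snprintf(buf, sizeof(buf), "iou-wrk-%d", 123456789);
            puts(buf);      /* "iou-wrk-1234567", safely truncated */
            return 0;
    }
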
index 1949b80..8be5420 100644 (file)
@@ -697,6 +697,7 @@ enum {
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_LTIMEOUT_ACTIVE_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
+       REQ_F_REISSUE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -740,6 +741,8 @@ enum {
        REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
        /* completion is deferred through io_comp_state */
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
+       /* caller should reissue async */
+       REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
 };
 
 struct async_poll {
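
REQ_F_REISSUE follows io_uring's two-enum flag convention: bit indices live in the first enum and the masks are derived from them with BIT() in the second, so the two lists stay in lockstep. A stripped-down sketch of the pattern with hypothetical flags:

    #include <linux/bits.h>

    enum {
            EXAMPLE_FOO_BIT,
            EXAMPLE_REISSUE_BIT,

            /* not a real bit, just a bound check like __REQ_F_LAST_BIT */
            __EXAMPLE_LAST_BIT,
    };

    enum {
            EXAMPLE_FOO     = BIT(EXAMPLE_FOO_BIT),
            /* caller should reissue async */
            EXAMPLE_REISSUE = BIT(EXAMPLE_REISSUE_BIT),
    };
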
@@ -1213,7 +1216,7 @@ static void io_prep_async_work(struct io_kiocb *req)
        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
                        io_wq_hash_work(&req->work, file_inode(req->file));
-       } else {
+       } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
@@ -2503,8 +2506,10 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 
        if (req->rw.kiocb.ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
-       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+               req->flags |= REQ_F_REISSUE;
                return;
+       }
        if (res != req->result)
                req_set_fail_links(req);
        if (req->flags & REQ_F_BUFFER_SELECTED)
@@ -3283,11 +3288,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = io_iter_do_read(req, iter);
 
-       if (ret == -EIOCBQUEUED) {
-               if (req->async_data)
-                       iov_iter_revert(iter, io_size - iov_iter_count(iter));
-               goto out_free;
-       } else if (ret == -EAGAIN) {
+       if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
@@ -3297,6 +3298,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* some cases will consume bytes even on error returns */
                iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
+       } else if (ret == -EIOCBQUEUED) {
+               goto out_free;
        } else if (ret <= 0 || ret == io_size || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
                /* read all, failed, already did sync or don't want to retry */
@@ -3409,6 +3412,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
+       if (req->flags & REQ_F_REISSUE)
+               ret2 = -EAGAIN;
+
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
@@ -3418,8 +3424,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
-       if (ret2 == -EIOCBQUEUED && req->async_data)
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
@@ -6164,6 +6168,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
                ret = -ECANCELED;
 
        if (!ret) {
+               req->flags &= ~REQ_F_REISSUE;
                do {
                        ret = io_issue_sqe(req, 0);
                        /*
@@ -6718,7 +6723,7 @@ static int io_sq_thread(void *data)
        char buf[TASK_COMM_LEN];
        DEFINE_WAIT(wait);
 
-       sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+       snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
        set_task_comm(current, buf);
        current->pf_io_worker = NULL;
 
@@ -6733,22 +6738,25 @@ static int io_sq_thread(void *data)
                int ret;
                bool cap_entries, sqt_spin, needs_sched;
 
-               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+                   signal_pending(current)) {
+                       bool did_sig = false;
+
                        mutex_unlock(&sqd->lock);
+                       if (signal_pending(current)) {
+                               struct ksignal ksig;
+
+                               did_sig = get_signal(&ksig);
+                       }
                        cond_resched();
                        mutex_lock(&sqd->lock);
+                       if (did_sig)
+                               break;
                        io_run_task_work();
                        io_run_task_work_head(&sqd->park_task_work);
                        timeout = jiffies + sqd->sq_thread_idle;
                        continue;
                }
-               if (signal_pending(current)) {
-                       struct ksignal ksig;
-
-                       if (!get_signal(&ksig))
-                               continue;
-                       break;
-               }
                sqt_spin = false;
                cap_entries = !list_is_singular(&sqd->ctx_list);
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -8603,9 +8611,9 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
                        canceled++;
                }
        }
-       io_commit_cqring(ctx);
+       if (canceled != 0)
+               io_commit_cqring(ctx);
        spin_unlock_irq(&ctx->completion_lock);
-
        if (canceled != 0)
                io_cqring_ev_posted(ctx);
        return canceled != 0;
@@ -9002,6 +9010,8 @@ void __io_uring_task_cancel(void)
 
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
+       __io_uring_files_cancel(NULL);
+
        do {
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx);
index 9b3b06d..e47fde1 100644 (file)
@@ -44,7 +44,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
 
 static inline int reiserfs_xattrs_initialized(struct super_block *sb)
 {
-       return REISERFS_SB(sb)->priv_root != NULL;
+       return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
 }
 
 #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
index fcdaab7..3bdcfc4 100644 (file)
@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
 void __acpi_unmap_table(void __iomem *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
 void acpi_boot_table_init (void);
 int acpi_mps_check (void);
 int acpi_numa_init (void);
 
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
 int acpi_table_init (void);
 int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
 int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -814,9 +818,12 @@ static inline int acpi_boot_init(void)
        return 0;
 }
 
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
 static inline void acpi_boot_table_init(void)
 {
-       return;
 }
 
 static inline int acpi_mps_check(void)
index bc6bc83..158aefa 100644 (file)
@@ -85,8 +85,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ELVPRIV            ((__force req_flags_t)(1 << 12))
 /* account into disk and partition IO statistics */
 #define RQF_IO_STAT            ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED            ((__force req_flags_t)(1 << 14))
 /* runtime pm request */
 #define RQF_PM                 ((__force req_flags_t)(1 << 15))
 /* on IO scheduler merge hash */
index fd183fb..0c19010 100644 (file)
@@ -271,6 +271,29 @@ static inline  void devm_extcon_unregister_notifier(struct device *dev,
                                struct extcon_dev *edev, unsigned int id,
                                struct notifier_block *nb) { }
 
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+                                              struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+                                                struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+                                                   struct extcon_dev *edev,
+                                                   struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+                                                      struct extcon_dev *edev,
+                                                      struct notifier_block *nb) { }
+
 static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
        return ERR_PTR(-ENODEV);
index ebc2956..19781b0 100644 (file)
@@ -56,7 +56,7 @@
  * COMMAND_RECONFIG_FLAG_PARTIAL:
  * Set to FPGA configuration type (full or partial).
  */
-#define COMMAND_RECONFIG_FLAG_PARTIAL  1
+#define COMMAND_RECONFIG_FLAG_PARTIAL  0
 
 /*
  * Timeout settings for service clients:
index ce59a6a..9eb77c8 100644 (file)
@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key);
+#define host1x_client_register(class) \
+       ({ \
+               static struct lock_class_key __key; \
+               __host1x_client_register(class, &__key); \
+       })
+
 int host1x_client_unregister(struct host1x_client *client);
 
 int host1x_client_suspend(struct host1x_client *client);
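
The rename above is about lockdep rather than locking: a macro-declared static struct lock_class_key gives every host1x_client_register() call site its own lock class for client->lock, so lockdep can tell apart otherwise-identical mutexes that nest. The shape of the pattern, reduced to a hypothetical API:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct example_client {
            struct mutex lock;
    };

    int __example_client_register(struct example_client *client,
                                  struct lock_class_key *key)
    {
            /* Each call site's static key becomes a distinct lockdep class. */
            __mutex_init(&client->lock, "example client lock", key);
            return 0;
    }

    #define example_client_register(client) \
            ({ \
                    static struct lock_class_key __key; \
                    __example_client_register(client, &__key); \
            })
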
index ec2ad4b..c4fdb44 100644 (file)
@@ -460,7 +460,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
 int geni_icc_enable(struct geni_se *se);
 
 int geni_icc_disable(struct geni_se *se);
-
-void geni_remove_earlycon_icc_vote(void);
 #endif
 #endif
index 92c0160..a91e3d9 100644 (file)
@@ -229,9 +229,10 @@ static inline int xa_err(void *entry)
  *
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
  * * xa_limit_32b      - [0 - UINT_MAX]
  * * xa_limit_31b      - [0 - INT_MAX]
+ * * xa_limit_16b      - [0 - USHRT_MAX]
  */
 struct xa_limit {
        u32 max;
@@ -242,6 +243,7 @@ struct xa_limit {
 
 #define xa_limit_32b   XA_LIMIT(0, UINT_MAX)
 #define xa_limit_31b   XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b   XA_LIMIT(0, USHRT_MAX)
 
 typedef unsigned __bitwise xa_mark_t;
 #define XA_MARK_0              ((__force xa_mark_t)0U)
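[Editor's note: a hedged usage sketch for the new limit, allocating an ID that must fit in 16 bits; the surrounding setup is illustrative:]

	DEFINE_XARRAY_ALLOC(xa);	/* XA_FLAGS_ALLOC: IDs start at 0 */
	u32 id;
	int err;

	err = xa_alloc(&xa, &id, entry, xa_limit_16b, GFP_KERNEL);
	if (err)
		return err;	/* -EBUSY once [0, USHRT_MAX] is exhausted */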
index 8a26a2f..fc5a398 100644 (file)
@@ -193,6 +193,7 @@ enum iscsi_connection_state {
        ISCSI_CONN_UP = 0,
        ISCSI_CONN_DOWN,
        ISCSI_CONN_FAILED,
+       ISCSI_CONN_BOUND,
 };
 
 struct iscsi_cls_conn {
index ac6474e..d0a64ee 100644 (file)
@@ -2,29 +2,6 @@
 #ifndef _UAPI__LINUX_BLKPG_H
 #define _UAPI__LINUX_BLKPG_H
 
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- *    get_whole_disk()         (given the device number of a partition,
- *                               find the device number of the encompassing disk)
- *    get_all_partitions()     (given the device number of a disk, return the
- *                              device numbers of all its known partitions)
- *
- * Partition stuff:
- *    add_partition()
- *    delete_partition()
- *    test_partition_in_use()  (also for test_disk_in_use)
- *
- * Geometry stuff:
- *    get_geometry()
- *    set_geometry()
- *    get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
 
@@ -52,9 +29,8 @@ struct blkpg_partition {
        long long start;                /* starting offset in bytes */
        long long length;               /* length in bytes */
        int pno;                        /* partition number */
-       char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
-                                          to be used in kernel messages */
-       char volname[BLKPG_VOLNAMELTH]; /* volume label */
+       char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+       char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */
 };
 
 #endif /* _UAPI__LINUX_BLKPG_H */
index b7e29db..3ba52d4 100644 (file)
@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
        pg = start_pg;
        while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
@@ -6451,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
                clear_mod_from_hashes(pg);
 
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
                ftrace_number_of_pages -= 1 << order;
@@ -6811,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                if (!pg->index) {
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       free_pages((unsigned long)pg->records, order);
+                       if (order >= 0)
+                               free_pages((unsigned long)pg->records, order);
                        ftrace_number_of_pages -= 1 << order;
                        ftrace_number_of_groups--;
                        kfree(pg);
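[Editor's note: all three hunks add the same guard for the same reason: get_count_order() returns -1 when its argument is 0 (a page group holding no entries), and calling free_pages() with a negative order is invalid. A minimal sketch of the failure mode being avoided:]

	int order;

	order = get_count_order(0);		/* empty group: returns -1 */
	if (order >= 0)				/* the guard added above */
		free_pages(addr, order);	/* skipped when order < 0 */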
index eccb4e1..5c77762 100644 (file)
@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 
        size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                           sizeof(*entry) + size, trace_ctx);
+                                   (sizeof(*entry) - sizeof(entry->caller)) + size,
+                                   trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
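[Editor's note: the reservation shrinks because struct stack_entry already ends in a fixed-size caller[] array; reserving the full sizeof(*entry) on top of the dynamically sized stack dump counted that array twice and wasted ring-buffer space. Roughly, as a hedged sketch of the layout assumption rather than the exact upstream definition:]

	struct stack_entry {
		struct trace_entry	ent;
		int			size;
		unsigned long		caller[8];	/* fixed-size tail */
	};
	/* bytes needed = header before caller[] + the real stack dump:
	 * (sizeof(*entry) - sizeof(entry->caller)) + nr_entries * sizeof(long) */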
index 8294f43..8b1c318 100644 (file)
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
 
 #ifdef CONFIG_XARRAY_MULTI
 static void check_split_1(struct xarray *xa, unsigned long index,
-                                                       unsigned int order)
+                               unsigned int order, unsigned int new_order)
 {
-       XA_STATE(xas, xa, index);
-       void *entry;
-       unsigned int i = 0;
+       XA_STATE_ORDER(xas, xa, index, new_order);
+       unsigned int i;
 
        xa_store_order(xa, index, order, xa, GFP_KERNEL);
 
        xas_split_alloc(&xas, xa, order, GFP_KERNEL);
        xas_lock(&xas);
        xas_split(&xas, xa, order);
+       for (i = 0; i < (1 << order); i += (1 << new_order))
+               __xa_store(xa, index + i, xa_mk_index(index + i), 0);
        xas_unlock(&xas);
 
-       xa_for_each(xa, index, entry) {
-               XA_BUG_ON(xa, entry != xa);
-               i++;
+       for (i = 0; i < (1 << order); i++) {
+               unsigned int val = index + (i & ~((1 << new_order) - 1));
+               XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
        }
-       XA_BUG_ON(xa, i != 1 << order);
 
        xa_set_mark(xa, index, XA_MARK_0);
        XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
 
 static noinline void check_split(struct xarray *xa)
 {
-       unsigned int order;
+       unsigned int order, new_order;
 
        XA_BUG_ON(xa, !xa_empty(xa));
 
        for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
-               check_split_1(xa, 0, order);
-               check_split_1(xa, 1UL << order, order);
-               check_split_1(xa, 3UL << order, order);
+               for (new_order = 0; new_order < order; new_order++) {
+                       check_split_1(xa, 0, order, new_order);
+                       check_split_1(xa, 1UL << order, order, new_order);
+                       check_split_1(xa, 3UL << order, order, new_order);
+               }
        }
 }
 #else
index 5fa5161..f5d8f54 100644 (file)
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
  * xas_split_alloc() - Allocate memory for splitting an entry.
  * @xas: XArray operation state.
  * @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  * @gfp: Memory allocation flags.
  *
  * This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 
        do {
                unsigned int i;
-               void *sibling;
+               void *sibling = NULL;
                struct xa_node *node;
 
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                for (i = 0; i < XA_CHUNK_SIZE; i++) {
                        if ((i & mask) == 0) {
                                RCU_INIT_POINTER(node->slots[i], entry);
-                               sibling = xa_mk_sibling(0);
+                               sibling = xa_mk_sibling(i);
                        } else {
                                RCU_INIT_POINTER(node->slots[i], sibling);
                        }
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
  * xas_split() - Split a multi-index entry into smaller entries.
  * @xas: XArray operation state.
  * @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  *
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas.  The value in @entry is
+ * copied to all the replacement entries.
  *
  * Context: Any context.  The caller should hold the xa_lock.
  */
index 5efa07f..550405f 100644 (file)
@@ -166,7 +166,7 @@ static int __init init_zero_pfn(void)
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
 }
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
 
 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 {
index 168cd27..2c52535 100644 (file)
@@ -20,6 +20,7 @@ SECTIONS {
 
        __patchable_function_entries : { *(__patchable_function_entries) }
 
+#ifdef CONFIG_LTO_CLANG
        /*
         * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
         * -ffunction-sections, which increases the size of the final module.
@@ -41,6 +42,7 @@ SECTIONS {
        }
 
        .text : { *(.text .text.[0-9a-zA-Z_]*) }
+#endif
 }
 
 /* bring in arch-specific sections */
index 478f757..8dc6133 100644 (file)
@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
 static bool tomoyo_kernel_service(void)
 {
        /* Nothing to do if I am a kernel service. */
-       return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+       return current->flags & PF_KTHREAD;
 }
 
 /**
index b59b0f3..79ade33 100644 (file)
@@ -989,8 +989,12 @@ static int azx_prepare(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return 0;
+
        chip = card->private_data;
        chip->pm_prepared = 1;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
        flush_work(&azx_bus(chip)->unsol_work);
 
@@ -1005,7 +1009,11 @@ static void azx_complete(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return;
+
        chip = card->private_data;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D0);
        chip->pm_prepared = 0;
 }
 
index 316b9b4..58946d0 100644 (file)
@@ -5256,7 +5256,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        case 0x10ec0274:
        case 0x10ec0294:
                alc_process_coef_fw(codec, coef0274);
-               msleep(80);
+               msleep(850);
                val = alc_read_coef_idx(codec, 0x46);
                is_ctia = (val & 0x00f0) == 0x00f0;
                break;
@@ -5440,6 +5440,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
                                       struct hda_jack_callback *jack)
 {
        snd_hda_gen_hp_automute(codec, jack);
+       alc_update_headset_mode(codec);
 }
 
 static void alc_probe_headset_mode(struct hda_codec *codec)
@@ -8057,6 +8058,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
                      ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
index d3001fb..176437a 100644 (file)
@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
        case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
        case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+       case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
                return true;
        }
 
index 71aabaf..8f13b84 100644 (file)
@@ -9,6 +9,7 @@ Type=simple
 ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
 ExecReload=/bin/kill -HUP $MAINPID
 Restart=always
+RestartSec=60s
 SyslogIdentifier=kvm_stat
 SyslogLevel=debug
 
index 3b796dd..ca24f68 100644 (file)
@@ -296,21 +296,34 @@ static void *idr_throbber(void *arg)
        return NULL;
 }
 
+/*
+ * There are always either 1 or 2 objects in the IDR.  If we find nothing,
+ * or we find something at an ID we didn't expect, that's a bug.
+ */
 void idr_find_test_1(int anchor_id, int throbber_id)
 {
        pthread_t throbber;
        time_t start = time(NULL);
 
-       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
        BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
                                anchor_id + 1, GFP_KERNEL) != anchor_id);
 
+       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+       rcu_read_lock();
        do {
                int id = 0;
                void *entry = idr_get_next(&find_idr, &id);
-               BUG_ON(entry != xa_mk_value(id));
+               rcu_read_unlock();
+               if ((id != anchor_id && id != throbber_id) ||
+                   entry != xa_mk_value(id)) {
+                       printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
+                               throbber_id, entry, id);
+                       abort();
+               }
+               rcu_read_lock();
        } while (time(NULL) < start + 11);
+       rcu_read_unlock();
 
        pthread_join(throbber, NULL);
 
@@ -577,6 +590,7 @@ void ida_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        idr_checks();
        ida_tests();
@@ -584,5 +598,6 @@ int __weak main(void)
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
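[Editor's note: the harness changes follow the rules of the userspace RCU shim these tests build against: every thread that enters an RCU read-side critical section must register itself, and the test drops the read lock before printing or aborting. A condensed sketch of the resulting shape:]

	/* sketch: register the thread, read under the lock, drop it
	 * before any reporting: */
	rcu_register_thread();
	rcu_read_lock();
	entry = idr_get_next(&find_idr, &id);
	rcu_read_unlock();	/* safe point for printf()/abort() */
	/* ... */
	rcu_unregister_thread();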
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
deleted file mode 100644 (file)
index e69de29..0000000
index 9eae0fb..e00520c 100644 (file)
@@ -224,7 +224,9 @@ void multiorder_checks(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        multiorder_checks();
+       rcu_unregister_thread();
        return 0;
 }
index e61e43e..f20e12c 100644 (file)
@@ -25,11 +25,13 @@ void xarray_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        xarray_tests();
        radix_tree_cpu_dead(1);
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
index 2f2eeb8..5aadf84 100644 (file)
@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
        vm_create_irqchip(vm);
 
-       fprintf(stderr, "%s: [%d] start vcpus\n", __func__, run);
+       pr_debug("%s: [%d] start vcpus\n", __func__, run);
        for (i = 0; i < VCPU_NUM; ++i) {
                vm_vcpu_add_default(vm, i, guest_code);
                payloads[i].vm = vm;
@@ -124,7 +124,7 @@ static void run_test(uint32_t run)
                        check_set_affinity(throw_away, &cpu_set);
                }
        }
-       fprintf(stderr, "%s: [%d] all threads launched\n", __func__, run);
+       pr_debug("%s: [%d] all threads launched\n", __func__, run);
        sem_post(sem);
        for (i = 0; i < VCPU_NUM; ++i)
                check_join(threads[i], &b);
@@ -147,16 +147,16 @@ int main(int argc, char **argv)
                if (pid == 0)
                        run_test(i); /* This function always exits */
 
-               fprintf(stderr, "%s: [%d] waiting semaphore\n", __func__, i);
+               pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
                sem_wait(sem);
                r = (rand() % DELAY_US_MAX) + 1;
-               fprintf(stderr, "%s: [%d] waiting %dus\n", __func__, i, r);
+               pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
                usleep(r);
                r = waitpid(pid, &s, WNOHANG);
                TEST_ASSERT(r != pid,
                            "%s: [%d] child exited unexpectedly status: [%d]",
                            __func__, i, s);
-               fprintf(stderr, "%s: [%d] killing child\n", __func__, i);
+               pr_debug("%s: [%d] killing child\n", __func__, i);
                kill(pid, SIGKILL);
        }
 
index ffbc455..7f1d276 100644 (file)
@@ -80,19 +80,24 @@ static inline void check_tsc_msr_rdtsc(void)
        GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
 }
 
+static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
+{
+       return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+}
+
 static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
 {
        u64 r1, r2, t1, t2;
 
        /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
-       t1 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+       t1 = get_tscpage_ts(tsc_page);
        r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
 
        /* 10 ms tolerance */
        GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
        nop_loop();
 
-       t2 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+       t2 = get_tscpage_ts(tsc_page);
        r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
        GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);
 }
@@ -130,7 +135,11 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_
 
        tsc_offset = tsc_page->tsc_offset;
        /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
+
        GUEST_SYNC(7);
+       /* Sanity check TSC page timestamp, it should be close to 0 */
+       GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);
+
        GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);
 
        nop_loop();