Merge remote-tracking branch 'torvalds/master' into perf/core
author Arnaldo Carvalho de Melo <acme@redhat.com>
Wed, 11 Aug 2021 18:30:38 +0000 (15:30 -0300)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Wed, 11 Aug 2021 18:30:38 +0000 (15:30 -0300)
To get in sync with upstream to help people developing in this branch.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
290 files changed:
Documentation/networking/netdev-FAQ.rst
Documentation/networking/operstates.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/checksum.h
arch/arc/include/asm/perf_event.h
arch/arc/kernel/fpu.c
arch/arc/kernel/unwind.c
arch/arc/kernel/vmlinux.lds.S
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/imx53-m53menlo.dts
arch/arm/boot/dts/imx6qdl-sr-som.dtsi
arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
arch/arm/mach-imx/common.h
arch/arm/mach-imx/mmdc.c
arch/arm/mach-ixp4xx/Kconfig
arch/arm/mach-omap2/omap_hwmod.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/nvidia/tegra194.dtsi
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/syscall.h
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/syscall.c
arch/mips/Makefile
arch/mips/include/asm/pgalloc.h
arch/mips/mti-malta/malta-platform.c
arch/riscv/Kconfig
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
arch/riscv/include/asm/page.h
arch/riscv/kernel/stacktrace.c
arch/riscv/mm/init.c
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/clz_ctz.c [new file with mode: 0644]
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/kernel/vdso32/vdso32.lds.S
arch/s390/kernel/vdso64/vdso64.lds.S
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/trace.h
arch/x86/kvm/x86.c
arch/x86/tools/relocs.c
block/blk-cgroup.c
block/blk-iolatency.c
block/kyber-iosched.c
block/partitions/ldm.c
drivers/acpi/acpica/nsrepair2.c
drivers/base/dd.c
drivers/base/firmware_loader/fallback.c
drivers/base/firmware_loader/firmware.h
drivers/base/firmware_loader/main.c
drivers/block/n64cart.c
drivers/bus/mhi/core/internal.h
drivers/bus/mhi/core/main.c
drivers/bus/ti-sysc.c
drivers/char/tpm/tpm_ftpm_tee.c
drivers/clk/clk-devres.c
drivers/clk/clk-stm32f4.c
drivers/clk/hisilicon/Kconfig
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/tegra/clk-sdmmc-mux.c
drivers/cpuidle/governors/teo.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/submit.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-dma.c
drivers/dma/of-dma.c
drivers/dma/sh/usb-dmac.c
drivers/dma/stm32-dma.c
drivers/dma/stm32-dmamux.c
drivers/dma/uniphier-xdmac.c
drivers/dma/xilinx/xilinx_dma.c
drivers/firmware/broadcom/tee_bnxt_fw.c
drivers/fpga/dfl-fme-perf.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-tqmx86.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/i915/i915_globals.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/hns/hns_roce_cmd.c
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/interconnect/core.c
drivers/interconnect/qcom/icc-rpmh.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/platform/atmel/Kconfig
drivers/media/platform/atmel/Makefile
drivers/media/platform/atmel/atmel-isc-base.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/marvell/prestera/prestera_devlink.c
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/mhi/net.c
drivers/net/phy/micrel.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/wwan/iosm/iosm_ipc_mmio.h
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
drivers/net/wwan/iosm/iosm_ipc_wwan.c
drivers/net/wwan/mhi_wwan_ctrl.c
drivers/pcmcia/i82092.c
drivers/platform/x86/Kconfig
drivers/platform/x86/dual_accel_detect.h [new file with mode: 0644]
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/pcengines-apuv2.c
drivers/platform/x86/thinkpad_acpi.c
drivers/s390/block/dasd_eckd.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/megaraid/megaraid_mm.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sr.c
drivers/soc/Makefile
drivers/soc/imx/soc-imx8m.c
drivers/soc/ixp4xx/ixp4xx-npe.c
drivers/soc/ixp4xx/ixp4xx-qmgr.c
drivers/soc/tegra/Kconfig
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-imx.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-mux.c
drivers/spi/spi.c
drivers/staging/mt7621-pci/pci-mt7621.c
drivers/staging/rtl8712/hal_init.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/rtl8712/rtl871x_led.h
drivers/staging/rtl8712/rtl871x_pwrctrl.c
drivers/staging/rtl8712/rtl871x_pwrctrl.h
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rtl8723bs/Kconfig
drivers/staging/rtl8723bs/hal/sdio_ops.c
drivers/tee/optee/call.c
drivers/tee/optee/core.c
drivers/tee/optee/optee_private.h
drivers/tee/optee/rpc.c
drivers/tee/optee/shm_pool.c
drivers/tee/tee_shm.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_fsl.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/max310x.c
drivers/tty/serial/serial-tegra.c
drivers/usb/cdns3/cdns3-ep0.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-gadget.h
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/class/usbtmc.c
drivers/usb/common/usb-otg-fsm.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/udc/max3420_udc.c
drivers/usb/host/ohci-at91.c
drivers/usb/musb/omap2430.c
drivers/usb/serial/ch341.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/typec/tcpm/tcpm.c
drivers/virt/acrn/vm.c
fs/ext4/ext4_jbd2.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/io-wq.c
fs/namespace.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/readdir.c
fs/pipe.c
include/linux/mhi.h
include/linux/serial_core.h
include/linux/tee_drv.h
include/linux/usb/otg-fsm.h
include/net/bluetooth/hci_core.h
include/net/flow_offload.h
include/net/ip6_route.h
include/net/netns/xfrm.h
include/net/pkt_cls.h
kernel/cgroup/rstat.c
kernel/events/core.c
kernel/sched/core.c
kernel/time/timer.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_hwlat.c
kernel/tracepoint.c
kernel/ucount.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bridge/br.c
net/bridge/br_fdb.c
net/bridge/br_private.h
net/ipv4/tcp_offload.c
net/ipv4/udp_offload.c
net/mptcp/pm_netlink.c
net/qrtr/mhi.c
net/sched/sch_generic.c
net/sched/sch_taprio.c
net/sctp/auth.c
net/vmw_vsock/virtio_transport_common.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_ipcomp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/checkversion.pl
scripts/recordmcount.pl
scripts/tracing/draw_functrace.py
security/selinux/ss/policydb.c
sound/core/memalloc.c
sound/core/pcm_native.c
sound/core/seq/seq_ports.c
sound/pci/hda/patch_realtek.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/mixer.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/quirks.c
tools/testing/selftests/kvm/include/x86_64/hyperv.h
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/net/ipsec.c
virt/kvm/kvm_main.c

index 91b2cf7..e26532f 100644 (file)
@@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance
 gets overloaded very easily and netdev@vger really doesn't need more
 traffic if we can help it.
 
+netdevsim is great, can I extend it for my out-of-tree tests?
+-------------------------------------------------------------
+
+No, `netdevsim` is a test vehicle solely for upstream tests.
+(Please add your tests under tools/testing/selftests/.)
+
+We also give no guarantees that `netdevsim` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+Is netdevsim considered a "user" of an API?
+-------------------------------------------
+
+Linux kernel has a long standing rule that no API should be added unless
+it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
+strongly encouraged when adding new APIs, but `netdevsim` in itself
+is **not** considered a use case/user.
+
 Any other tips to help ensure my net/net-next patch gets OK'd?
 --------------------------------------------------------------
 Attention to detail.  Re-read your own work as if you were the
index 9c918f7..1ee2141 100644 (file)
@@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3):
  state (f.e. VLAN).
 
 IF_OPER_TESTING (4):
- Unused in current kernel.
+ Interface is in testing mode, for example executing driver self-tests
+ or media (cable) test. It can't be used for normal traffic until tests
+ complete.
 
 IF_OPER_DORMANT (5):
  Interface is L1 up, but waiting for an external event, f.e. for a
@@ -111,7 +113,7 @@ it as lower layer.
 
 Note that for certain kind of soft-devices, which are not managing any
 real hardware, it is possible to set this bit from userspace.  One
-should use TVL IFLA_CARRIER to do so.
+should use TLV IFLA_CARRIER to do so.
 
 netif_carrier_ok() can be used to query that bit.
 
index c9467d2..bbaecde 100644 (file)
@@ -17815,7 +17815,7 @@ F:      include/linux/sync_file.h
 F:     include/uapi/linux/sync_file.h
 
 SYNOPSYS ARC ARCHITECTURE
-M:     Vineet Gupta <vgupta@synopsys.com>
+M:     Vineet Gupta <vgupta@kernel.org>
 L:     linux-snps-arc@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
index 27a072c..eae1314 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -546,7 +546,6 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 PHONY += scripts_basic
 scripts_basic:
        $(Q)$(MAKE) $(build)=scripts/basic
-       $(Q)rm -f .tmp_quiet_recordmcount
 
 PHONY += outputmakefile
 ifdef building_out_of_srctree
@@ -1317,6 +1316,16 @@ PHONY += scripts_unifdef
 scripts_unifdef: scripts_basic
        $(Q)$(MAKE) $(build)=scripts scripts/unifdef
 
+# ---------------------------------------------------------------------------
+# Install
+
+# Many distributions have the custom install script, /sbin/installkernel.
+# If DKMS is installed, 'make install' will eventually recurse back
+# to this Makefile to build and install external modules.
+# Cancel sub_make_done so that options such as M=, V=, etc. are parsed.
+
+install: sub_make_done :=
+
 # ---------------------------------------------------------------------------
 # Tools
 
index d8f51eb..b5bf68e 100644 (file)
@@ -409,7 +409,7 @@ choice
        help
          Depending on the configuration, CPU can contain DSP registers
          (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
-         Bellow is options describing how to handle these registers in
+         Below are options describing how to handle these registers in
          interrupt entry / exit and in context switch.
 
 config ARC_DSP_NONE
index 69debd7..0b48580 100644 (file)
@@ -24,7 +24,7 @@
  */
 static inline __sum16 csum_fold(__wsum s)
 {
-       unsigned r = s << 16 | s >> 16; /* ror */
+       unsigned int r = s << 16 | s >> 16;     /* ror */
        s = ~s;
        s -= r;
        return s >> 16;
index 30b9ae5..e1971d3 100644 (file)
@@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 #define C(_x)                  PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED   0xffff
 
-static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = PERF_COUNT_ARC_LDC,
index c67c0f0..ec64021 100644 (file)
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 
 void fpu_init_task(struct pt_regs *regs)
 {
+       const unsigned int fwe = 0x80000000;
+
        /* default rounding mode */
        write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
 
-       /* set "Write enable" to allow explicit write to exception flags */
-       write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+       /* Initialize to zero: setting requires FWE be set */
+       write_aux_reg(ARC_REG_FPU_STATUS, fwe);
 }
 
 void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 {
        struct arc_fpu *save = &prev->thread.fpu;
        struct arc_fpu *restore = &next->thread.fpu;
+       const unsigned int fwe = 0x80000000;
 
        save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
        save->status = read_aux_reg(ARC_REG_FPU_STATUS);
 
        write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
-       write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+       write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
 }
 
 #endif
index 47bab67..9e28058 100644 (file)
@@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 {
        const u8 *ptr;
        unsigned long tableSize = table->size, hdrSize;
-       unsigned n;
+       unsigned int n;
        const u32 *fde;
        struct {
                u8 version;
@@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        uleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        sleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 static signed fde_pointer_type(const u32 *cie)
 {
        const u8 *ptr = (const u8 *)(cie + 2);
-       unsigned version = *ptr;
+       unsigned int version = *ptr;
 
        if (*++ptr) {
                const char *aug;
@@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame)
        const u8 *ptr = NULL, *end = NULL;
        unsigned long pc = UNW_PC(frame) - frame->call_frame;
        unsigned long startLoc = 0, endLoc = 0, cfa;
-       unsigned i;
+       unsigned int i;
        signed ptrType = -1;
        uleb128_t retAddrReg = 0;
        const struct unwind_table *table;
index e2146a8..529ae50 100644 (file)
@@ -88,6 +88,8 @@ SECTIONS
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
        }
index 40ef397..ba58e6b 100644 (file)
                                compatible = "ti,am4372-d_can", "ti,am3352-d_can";
                                reg = <0x0 0x2000>;
                                clocks = <&dcan1_fck>;
-                               clock-name = "fck";
+                               clock-names = "fck";
                                syscon-raminit = <&scm_conf 0x644 1>;
                                interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
index aae0af1..2aa75ab 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps65218: tps65218@24 {
                reg = <0x24>;
index f98691a..d3082b9 100644 (file)
 
                pinctrl_power_button: powerbutgrp {
                        fsl,pins = <
-                               MX53_PAD_SD2_DATA2__GPIO1_13            0x1e4
+                               MX53_PAD_SD2_DATA0__GPIO1_15            0x1e4
                        >;
                };
 
                pinctrl_power_out: poweroutgrp {
                        fsl,pins = <
-                               MX53_PAD_SD2_DATA0__GPIO1_15            0x1e4
+                               MX53_PAD_SD2_DATA2__GPIO1_13            0x1e4
                        >;
                };
 
index 0ad8ccd..f86efd0 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
        phy-mode = "rgmii-id";
-       phy-reset-duration = <2>;
+
+       /*
+        * The PHY seems to require a long-enough reset duration to avoid
+        * some rare issues where the PHY gets stuck in an inconsistent and
+        * non-functional state at boot-up. 10ms proved to be fine.
+        */
+       phy-reset-duration = <10>;
        phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
        status = "okay";
 
index a054543..9f1e382 100644 (file)
@@ -43,6 +43,7 @@
        assigned-clock-rates = <0>, <198000000>;
        cap-power-off-card;
        keep-power-in-suspend;
+       max-frequency = <25000000>;
        mmc-pwrseq = <&wifi_pwrseq>;
        no-1-8-v;
        non-removable;
index 45435bb..373984c 100644 (file)
                regulator-max-microvolt = <5000000>;
        };
 
-       vdds_1v8_main: fixedregulator-vdds_1v8_main {
-               compatible = "regulator-fixed";
-               regulator-name = "vdds_1v8_main";
-               vin-supply = <&smps7_reg>;
-               regulator-min-microvolt = <1800000>;
-               regulator-max-microvolt = <1800000>;
-       };
-
        vmmcsd_fixed: fixedregulator-mmcsd {
                compatible = "regulator-fixed";
                regulator-name = "vmmcsd_fixed";
                                        regulator-boot-on;
                                };
 
+                               vdds_1v8_main:
                                smps7_reg: smps7 {
                                        /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
                                        regulator-name = "smps7";
index c9b9064..1815361 100644 (file)
                        status = "disabled";
                };
 
-               vica: intc@10140000 {
+               vica: interrupt-controller@10140000 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        reg = <0x10140000 0x20>;
                };
 
-               vicb: intc@10140020 {
+               vicb: interrupt-controller@10140020 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
index c5ea08f..6cf1c8b 100644 (file)
@@ -37,7 +37,7 @@
                poll-interval = <20>;
 
                /*
-                * The EXTi IRQ line 3 is shared with touchscreen and ethernet,
+                * The EXTi IRQ line 3 is shared with ethernet,
                 * so mark this as polled GPIO key.
                 */
                button-0 {
                        gpios = <&gpiof 3 GPIO_ACTIVE_LOW>;
                };
 
+               /*
+                * The EXTi IRQ line 6 is shared with touchscreen,
+                * so mark this as polled GPIO key.
+                */
+               button-1 {
+                       label = "TA2-GPIO-B";
+                       linux,code = <KEY_B>;
+                       gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
+               };
+
                /*
                 * The EXTi IRQ line 0 is shared with PMIC,
                 * so mark this as polled GPIO key.
        gpio-keys {
                compatible = "gpio-keys";
 
-               button-1 {
-                       label = "TA2-GPIO-B";
-                       linux,code = <KEY_B>;
-                       gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
-                       wakeup-source;
-               };
-
                button-3 {
                        label = "TA4-GPIO-D";
                        linux,code = <KEY_D>;
@@ -82,6 +85,7 @@
                        label = "green:led5";
                        gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
                        default-state = "off";
+                       status = "disabled";
                };
 
                led-1 {
        touchscreen@38 {
                compatible = "edt,edt-ft5406";
                reg = <0x38>;
-               interrupt-parent = <&gpiog>;
-               interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
+               interrupt-parent = <&gpioc>;
+               interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
        };
 };
 
index 2af0a67..8c41f81 100644 (file)
@@ -12,6 +12,8 @@
        aliases {
                ethernet0 = &ethernet0;
                ethernet1 = &ksz8851;
+               rtc0 = &hwrtc;
+               rtc1 = &rtc;
        };
 
        memory@c0000000 {
                        reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
                        reset-assert-us = <500>;
                        reset-deassert-us = <500>;
+                       smsc,disable-energy-detect;
                        interrupt-parent = <&gpioi>;
                        interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                };
        /delete-property/dmas;
        /delete-property/dma-names;
 
-       rtc@32 {
+       hwrtc: rtc@32 {
                compatible = "microcrystal,rv8803";
                reg = <0x32>;
        };
index f0a073a..13f3068 100644 (file)
@@ -68,7 +68,6 @@ void imx_set_cpu_arg(int cpu, u32 arg);
 void v7_secondary_startup(void);
 void imx_scu_map_io(void);
 void imx_smp_prepare(void);
-void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
 #else
 static inline void imx_scu_map_io(void) {}
 static inline void imx_smp_prepare(void) {}
@@ -81,6 +80,7 @@ void imx_gpc_mask_all(void);
 void imx_gpc_restore_all(void);
 void imx_gpc_hwirq_mask(unsigned int hwirq);
 void imx_gpc_hwirq_unmask(unsigned int hwirq);
+void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
 void imx_anatop_init(void);
 void imx_anatop_pre_suspend(void);
 void imx_anatop_post_resume(void);
index 0dfd0ae..af12668 100644 (file)
@@ -103,6 +103,7 @@ struct mmdc_pmu {
        struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
        struct hlist_node node;
        struct fsl_mmdc_devtype_data *devtype_data;
+       struct clk *mmdc_ipg_clk;
 };
 
 /*
@@ -462,11 +463,14 @@ static int imx_mmdc_remove(struct platform_device *pdev)
 
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        perf_pmu_unregister(&pmu_mmdc->pmu);
+       iounmap(pmu_mmdc->mmdc_base);
+       clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
        kfree(pmu_mmdc);
        return 0;
 }
 
-static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
+static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
+                             struct clk *mmdc_ipg_clk)
 {
        struct mmdc_pmu *pmu_mmdc;
        char *name;
@@ -494,6 +498,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
        }
 
        mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+       pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
        if (mmdc_num == 0)
                name = "mmdc";
        else
@@ -529,7 +534,7 @@ pmu_free:
 
 #else
 #define imx_mmdc_remove NULL
-#define imx_mmdc_perf_init(pdev, mmdc_base) 0
+#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
 #endif
 
 static int imx_mmdc_probe(struct platform_device *pdev)
@@ -567,7 +572,13 @@ static int imx_mmdc_probe(struct platform_device *pdev)
        val &= ~(1 << BP_MMDC_MAPSR_PSD);
        writel_relaxed(val, reg);
 
-       return imx_mmdc_perf_init(pdev, mmdc_base);
+       err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
+       if (err) {
+               iounmap(mmdc_base);
+               clk_disable_unprepare(mmdc_ipg_clk);
+       }
+
+       return err;
 }
 
 int imx_mmdc_get_ddr_type(void)
index bf14d65..34a1c77 100644 (file)
@@ -91,6 +91,7 @@ config MACH_IXDP465
 
 config MACH_GORAMO_MLR
        bool "GORAMO Multi Link Router"
+       depends on IXP4XX_PCI_LEGACY
        help
          Say 'Y' here if you want your kernel to support GORAMO
          MultiLink router.
index 65934b2..12b26e0 100644 (file)
@@ -3776,6 +3776,7 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
        struct omap_hwmod_ocp_if *oi;
        struct clockdomain *clkdm;
        struct clk_hw_omap *clk;
+       struct clk_hw *hw;
 
        if (!oh)
                return NULL;
@@ -3792,7 +3793,14 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
                c = oi->_clk;
        }
 
-       clk = to_clk_hw_omap(__clk_get_hw(c));
+       hw = __clk_get_hw(c);
+       if (!hw)
+               return NULL;
+
+       clk = to_clk_hw_omap(hw);
+       if (!clk)
+               return NULL;
+
        clkdm = clk->clkdm;
        if (!clkdm)
                return NULL;
index b5b13a9..fdcd54d 100644 (file)
@@ -1800,11 +1800,11 @@ config RANDOMIZE_BASE
          If unsure, say N.
 
 config RANDOMIZE_MODULE_REGION_FULL
-       bool "Randomize the module region over a 4 GB range"
+       bool "Randomize the module region over a 2 GB range"
        depends on RANDOMIZE_BASE
        default y
        help
-         Randomizes the location of the module region inside a 4 GB window
+         Randomizes the location of the module region inside a 2 GB window
          covering the core kernel. This way, it is less likely for modules
          to leak information about the location of core kernel data structures
          but it does imply that function calls between modules and the core
@@ -1812,7 +1812,10 @@ config RANDOMIZE_MODULE_REGION_FULL
 
          When this option is not set, the module region will be randomized over
          a limited range that contains the [_stext, _etext] interval of the
-         core kernel, so branch relocations are always in range.
+         core kernel, so branch relocations are almost always in range unless
+         ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
+         particular case of region exhaustion, modules might be able to fall
+         back to a larger 2GB area.
 
 config CC_HAVE_STACKPROTECTOR_SYSREG
        def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
index 7bc37d0..7b668db 100644 (file)
@@ -21,19 +21,11 @@ LDFLAGS_vmlinux             += -shared -Bsymbolic -z notext \
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
-  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
-$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
-  else
+  ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
 LDFLAGS_vmlinux        += --fix-cortex-a53-843419
   endif
 endif
 
-ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
-  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-$(warning LSE atomics not supported by binutils)
-  endif
-endif
-
 cc_has_k_constraint := $(call try-run,echo                             \
        'int main(void) {                                               \
                asm volatile("and w0, w0, %w0" :: "K" (4294967295));    \
@@ -176,6 +168,17 @@ vdso_install:
 
 archprepare:
        $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
+  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
+       @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
+  endif
+endif
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
+       @echo "warning: LSE atomics not supported by binutils" >&2
+  endif
+endif
+
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
index dd764b7..f6a79c8 100644 (file)
@@ -54,6 +54,7 @@
 
 &mscc_felix_port0 {
        label = "swp0";
+       managed = "in-band-status";
        phy-handle = <&phy0>;
        phy-mode = "sgmii";
        status = "okay";
@@ -61,6 +62,7 @@
 
 &mscc_felix_port1 {
        label = "swp1";
+       managed = "in-band-status";
        phy-handle = <&phy1>;
        phy-mode = "sgmii";
        status = "okay";
index b2e3e5d..343ecf0 100644 (file)
@@ -66,7 +66,7 @@
                };
        };
 
-       sysclk: clock-sysclk {
+       sysclk: sysclk {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <100000000>;
index ce2bcdd..a05b1ab 100644 (file)
@@ -19,6 +19,8 @@
        aliases {
                spi0 = &spi0;
                ethernet1 = &eth1;
+               mmc0 = &sdhci0;
+               mmc1 = &sdhci1;
        };
 
        chosen {
        pinctrl-names = "default";
        pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <100000>;
+       /delete-property/ mrvl,i2c-fast-mode;
        status = "okay";
 
        rtc@6f {
index 076d5ef..5ba7a45 100644 (file)
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE1R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE1W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE1>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE1 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14120000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE2AR &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE2AW &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE2>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE2 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14140000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE3R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE3W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE3>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE3 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14160000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE4>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14180000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE0>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@141a0000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE5>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@14160000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE4>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@14180000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE0>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@141a0000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE5>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        sram@40000000 {
index e58bca8..41b332c 100644 (file)
@@ -320,7 +320,17 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-       return regs->regs[0];
+       unsigned long val = regs->regs[0];
+
+       /*
+        * Audit currently uses regs_return_value() instead of
+        * syscall_get_return_value(). Apply the same sign-extension here until
+        * audit is updated to use syscall_get_return_value().
+        */
+       if (compat_user_mode(regs))
+               val = sign_extend64(val, 31);
+
+       return val;
 }
 
 static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
index 1801399..8aebc00 100644 (file)
@@ -35,7 +35,7 @@ struct stack_info {
  * accounting information necessary for robust unwinding.
  *
  * @fp:          The fp value in the frame record (or the real fp)
- * @pc:          The fp value in the frame record (or the real lr)
+ * @pc:          The lr value in the frame record (or the real lr)
  *
  * @stacks_done: Stacks which have been entirely unwound, for which it is no
  *               longer valid to unwind to.
index cfc0672..03e2089 100644 (file)
@@ -29,22 +29,23 @@ static inline void syscall_rollback(struct task_struct *task,
        regs->regs[0] = regs->orig_x0;
 }
 
-
-static inline long syscall_get_error(struct task_struct *task,
-                                    struct pt_regs *regs)
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
 {
-       unsigned long error = regs->regs[0];
+       unsigned long val = regs->regs[0];
 
        if (is_compat_thread(task_thread_info(task)))
-               error = sign_extend64(error, 31);
+               val = sign_extend64(val, 31);
 
-       return IS_ERR_VALUE(error) ? error : 0;
+       return val;
 }
 
-static inline long syscall_get_return_value(struct task_struct *task,
-                                           struct pt_regs *regs)
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
 {
-       return regs->regs[0];
+       unsigned long error = syscall_get_return_value(task, regs);
+
+       return IS_ERR_VALUE(error) ? error : 0;
 }
 
 static inline void syscall_set_return_value(struct task_struct *task,
index cfa2cfd..418b2bb 100644 (file)
@@ -162,7 +162,9 @@ u64 __init kaslr_early_init(void)
                 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
                 * _stext) . This guarantees that the resulting region still
                 * covers [_stext, _etext], and that all relative branches can
-                * be resolved without veneers.
+                * be resolved without veneers unless this region is exhausted
+                * and we fall back to a larger 2GB window in module_alloc()
+                * when ARM64_MODULE_PLTS is enabled.
                 */
                module_range = MODULES_VSIZE - (u64)(_etext - _stext);
                module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
index 499b6b2..b381a1e 100644 (file)
@@ -1862,7 +1862,7 @@ void syscall_trace_exit(struct pt_regs *regs)
        audit_syscall_exit(regs);
 
        if (flags & _TIF_SYSCALL_TRACEPOINT)
-               trace_sys_exit(regs, regs_return_value(regs));
+               trace_sys_exit(regs, syscall_get_return_value(current, regs));
 
        if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
index f8192f4..2303633 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/unistd.h>
 #include <asm/fpsimd.h>
 #include <asm/ptrace.h>
+#include <asm/syscall.h>
 #include <asm/signal32.h>
 #include <asm/traps.h>
 #include <asm/vdso.h>
@@ -890,7 +891,7 @@ static void do_signal(struct pt_regs *regs)
                     retval == -ERESTART_RESTARTBLOCK ||
                     (retval == -ERESTARTSYS &&
                      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
-                       regs->regs[0] = -EINTR;
+                       syscall_set_return_value(current, regs, -EINTR, 0);
                        regs->pc = continue_addr;
                }
 
index b83c8d9..8982a2b 100644 (file)
@@ -218,7 +218,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
                              void *cookie, struct task_struct *task,
                              struct pt_regs *regs)
 {
index 263d6c1..50a0f1a 100644 (file)
@@ -54,10 +54,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
                ret = do_ni_syscall(regs, scno);
        }
 
-       if (is_compat_task())
-               ret = lower_32_bits(ret);
-
-       regs->regs[0] = ret;
+       syscall_set_return_value(current, regs, 0, ret);
 
        /*
         * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
@@ -115,7 +112,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                 * syscall. do_notify_resume() will send a signal to userspace
                 * before the syscall is restarted.
                 */
-               regs->regs[0] = -ERESTARTNOINTR;
+               syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
                return;
        }
 
@@ -136,7 +133,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                 * anyway.
                 */
                if (scno == NO_SYSCALL)
-                       regs->regs[0] = -ENOSYS;
+                       syscall_set_return_value(current, regs, -ENOSYS, 0);
                scno = syscall_trace_enter(regs);
                if (scno == NO_SYSCALL)
                        goto trace_exit;
index 4e942b7..653befc 100644 (file)
@@ -321,7 +321,7 @@ KBUILD_LDFLAGS              += -m $(ld-emul)
 
 ifdef CONFIG_MIPS
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
-       egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+       egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
        sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
 endif
 
index 4b2567d..c7925d0 100644 (file)
@@ -58,15 +58,20 @@ do {                                                        \
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *pmd = NULL;
+       pmd_t *pmd;
        struct page *pg;
 
-       pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
-       if (pg) {
-               pgtable_pmd_page_ctor(pg);
-               pmd = (pmd_t *)page_address(pg);
-               pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+       pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+       if (!pg)
+               return NULL;
+
+       if (!pgtable_pmd_page_ctor(pg)) {
+               __free_pages(pg, PMD_ORDER);
+               return NULL;
        }
+
+       pmd = (pmd_t *)page_address(pg);
+       pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
 }
 
index ee74719..4ffbcc5 100644 (file)
@@ -48,7 +48,8 @@ static struct plat_serial8250_port uart8250_data[] = {
                .mapbase        = 0x1f000900,   /* The CBUS UART */
                .irq            = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
                .uartclk        = 3686400,      /* Twice the usual clk! */
-               .iotype         = UPIO_MEM32,
+               .iotype         = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
+                                 UPIO_MEM32BE : UPIO_MEM32,
                .flags          = CBUS_UART_FLAGS,
                .regshift       = 3,
        },
index 8fcceb8..4f7b70a 100644 (file)
@@ -492,10 +492,16 @@ config CC_HAVE_STACKPROTECTOR_TLS
 
 config STACKPROTECTOR_PER_TASK
        def_bool y
+       depends on !GCC_PLUGIN_RANDSTRUCT
        depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
 
+config PHYS_RAM_BASE_FIXED
+       bool "Explicitly specified physical RAM address"
+       default n
+
 config PHYS_RAM_BASE
        hex "Platform Physical RAM address"
+       depends on PHYS_RAM_BASE_FIXED
        default "0x80000000"
        help
          This is the physical address of RAM in the system. It has to be
@@ -508,6 +514,7 @@ config XIP_KERNEL
        # This prevents XIP from being enabled by all{yes,mod}config, which
        # fail to build since XIP doesn't support large kernels.
        depends on !COMPILE_TEST
+       select PHYS_RAM_BASE_FIXED
        help
          Execute-In-Place allows the kernel to run from non-volatile storage
          directly addressable by the CPU, such as NOR flash. This saves RAM
index b1c3c59..2e4ea84 100644 (file)
@@ -24,7 +24,7 @@
 
        memory@80000000 {
                device_type = "memory";
-               reg = <0x0 0x80000000 0x2 0x00000000>;
+               reg = <0x0 0x80000000 0x4 0x00000000>;
        };
 
        soc {
index cca8764..b0ca505 100644 (file)
@@ -103,6 +103,7 @@ struct kernel_mapping {
 };
 
 extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
 
 #ifdef CONFIG_64BIT
 #define is_kernel_mapping(x)   \
@@ -113,9 +114,9 @@ extern struct kernel_mapping kernel_map;
 #define linear_mapping_pa_to_va(x)     ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)     ({                                              \
        unsigned long _y = y;                                                           \
-       (_y >= CONFIG_PHYS_RAM_BASE) ?                                                  \
-               (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) :   \
-               (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset);             \
+       (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?                                 \
+               (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) :            \
+               (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET);    \
        })
 #define __pa_to_va_nodebug(x)          linear_mapping_pa_to_va(x)
 
index ac75936..315db3d 100644 (file)
@@ -27,7 +27,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
-       } else if (task == current) {
+       } else if (task == NULL || task == current) {
                fp = (unsigned long)__builtin_frame_address(1);
                sp = (unsigned long)__builtin_frame_address(0);
                pc = (unsigned long)__builtin_return_address(0);
index a14bf39..88134cc 100644 (file)
@@ -36,6 +36,9 @@ EXPORT_SYMBOL(kernel_map);
 #define kernel_map     (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
 #endif
 
+phys_addr_t phys_ram_base __ro_after_init;
+EXPORT_SYMBOL(phys_ram_base);
+
 #ifdef CONFIG_XIP_KERNEL
 extern char _xiprom[], _exiprom[];
 #endif
@@ -160,7 +163,7 @@ static void __init setup_bootmem(void)
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
        phys_addr_t __maybe_unused max_mapped_addr;
-       phys_addr_t dram_end;
+       phys_addr_t phys_ram_end;
 
 #ifdef CONFIG_XIP_KERNEL
        vmlinux_start = __pa_symbol(&_sdata);
@@ -181,9 +184,12 @@ static void __init setup_bootmem(void)
 #endif
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-       dram_end = memblock_end_of_DRAM();
 
+       phys_ram_end = memblock_end_of_DRAM();
 #ifndef CONFIG_64BIT
+#ifndef CONFIG_XIP_KERNEL
+       phys_ram_base = memblock_start_of_DRAM();
+#endif
        /*
         * memblock allocator is not aware of the fact that last 4K bytes of
         * the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -194,12 +200,12 @@ static void __init setup_bootmem(void)
         * be done in create_kernel_page_table.
         */
        max_mapped_addr = __pa(~(ulong)0);
-       if (max_mapped_addr == (dram_end - 1))
+       if (max_mapped_addr == (phys_ram_end - 1))
                memblock_set_current_limit(max_mapped_addr - 4096);
 #endif
 
-       min_low_pfn = PFN_UP(memblock_start_of_DRAM());
-       max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+       min_low_pfn = PFN_UP(phys_ram_base);
+       max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -558,6 +564,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
        kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
+       phys_ram_base = CONFIG_PHYS_RAM_BASE;
        kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
        kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
 
index 660c799..e30d3fd 100644 (file)
@@ -11,6 +11,7 @@ UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
 
 obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
 obj-all := $(obj-y) piggy.o syms.o
 targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/compressed/clz_ctz.c
new file mode 100644 (file)
index 0000000..c3ebf24
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../../lib/clz_ctz.c"
index 7de253f..b881840 100644 (file)
@@ -335,7 +335,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
index b671642..1667a3c 100644 (file)
@@ -325,7 +325,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
index bff50b6..edf5ff1 100644 (file)
@@ -51,6 +51,7 @@ SECTIONS
 
        .rela.dyn ALIGN(8) : { *(.rela.dyn) }
        .got ALIGN(8)   : { *(.got .toc) }
+       .got.plt ALIGN(8) : { *(.got.plt) }
 
        _end = .;
        PROVIDE(end = .);
index d4fb336..4461ea1 100644 (file)
@@ -51,6 +51,7 @@ SECTIONS
 
        .rela.dyn ALIGN(8) : { *(.rela.dyn) }
        .got ALIGN(8)   : { *(.got .toc) }
+       .got.plt ALIGN(8) : { *(.got.plt) }
 
        _end = .;
        PROVIDE(end = .);
index 1eb4513..3092fbf 100644 (file)
@@ -2489,13 +2489,15 @@ void perf_clear_dirty_counters(void)
                return;
 
        for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
-               /* Metrics and fake events don't have corresponding HW counters. */
-               if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
-                       continue;
-               else if (i >= INTEL_PMC_IDX_FIXED)
+               if (i >= INTEL_PMC_IDX_FIXED) {
+                       /* Metrics and fake events don't have corresponding HW counters. */
+                       if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
+                               continue;
+
                        wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
-               else
+               } else {
                        wrmsrl(x86_pmu_event_addr(i), 0);
+               }
        }
 
        bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
index fca7a6e..ac6fd2d 100644 (file)
@@ -2904,24 +2904,28 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
-       struct cpu_hw_events *cpuc;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
+       bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
        int loops;
        u64 status;
        int handled;
        int pmu_enabled;
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
-
        /*
         * Save the PMU state.
         * It needs to be restored when leaving the handler.
         */
        pmu_enabled = cpuc->enabled;
        /*
-        * No known reason to not always do late ACK,
-        * but just in case do it opt-in.
+        * In general, the early ACK is only applied for old platforms.
+        * For the big core starts from Haswell, the late ACK should be
+        * applied.
+        * For the small core after Tremont, we have to do the ACK right
+        * before re-enabling counters, which is in the middle of the
+        * NMI handler.
         */
-       if (!x86_pmu.late_ack)
+       if (!late_ack && !mid_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_bts_disable_local();
        cpuc->enabled = 0;
@@ -2958,6 +2962,8 @@ again:
                goto again;
 
 done:
+       if (mid_ack)
+               apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        cpuc->enabled = pmu_enabled;
        if (pmu_enabled)
@@ -2969,7 +2975,7 @@ done:
         * have been reset. This avoids spurious NMIs on
         * Haswell CPUs.
         */
-       if (x86_pmu.late_ack)
+       if (late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        return handled;
 }
@@ -6129,7 +6135,6 @@ __init int intel_pmu_init(void)
                static_branch_enable(&perf_is_hybrid);
                x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
 
-               x86_pmu.late_ack = true;
                x86_pmu.pebs_aliases = NULL;
                x86_pmu.pebs_prec_dist = true;
                x86_pmu.pebs_block = true;
@@ -6167,6 +6172,7 @@ __init int intel_pmu_init(void)
                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
                pmu->name = "cpu_core";
                pmu->cpu_type = hybrid_big;
+               pmu->late_ack = true;
                if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
                        pmu->num_counters = x86_pmu.num_counters + 2;
                        pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
@@ -6192,6 +6198,7 @@ __init int intel_pmu_init(void)
                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
                pmu->name = "cpu_atom";
                pmu->cpu_type = hybrid_small;
+               pmu->mid_ack = true;
                pmu->num_counters = x86_pmu.num_counters;
                pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
                pmu->max_pebs_events = x86_pmu.max_pebs_events;
index 2bf1c7e..e3ac05c 100644 (file)
@@ -656,6 +656,10 @@ struct x86_hybrid_pmu {
        struct event_constraint         *event_constraints;
        struct event_constraint         *pebs_constraints;
        struct extra_reg                *extra_regs;
+
+       unsigned int                    late_ack        :1,
+                                       mid_ack         :1,
+                                       enabled_ack     :1;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -686,6 +690,16 @@ extern struct static_key_false perf_is_hybrid;
        __Fp;                                           \
 }))
 
+#define hybrid_bit(_pmu, _field)                       \
+({                                                     \
+       bool __Fp = x86_pmu._field;                     \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = hybrid_pmu(_pmu)->_field;        \
+                                                       \
+       __Fp;                                           \
+})
+
 enum hybrid_pmu_type {
        hybrid_big              = 0x40,
        hybrid_small            = 0x20,
@@ -755,6 +769,7 @@ struct x86_pmu {
 
        /* PMI handler bits */
        unsigned int    late_ack                :1,
+                       mid_ack                 :1,
                        enabled_ack             :1;
        /*
         * sysfs attrs
@@ -1115,9 +1130,10 @@ void x86_pmu_stop(struct perf_event *event, int flags);
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
+       u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
        struct hw_perf_event *hwc = &event->hw;
 
-       wrmsrl(hwc->config_base, hwc->config);
+       wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
 
        if (is_counter_pair(hwc))
                wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
index b07592c..0b38f94 100644 (file)
@@ -2016,6 +2016,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
 
 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
 {
+       trace_kvm_hv_hypercall_done(result);
        kvm_hv_hypercall_set_result(vcpu, result);
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
@@ -2139,6 +2140,7 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_hv_hcall hc;
        u64 ret = HV_STATUS_SUCCESS;
 
@@ -2173,17 +2175,25 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
        hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
        hc.rep = !!(hc.rep_cnt || hc.rep_idx);
 
-       if (hc.fast && is_xmm_fast_hypercall(&hc))
-               kvm_hv_hypercall_read_xmm(&hc);
-
        trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
                               hc.ingpa, hc.outgpa);
 
-       if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+       if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
                ret = HV_STATUS_ACCESS_DENIED;
                goto hypercall_complete;
        }
 
+       if (hc.fast && is_xmm_fast_hypercall(&hc)) {
+               if (unlikely(hv_vcpu->enforce_cpuid &&
+                            !(hv_vcpu->cpuid_cache.features_edx &
+                              HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+
+               kvm_hv_hypercall_read_xmm(&hc);
+       }
+
        switch (hc.code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                if (unlikely(hc.rep)) {
index 66f7f5b..c4f4fa2 100644 (file)
@@ -1644,7 +1644,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
index 6710d9e..7fbce34 100644 (file)
@@ -64,6 +64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock);
 unsigned int max_sev_asid;
 static unsigned int min_sev_asid;
 static unsigned long sev_me_mask;
+static unsigned int nr_asids;
 static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
 
@@ -78,11 +79,11 @@ struct enc_region {
 /* Called with the sev_bitmap_lock held, or on shutdown  */
 static int sev_flush_asids(int min_asid, int max_asid)
 {
-       int ret, pos, error = 0;
+       int ret, asid, error = 0;
 
        /* Check if there are any ASIDs to reclaim before performing a flush */
-       pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
-       if (pos >= max_asid)
+       asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+       if (asid > max_asid)
                return -EBUSY;
 
        /*
@@ -115,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
 
        /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
-                  max_sev_asid);
-       bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+                  nr_asids);
+       bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
 
        return true;
 }
 
 static int sev_asid_new(struct kvm_sev_info *sev)
 {
-       int pos, min_asid, max_asid, ret;
+       int asid, min_asid, max_asid, ret;
        bool retry = true;
        enum misc_res_type type;
 
@@ -143,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
         * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
         * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
         */
-       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+       min_asid = sev->es_active ? 1 : min_sev_asid;
        max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
-       pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
-       if (pos >= max_asid) {
+       asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+       if (asid > max_asid) {
                if (retry && __sev_recycle_asids(min_asid, max_asid)) {
                        retry = false;
                        goto again;
@@ -157,11 +158,11 @@ again:
                goto e_uncharge;
        }
 
-       __set_bit(pos, sev_asid_bitmap);
+       __set_bit(asid, sev_asid_bitmap);
 
        mutex_unlock(&sev_bitmap_lock);
 
-       return pos + 1;
+       return asid;
 e_uncharge:
        misc_cg_uncharge(type, sev->misc_cg, 1);
        put_misc_cg(sev->misc_cg);
@@ -179,17 +180,16 @@ static int sev_get_asid(struct kvm *kvm)
 static void sev_asid_free(struct kvm_sev_info *sev)
 {
        struct svm_cpu_data *sd;
-       int cpu, pos;
+       int cpu;
        enum misc_res_type type;
 
        mutex_lock(&sev_bitmap_lock);
 
-       pos = sev->asid - 1;
-       __set_bit(pos, sev_reclaim_asid_bitmap);
+       __set_bit(sev->asid, sev_reclaim_asid_bitmap);
 
        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
-               sd->sev_vmcbs[pos] = NULL;
+               sd->sev_vmcbs[sev->asid] = NULL;
        }
 
        mutex_unlock(&sev_bitmap_lock);
@@ -1857,12 +1857,17 @@ void __init sev_hardware_setup(void)
        min_sev_asid = edx;
        sev_me_mask = 1UL << (ebx & 0x3f);
 
-       /* Initialize SEV ASID bitmaps */
-       sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       /*
+        * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
+        * even though it's never used, so that the bitmap is indexed by the
+        * actual ASID.
+        */
+       nr_asids = max_sev_asid + 1;
+       sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
        if (!sev_asid_bitmap)
                goto out;
 
-       sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap) {
                bitmap_free(sev_asid_bitmap);
                sev_asid_bitmap = NULL;
@@ -1907,7 +1912,7 @@ void sev_hardware_teardown(void)
                return;
 
        /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
-       sev_flush_asids(0, max_sev_asid);
+       sev_flush_asids(1, max_sev_asid);
 
        bitmap_free(sev_asid_bitmap);
        bitmap_free(sev_reclaim_asid_bitmap);
@@ -1921,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
        if (!sev_enabled)
                return 0;
 
-       sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+       sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
        if (!sd->sev_vmcbs)
                return -ENOMEM;
 
index b484141..03ebe36 100644 (file)
@@ -92,6 +92,21 @@ TRACE_EVENT(kvm_hv_hypercall,
                  __entry->outgpa)
 );
 
+TRACE_EVENT(kvm_hv_hypercall_done,
+       TP_PROTO(u64 result),
+       TP_ARGS(result),
+
+       TP_STRUCT__entry(
+               __field(__u64, result)
+       ),
+
+       TP_fast_assign(
+               __entry->result = result;
+       ),
+
+       TP_printk("result 0x%llx", __entry->result)
+);
+
 /*
  * Tracepoint for Xen hypercall.
  */
index 4116567..e5d5c5e 100644 (file)
@@ -4358,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
-       return kvm_arch_interrupt_allowed(vcpu) &&
-               kvm_cpu_accept_dm_intr(vcpu);
+       /*
+        * Do not cause an interrupt window exit if an exception
+        * is pending or an event needs reinjection; userspace
+        * might want to inject the interrupt manually using KVM_SET_REGS
+        * or KVM_SET_SREGS.  For that to work, we must be at an
+        * instruction boundary and with no events half-injected.
+        */
+       return (kvm_arch_interrupt_allowed(vcpu) &&
+               kvm_cpu_accept_dm_intr(vcpu) &&
+               !kvm_event_needs_reinjection(vcpu) &&
+               !vcpu->arch.exception.pending);
 }
 
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
index 04c5a44..9ba700d 100644 (file)
@@ -57,12 +57,12 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        [S_REL] =
        "^(__init_(begin|end)|"
        "__x86_cpu_dev_(start|end)|"
-       "(__parainstructions|__alt_instructions)(|_end)|"
-       "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+       "(__parainstructions|__alt_instructions)(_end)?|"
+       "(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
        "__(start|end)_pci_.*|"
        "__(start|end)_builtin_fw|"
-       "__(start|stop)___ksymtab(|_gpl)|"
-       "__(start|stop)___kcrctab(|_gpl)|"
+       "__(start|stop)___ksymtab(_gpl)?|"
+       "__(start|stop)___kcrctab(_gpl)?|"
        "__(start|stop)___param|"
        "__(start|stop)___modver|"
        "__(start|stop)___bug_table|"
index 575d7a2..31fe9be 100644 (file)
@@ -790,6 +790,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                struct blkcg_gq *parent = blkg->parent;
                struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
                struct blkg_iostat cur, delta;
+               unsigned long flags;
                unsigned int seq;
 
                /* fetch the current per-cpu values */
@@ -799,21 +800,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                } while (u64_stats_fetch_retry(&bisc->sync, seq));
 
                /* propagate percpu delta to global */
-               u64_stats_update_begin(&blkg->iostat.sync);
+               flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                blkg_iostat_set(&delta, &cur);
                blkg_iostat_sub(&delta, &bisc->last);
                blkg_iostat_add(&blkg->iostat.cur, &delta);
                blkg_iostat_add(&bisc->last, &delta);
-               u64_stats_update_end(&blkg->iostat.sync);
+               u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 
                /* propagate global delta to parent (unless that's root) */
                if (parent && parent->parent) {
-                       u64_stats_update_begin(&parent->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
                        blkg_iostat_set(&delta, &blkg->iostat.cur);
                        blkg_iostat_sub(&delta, &blkg->iostat.last);
                        blkg_iostat_add(&parent->iostat.cur, &delta);
                        blkg_iostat_add(&blkg->iostat.last, &delta);
-                       u64_stats_update_end(&parent->iostat.sync);
+                       u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
                }
        }
 
@@ -848,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
                memset(&tmp, 0, sizeof(tmp));
                for_each_possible_cpu(cpu) {
                        struct disk_stats *cpu_dkstats;
+                       unsigned long flags;
 
                        cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
                        tmp.ios[BLKG_IOSTAT_READ] +=
@@ -864,9 +866,9 @@ static void blkcg_fill_root_iostats(void)
                        tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->sectors[STAT_DISCARD] << 9;
 
-                       u64_stats_update_begin(&blkg->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                        blkg_iostat_set(&blkg->iostat.cur, &tmp);
-                       u64_stats_update_end(&blkg->iostat.sync);
+                       u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
                }
        }
 }
index 81be009..d8b0d8b 100644 (file)
@@ -833,7 +833,11 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 
        enable = iolatency_set_min_lat_nsec(blkg, lat_val);
        if (enable) {
-               WARN_ON_ONCE(!blk_get_queue(blkg->q));
+               if (!blk_get_queue(blkg->q)) {
+                       ret = -ENODEV;
+                       goto out;
+               }
+
                blkg_get(blkg);
        }
 
index 81e3279..15a8be5 100644 (file)
@@ -596,13 +596,13 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                struct list_head *head = &kcq->rq_list[sched_domain];
 
                spin_lock(&kcq->lock);
+               trace_block_rq_insert(rq);
                if (at_head)
                        list_move(&rq->queuelist, head);
                else
                        list_move_tail(&rq->queuelist, head);
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
-               trace_block_rq_insert(rq);
                spin_unlock(&kcq->lock);
        }
 }
index cc86534..b8b518d 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
index 38e10ab..14b71b4 100644 (file)
@@ -379,13 +379,6 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
 
                        (*element_ptr)->common.reference_count =
                            original_ref_count;
-
-                       /*
-                        * The original_element holds a reference from the package object
-                        * that represents _HID. Since a new element was created by _HID,
-                        * remove the reference from the _CID package.
-                        */
-                       acpi_ut_remove_reference(original_element);
                }
 
                element_ptr++;
index daeb9b5..437cd61 100644 (file)
@@ -653,8 +653,6 @@ dev_groups_failed:
        else if (drv->remove)
                drv->remove(dev);
 probe_failed:
-       kfree(dev->dma_range_map);
-       dev->dma_range_map = NULL;
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -662,6 +660,8 @@ pinctrl_bind_failed:
        device_links_no_driver(dev);
        devres_release_all(dev);
        arch_teardown_dma_ops(dev);
+       kfree(dev->dma_range_map);
+       dev->dma_range_map = NULL;
        driver_sysfs_remove(dev);
        dev->driver = NULL;
        dev_set_drvdata(dev, NULL);
index 91899d1..d7d63c1 100644 (file)
@@ -89,12 +89,11 @@ static void __fw_load_abort(struct fw_priv *fw_priv)
 {
        /*
         * There is a small window in which user can write to 'loading'
-        * between loading done and disappearance of 'loading'
+        * between loading done/aborted and disappearance of 'loading'
         */
-       if (fw_sysfs_done(fw_priv))
+       if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
                return;
 
-       list_del_init(&fw_priv->pending_list);
        fw_state_aborted(fw_priv);
 }
 
@@ -280,7 +279,6 @@ static ssize_t firmware_loading_store(struct device *dev,
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
-                       list_del_init(&fw_priv->pending_list);
                        if (rc) {
                                fw_state_aborted(fw_priv);
                                written = rc;
@@ -513,6 +511,11 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
        }
 
        mutex_lock(&fw_lock);
+       if (fw_state_is_aborted(fw_priv)) {
+               mutex_unlock(&fw_lock);
+               retval = -EINTR;
+               goto out;
+       }
        list_add(&fw_priv->pending_list, &pending_fw_head);
        mutex_unlock(&fw_lock);
 
@@ -535,11 +538,10 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
        if (fw_state_is_aborted(fw_priv)) {
                if (retval == -ERESTARTSYS)
                        retval = -EINTR;
-               else
-                       retval = -EAGAIN;
        } else if (fw_priv->is_paged_buf && !fw_priv->data)
                retval = -ENOMEM;
 
+out:
        device_del(f_dev);
 err_put_dev:
        put_device(f_dev);
index 63bd29f..a3014e9 100644 (file)
@@ -117,8 +117,16 @@ static inline void __fw_state_set(struct fw_priv *fw_priv,
 
        WRITE_ONCE(fw_st->status, status);
 
-       if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+       if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+               /*
+                * Doing this here ensures that the fw_priv is deleted from
+                * the pending list in all abort/done paths.
+                */
+               list_del_init(&fw_priv->pending_list);
+#endif
                complete_all(&fw_st->completion);
+       }
 }
 
 static inline void fw_state_aborted(struct fw_priv *fw_priv)
index 4fdb821..68c549d 100644 (file)
@@ -783,8 +783,10 @@ static void fw_abort_batch_reqs(struct firmware *fw)
                return;
 
        fw_priv = fw->priv;
+       mutex_lock(&fw_lock);
        if (!fw_state_is_aborted(fw_priv))
                fw_state_aborted(fw_priv);
+       mutex_unlock(&fw_lock);
 }
 
 /* called from request_firmware() and request_firmware_work_func() */
index 7b4dd10..c84be00 100644 (file)
@@ -74,7 +74,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
 
        n64cart_wait_dma();
 
-       n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset);
+       n64cart_write_reg(PI_DRAM_REG, dma_addr);
        n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
        n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
 
index 5b9ea66..bc239a1 100644 (file)
@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
                      struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-                       struct mhi_chan *mhi_chan);
+                       struct mhi_chan *mhi_chan, unsigned int flags);
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan);
 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
index fc9196f..8444823 100644 (file)
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
 }
 
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-                       struct mhi_chan *mhi_chan)
+                       struct mhi_chan *mhi_chan, unsigned int flags)
 {
        int ret = 0;
        struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
        if (ret)
                goto error_pm_state;
 
+       if (mhi_chan->dir == DMA_FROM_DEVICE)
+               mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+       
        /* Pre-allocate buffer for xfer ring */
        if (mhi_chan->pre_alloc) {
                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1610,7 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 }
 
 /* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
 {
        int ret, dir;
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
                if (!mhi_chan)
                        continue;
 
-               ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+               ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
                if (ret)
                        goto error_open_chan;
        }
index 38cb116..0ef98e3 100644 (file)
@@ -100,6 +100,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @cookie: data used by legacy platform callbacks
  * @name: name if available
  * @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
  * @enabled: sysc runtime enabled status
  * @needs_resume: runtime resume needed on resume from suspend
  * @child_needs_resume: runtime resume needed for child on resume from suspend
@@ -130,6 +131,7 @@ struct sysc {
        struct ti_sysc_cookie cookie;
        const char *name;
        u32 revision;
+       unsigned int reserved:1;
        unsigned int enabled:1;
        unsigned int needs_resume:1;
        unsigned int child_needs_resume:1;
@@ -2951,6 +2953,8 @@ static int sysc_init_soc(struct sysc *ddata)
                case SOC_3430 ... SOC_3630:
                        sysc_add_disabled(0x48304000);  /* timer12 */
                        break;
+               case SOC_AM3:
+                       sysc_add_disabled(0x48310000);  /* rng */
                default:
                        break;
                }
@@ -3093,8 +3097,8 @@ static int sysc_probe(struct platform_device *pdev)
                return error;
 
        error = sysc_check_active_timer(ddata);
-       if (error)
-               return error;
+       if (error == -EBUSY)
+               ddata->reserved = true;
 
        error = sysc_get_clocks(ddata);
        if (error)
@@ -3130,11 +3134,15 @@ static int sysc_probe(struct platform_device *pdev)
        sysc_show_registers(ddata);
 
        ddata->dev->type = &sysc_device_type;
-       error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
-                                    pdata ? pdata->auxdata : NULL,
-                                    ddata->dev);
-       if (error)
-               goto err;
+
+       if (!ddata->reserved) {
+               error = of_platform_populate(ddata->dev->of_node,
+                                            sysc_match_table,
+                                            pdata ? pdata->auxdata : NULL,
+                                            ddata->dev);
+               if (error)
+                       goto err;
+       }
 
        INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
 
index 2ccdf8a..6e32355 100644 (file)
@@ -254,11 +254,11 @@ static int ftpm_tee_probe(struct device *dev)
        pvt_data->session = sess_arg.session;
 
        /* Allocate dynamic shared memory with fTPM TA */
-       pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
-                                     MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
-                                     TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+                                                MAX_COMMAND_SIZE +
+                                                MAX_RESPONSE_SIZE);
        if (IS_ERR(pvt_data->shm)) {
-               dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+               dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
                rc = -ENOMEM;
                goto out_shm_alloc;
        }
index be16076..f9d5b73 100644 (file)
@@ -92,13 +92,20 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
 }
 EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
 
+static void devm_clk_bulk_release_all(struct device *dev, void *res)
+{
+       struct clk_bulk_devres *devres = res;
+
+       clk_bulk_put_all(devres->num_clks, devres->clks);
+}
+
 int __must_check devm_clk_bulk_get_all(struct device *dev,
                                       struct clk_bulk_data **clks)
 {
        struct clk_bulk_devres *devres;
        int ret;
 
-       devres = devres_alloc(devm_clk_bulk_release,
+       devres = devres_alloc(devm_clk_bulk_release_all,
                              sizeof(*devres), GFP_KERNEL);
        if (!devres)
                return -ENOMEM;
index 18117ce..5c75e3d 100644 (file)
@@ -526,7 +526,7 @@ struct stm32f4_pll {
 
 struct stm32f4_pll_post_div_data {
        int idx;
-       u8 pll_num;
+       int pll_idx;
        const char *name;
        const char *parent;
        u8 flag;
@@ -557,13 +557,13 @@ static const struct clk_div_table post_divr_table[] = {
 
 #define MAX_POST_DIV 3
 static const struct stm32f4_pll_post_div_data  post_div_data[MAX_POST_DIV] = {
-       { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q",
+       { CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q",
                CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL},
 
-       { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q",
+       { CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q",
                CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL },
 
-       { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
+       { NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
                STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table },
 };
 
@@ -1774,7 +1774,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
                                post_div->width,
                                post_div->flag_div,
                                post_div->div_table,
-                               clks[post_div->pll_num],
+                               clks[post_div->pll_idx],
                                &stm32f4_clk_lock);
 
                if (post_div->idx != NO_IDX)
index 5ecc37a..c1ec75a 100644 (file)
@@ -18,6 +18,7 @@ config COMMON_CLK_HI3519
 config COMMON_CLK_HI3559A
        bool "Hi3559A Clock Driver"
        depends on ARCH_HISI || COMPILE_TEST
+       select RESET_HISI
        default ARCH_HISI
        help
          Build the clock driver for hi3559a.
index 800b2fe..b2c142f 100644 (file)
@@ -467,7 +467,7 @@ DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK,
 
 static struct clk_smd_rpm *msm8936_clks[] = {
        [RPM_SMD_PCNOC_CLK]             = &msm8916_pcnoc_clk,
-       [RPM_SMD_PCNOC_A_CLK]           = &msm8916_pcnoc_clk,
+       [RPM_SMD_PCNOC_A_CLK]           = &msm8916_pcnoc_a_clk,
        [RPM_SMD_SNOC_CLK]              = &msm8916_snoc_clk,
        [RPM_SMD_SNOC_A_CLK]            = &msm8916_snoc_a_clk,
        [RPM_SMD_BIMC_CLK]              = &msm8916_bimc_clk,
index 316912d..4f2c330 100644 (file)
@@ -194,6 +194,15 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
        gate_ops->disable(gate_hw);
 }
 
+static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw)
+{
+       struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+       const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+       struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+       gate_ops->disable_unused(gate_hw);
+}
+
 static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
 {
        struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -218,6 +227,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
        .is_enabled = clk_sdmmc_mux_is_enabled,
        .enable = clk_sdmmc_mux_enable,
        .disable = clk_sdmmc_mux_disable,
+       .disable_unused = clk_sdmmc_mux_disable_unused,
        .restore_context = clk_sdmmc_mux_restore_context,
 };
 
index 7b91060..d9262db 100644 (file)
@@ -382,8 +382,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
        alt_recent = idx_recent_sum > NR_RECENT / 2;
        if (alt_recent || alt_intercepts) {
-               s64 last_enabled_span_ns = duration_ns;
-               int last_enabled_idx = idx;
+               s64 first_suitable_span_ns = duration_ns;
+               int first_suitable_idx = idx;
 
                /*
                 * Look for the deepest idle state whose target residency had
@@ -397,37 +397,51 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                intercept_sum = 0;
                recent_sum = 0;
 
-               for (i = idx - 1; i >= idx0; i--) {
+               for (i = idx - 1; i >= 0; i--) {
                        struct teo_bin *bin = &cpu_data->state_bins[i];
                        s64 span_ns;
 
                        intercept_sum += bin->intercepts;
                        recent_sum += bin->recent;
 
+                       span_ns = teo_middle_of_bin(i, drv);
+
+                       if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
+                           (!alt_intercepts ||
+                            2 * intercept_sum > idx_intercept_sum)) {
+                               if (teo_time_ok(span_ns) &&
+                                   !dev->states_usage[i].disable) {
+                                       idx = i;
+                                       duration_ns = span_ns;
+                               } else {
+                                       /*
+                                        * The current state is too shallow or
+                                        * disabled, so take the first enabled
+                                        * deeper state with suitable time span.
+                                        */
+                                       idx = first_suitable_idx;
+                                       duration_ns = first_suitable_span_ns;
+                               }
+                               break;
+                       }
+
                        if (dev->states_usage[i].disable)
                                continue;
 
-                       span_ns = teo_middle_of_bin(i, drv);
                        if (!teo_time_ok(span_ns)) {
                                /*
-                                * The current state is too shallow, so select
-                                * the first enabled deeper state.
+                                * The current state is too shallow, but if an
+                                * alternative candidate state has been found,
+                                * it may still turn out to be a better choice.
                                 */
-                               duration_ns = last_enabled_span_ns;
-                               idx = last_enabled_idx;
-                               break;
-                       }
+                               if (first_suitable_idx != idx)
+                                       continue;
 
-                       if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
-                           (!alt_intercepts ||
-                            2 * intercept_sum > idx_intercept_sum)) {
-                               idx = i;
-                               duration_ns = span_ns;
                                break;
                        }
 
-                       last_enabled_span_ns = span_ns;
-                       last_enabled_idx = i;
+                       first_suitable_span_ns = span_ns;
+                       first_suitable_idx = i;
                }
        }
 
index 26482c7..fc708be 100644 (file)
@@ -294,6 +294,14 @@ struct idxd_desc {
        struct idxd_wq *wq;
 };
 
+/*
+ * This is software defined error for the completion status. We overload the error code
+ * that will never appear in completion status and only SWERR register.
+ */
+enum idxd_completion_status {
+       IDXD_COMP_DESC_ABORT = 0xff,
+};
+
 #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
 #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
 
@@ -482,4 +490,10 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
+{
+       idxd_dma_complete_txd(desc, reason);
+       idxd_free_desc(desc->wq, desc);
+}
+
 #endif
index c8ae41d..c0f4c04 100644 (file)
@@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
                spin_lock_init(&idxd->irq_entries[i].list_lock);
        }
 
+       idxd_msix_perm_setup(idxd);
+
        irq_entry = &idxd->irq_entries[0];
        rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
                                  0, "idxd-misc", irq_entry);
@@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        }
 
        idxd_unmask_error_interrupts(idxd);
-       idxd_msix_perm_setup(idxd);
        return 0;
 
  err_wq_irqs:
@@ -162,6 +163,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
  err_misc_irq:
        /* Disable error interrupt generation */
        idxd_mask_error_interrupts(idxd);
+       idxd_msix_perm_clear(idxd);
  err_irq_entries:
        pci_free_irq_vectors(pdev);
        dev_err(dev, "No usable interrupts\n");
@@ -758,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev)
        for (i = 0; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];
                synchronize_irq(irq_entry->vector);
-               free_irq(irq_entry->vector, irq_entry);
                if (i == 0)
                        continue;
                idxd_flush_pending_llist(irq_entry);
                idxd_flush_work_list(irq_entry);
        }
-
-       idxd_msix_perm_clear(idxd);
-       idxd_release_int_handles(idxd);
-       pci_free_irq_vectors(pdev);
-       pci_iounmap(pdev, idxd->reg_base);
-       pci_disable_device(pdev);
-       destroy_workqueue(idxd->wq);
+       flush_workqueue(idxd->wq);
 }
 
 static void idxd_remove(struct pci_dev *pdev)
 {
        struct idxd_device *idxd = pci_get_drvdata(pdev);
+       struct idxd_irq_entry *irq_entry;
+       int msixcnt = pci_msix_vec_count(pdev);
+       int i;
 
        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        idxd_unregister_devices(idxd);
-       perfmon_pmu_remove(idxd);
+
+       for (i = 0; i < msixcnt; i++) {
+               irq_entry = &idxd->irq_entries[i];
+               free_irq(irq_entry->vector, irq_entry);
+       }
+       idxd_msix_perm_clear(idxd);
+       idxd_release_int_handles(idxd);
+       pci_free_irq_vectors(pdev);
+       pci_iounmap(pdev, idxd->reg_base);
        iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+       pci_disable_device(pdev);
+       destroy_workqueue(idxd->wq);
+       perfmon_pmu_remove(idxd);
+       device_unregister(&idxd->conf_dev);
 }
 
 static struct pci_driver idxd_pci_driver = {
index ae68e1e..4e3a719 100644 (file)
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
        return false;
 }
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-       idxd_dma_complete_txd(desc, reason);
-       idxd_free_desc(desc->wq, desc);
-}
-
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                                     enum irq_work_type wtype,
                                     int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                reason = IDXD_COMPLETE_DEV_FAIL;
 
        llist_for_each_entry_safe(desc, t, head, llnode) {
-               if (desc->completion->status) {
-                       if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+               u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+               if (status) {
+                       if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                               complete_desc(desc, IDXD_COMPLETE_ABORT);
+                               (*processed)++;
+                               continue;
+                       }
+
+                       if (unlikely(status != DSA_COMP_SUCCESS))
                                match_fault(desc, data);
                        complete_desc(desc, reason);
                        (*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
 
        list_for_each_entry(desc, &flist, list) {
-               if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+               u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+               if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                       complete_desc(desc, IDXD_COMPLETE_ABORT);
+                       continue;
+               }
+
+               if (unlikely(status != DSA_COMP_SUCCESS))
                        match_fault(desc, data);
                complete_desc(desc, reason);
        }
index 19afb62..36c9c1a 100644 (file)
@@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
         * Descriptor completion vectors are 1...N for MSIX. We will round
         * robin through the N vectors.
         */
-       wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+       wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
        if (!idxd->int_handles) {
                desc->hw->int_handle = wq->vec_ptr;
        } else {
-               desc->vector = wq->vec_ptr;
                /*
                 * int_handles are only for descriptor completion. However for device
                 * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
@@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
 }
 
+static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                                        struct idxd_desc *desc)
+{
+       struct idxd_desc *d, *n;
+
+       lockdep_assert_held(&ie->list_lock);
+       list_for_each_entry_safe(d, n, &ie->work_list, list) {
+               if (d == desc) {
+                       list_del(&d->list);
+                       return d;
+               }
+       }
+
+       /*
+        * At this point, the desc that needs to be aborted is held by the
+        * completion handler, which has taken it off the pending list but has not
+        * yet added it to the work list. It will be cleaned up by the interrupt
+        * handler when it sees IDXD_COMP_DESC_ABORT as the completion status.
+        */
+       return NULL;
+}
+
+static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                            struct idxd_desc *desc)
+{
+       struct idxd_desc *d, *t, *found = NULL;
+       struct llist_node *head;
+       unsigned long flags;
+
+       desc->completion->status = IDXD_COMP_DESC_ABORT;
+       /*
+        * Grab the list lock so it will block the irq thread handler. This allows the
+        * abort code to locate the descriptor that needs to be aborted.
+        */
+       spin_lock_irqsave(&ie->list_lock, flags);
+       head = llist_del_all(&ie->pending_llist);
+       if (head) {
+               llist_for_each_entry_safe(d, t, head, llnode) {
+                       if (d == desc) {
+                               found = desc;
+                               continue;
+                       }
+                       list_add_tail(&d->list, &ie->work_list);
+               }
+       }
+
+       if (!found)
+               found = list_abort_desc(wq, ie, desc);
+       spin_unlock_irqrestore(&ie->list_lock, flags);
+
+       if (found)
+               complete_desc(found, IDXD_COMPLETE_ABORT);
+}
+
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 {
        struct idxd_device *idxd = wq->idxd;
+       struct idxd_irq_entry *ie = NULL;
        void __iomem *portal;
        int rc;
 
@@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
         * even on UP because the recipient is a device.
         */
        wmb();
+
+       /*
+        * Queue the descriptor on the lockless pending list of the
+        * irq_entry that the descriptor was assigned to.
+        */
+       if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+               ie = &idxd->irq_entries[desc->vector];
+               llist_add(&desc->llnode, &ie->pending_llist);
+       }
+
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
@@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
                 * device is not accepting descriptor at all.
                 */
                rc = enqcmds(portal, desc->hw);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (ie)
+                               llist_abort_desc(wq, ie, desc);
                        return rc;
+               }
        }
 
        percpu_ref_put(&wq->wq_active);
-
-       /*
-        * Pending the descriptor to the lockless list for the irq_entry
-        * that we designated the descriptor to.
-        */
-       if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
-               int vec;
-
-               /*
-                * If the driver is on host kernel, it would be the value
-                * assigned to interrupt handle, which is index for MSIX
-                * vector. If it's guest then can't use the int_handle since
-                * that is the index to IMS for the entire device. The guest
-                * device local index will be used.
-                */
-               vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
-               llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
-       }
-
        return 0;
 }
index 0460d58..bb4df63 100644 (file)
@@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd)
 
                device_unregister(&group->conf_dev);
        }
-
-       device_unregister(&idxd->conf_dev);
 }
 
 int idxd_register_bus_type(void)
index 7f116bb..2ddc31e 100644 (file)
@@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                dma_length += sg_dma_len(sg);
        }
 
+       imxdma_config_write(chan, &imxdmac->config, direction);
+
        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
index ec00b20..ac61ecd 100644 (file)
@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                return NULL;
 
        ofdma_target = of_dma_find_controller(&dma_spec_target);
-       if (!ofdma_target)
-               return NULL;
+       if (!ofdma_target) {
+               ofdma->dma_router->route_free(ofdma->dma_router->dev,
+                                             route_data);
+               chan = ERR_PTR(-EPROBE_DEFER);
+               goto err;
+       }
 
        chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
        if (IS_ERR_OR_NULL(chan)) {
@@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                }
        }
 
+err:
        /*
         * Need to put the node back since the ofdma->of_dma_route_allocate
         * has taken it for generating the new, translated dma_spec
index 8f7ceb6..1cc0690 100644 (file)
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
 
 error:
        of_dma_controller_free(pdev->dev.of_node);
-       pm_runtime_put(&pdev->dev);
 error_pm:
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
 }
index f54ecb1..7dd1d3d 100644 (file)
@@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
 
        chan->config_init = false;
 
-       ret = pm_runtime_get_sync(dmadev->ddev.dev);
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
        if (ret < 0)
                return ret;
 
@@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev)
        struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
        int id, ret, scr;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index ef0d055..a421643 100644 (file)
@@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 
        /* Set dma request */
        spin_lock_irqsave(&dmamux->lock, flags);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
                spin_unlock_irqrestore(&dmamux->lock, flags);
                goto error;
@@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev)
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
        int i, ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
@@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev)
        if (ret < 0)
                return ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index 16b1965..d6b8a20 100644 (file)
@@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
        writel(0, xc->reg_ch_base + XDMAC_TSS);
 
        /* wait until transfer is stopped */
-       return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
-                                 !(val & XDMAC_STAT_TENF), 100, 1000);
+       return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
+                                        !(val & XDMAC_STAT_TENF), 100, 1000);
 }
 
 /* xc->vc.lock must be held by caller */
index 75c0b8e..4b9530a 100644 (file)
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
        bool genlock;
        bool err;
        bool idle;
+       bool terminating;
        struct tasklet_struct tasklet;
        struct xilinx_vdma_config config;
        bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
                /* Run any dependencies, then free the descriptor */
                dma_run_dependencies(&desc->async_tx);
                xilinx_dma_free_tx_descriptor(chan, desc);
+
+               /*
+                * While we ran a callback the user called a terminate function,
+                * which takes care of cleaning up any remaining descriptors
+                */
+               if (chan->terminating)
+                       break;
        }
 
        spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        if (desc->cyclic)
                chan->cyclic = true;
 
+       chan->terminating = false;
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 
        xilinx_dma_chan_reset(chan);
        /* Remove and free all of the descriptors in the lists */
+       chan->terminating = true;
        xilinx_dma_free_descriptors(chan);
        chan->idle = true;
 
index ed10da5..a5bf4c3 100644 (file)
@@ -212,10 +212,9 @@ static int tee_bnxt_fw_probe(struct device *dev)
 
        pvt_data.dev = dev;
 
-       fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
-                                   TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
        if (IS_ERR(fw_shm_pool)) {
-               dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+               dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
                err = PTR_ERR(fw_shm_pool);
                goto out_sess;
        }
@@ -242,6 +241,14 @@ static int tee_bnxt_fw_remove(struct device *dev)
        return 0;
 }
 
+static void tee_bnxt_fw_shutdown(struct device *dev)
+{
+       tee_shm_free(pvt_data.fw_shm_pool);
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+       tee_client_close_context(pvt_data.ctx);
+       pvt_data.ctx = NULL;
+}
+
 static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
        {UUID_INIT(0x6272636D, 0x2019, 0x0716,
                    0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
@@ -257,6 +264,7 @@ static struct tee_client_driver tee_bnxt_fw_driver = {
                .bus            = &tee_bus_type,
                .probe          = tee_bnxt_fw_probe,
                .remove         = tee_bnxt_fw_remove,
+               .shutdown       = tee_bnxt_fw_shutdown,
        },
 };
 
index 4299145..587c82b 100644 (file)
@@ -953,6 +953,8 @@ static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
                return 0;
 
        priv->cpu = target;
+       perf_pmu_migrate_context(&priv->pmu, cpu, target);
+
        return 0;
 }
 
index 4b9157a..50b321a 100644 (file)
@@ -405,7 +405,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 
        ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
                               mpc8xxx_gpio_irq_cascade,
-                              IRQF_SHARED, "gpio-cascade",
+                              IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
                               mpc8xxx_gc);
        if (ret) {
                dev_err(&pdev->dev,
index 5022e0a..0f5d17f 100644 (file)
@@ -238,8 +238,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
        struct resource *res;
        int ret, irq;
 
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
+       irq = platform_get_irq_optional(pdev, 0);
+       if (irq < 0 && irq != -ENXIO)
                return irq;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -278,7 +278,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 
        pm_runtime_enable(&pdev->dev);
 
-       if (irq) {
+       if (irq > 0) {
                struct irq_chip *irq_chip = &gpio->irq_chip;
                u8 irq_status;
 
index 6cc0d4f..4137e84 100644 (file)
@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
                        return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
index 361b86b..5ed8381 100644 (file)
@@ -1213,6 +1213,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
        {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
 
+       /* BEIGE_GOBY */
+       {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+
        {0, 0, 0}
 };
 
index 59e0fef..acfa207 100644 (file)
@@ -54,11 +54,12 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
        struct drm_mm_node *node;
 
-       if (!res) {
+       if (!res || res->mem_type == TTM_PL_SYSTEM) {
                cur->start = start;
                cur->size = size;
                cur->remaining = size;
                cur->node = NULL;
+               WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
                return;
        }
 
index 044076e..6a23c68 100644 (file)
@@ -1295,6 +1295,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
                return false;
 }
 
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
+{
+       if ((adev->asic_type == CHIP_RENOIR) &&
+           (adev->gfx.me_fw_version >= 0x000000a5) &&
+           (adev->gfx.me_feature_version >= 52))
+               return true;
+       else
+               return false;
+}
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
        if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -3675,7 +3685,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
-               WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+               /* If GC has entered CGPG, ringing doorbell > first page
+                * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
+                * workaround this issue. And this change has to align with firmware
+                * update.
+                */
+               if (check_if_enlarge_doorbell_range(adev))
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+                                       (adev->doorbell.size - 4));
+               else
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
                                        (adev->doorbell_index.userqueue_end * 2) << 2);
        }
 
index b53f49a..c0ae73b 100644 (file)
@@ -1548,6 +1548,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        }
 
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
@@ -1561,7 +1562,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                         adev->dm.dmcub_fw_version);
        }
 
-       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;
index c6f494f..6185f94 100644 (file)
@@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
        for (i = 0; i < context->stream_count; i++) {
                const struct dc_stream_state *stream = context->streams[i];
 
+               /* Extend the WA to DP for Linux*/
                if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
                                stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
-                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
+                               stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
                        tmds_present = true;
        }
 
index 9fb8c46..a6d0fd2 100644 (file)
@@ -3602,29 +3602,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 bool dp_retrieve_lttpr_cap(struct dc_link *link)
 {
        uint8_t lttpr_dpcd_data[6];
-       bool vbios_lttpr_enable = false;
-       bool vbios_lttpr_interop = false;
-       struct dc_bios *bios = link->dc->ctx->dc_bios;
+       bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+       bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
        enum dc_status status = DC_ERROR_UNEXPECTED;
        bool is_lttpr_present = false;
 
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
-       /* Query BIOS to determine if LTTPR functionality is forced on by system */
-       if (bios->funcs->get_lttpr_caps) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_lttpr_enable = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
-               vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
-       }
-
-       if (bios->funcs->get_lttpr_interop) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_interop_enabled = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
-               vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
-       }
 
        /*
         * Logic to determine LTTPR mode
index 8dcea8f..af7b601 100644 (file)
@@ -183,6 +183,8 @@ struct dc_caps {
        unsigned int cursor_cache_size;
        struct dc_plane_cap planes[MAX_PLANES];
        struct dc_color_caps color;
+       bool vbios_lttpr_aware;
+       bool vbios_lttpr_enable;
 };
 
 struct dc_bug_wa {
index 7fa9fc6..f6e747f 100644 (file)
@@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
 
        REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
                        MASTER_UPDATE_LOCK_DB_X,
-                       h_blank_start - 200 - 1,
+                       (h_blank_start - 200 - 1) / optc1->opp_count,
                        MASTER_UPDATE_LOCK_DB_Y,
                        v_blank_start - 1);
 }
index 596c97d..253654d 100644 (file)
@@ -2617,6 +2617,26 @@ static bool dcn30_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               if (ctx->dc_bios->funcs->get_lttpr_interop) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_interop_enabled = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+                                       &is_vbios_interop_enabled);
+                       dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 833ab13..dc7823d 100644 (file)
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
 
                .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
                .num_states = 1,
-               .sr_exit_time_us = 26.5,
-               .sr_enter_plus_exit_time_us = 31,
+               .sr_exit_time_us = 35.5,
+               .sr_enter_plus_exit_time_us = 40,
                .urgent_latency_us = 4.0,
                .urgent_latency_pixel_data_only_us = 4.0,
                .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
index 38c010a..cd3248d 100644 (file)
@@ -1968,6 +1968,22 @@ static bool dcn31_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               /* interop bit is implicit */
+               {
+                       dc->caps.vbios_lttpr_aware = true;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 973de34..27c7fa3 100644 (file)
@@ -267,11 +267,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
 
 bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)
 {
-       uint32_t is_hw_init;
+       union dmub_fw_boot_status status;
+       uint32_t is_enable;
 
-       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);
+       status.all = REG_READ(DMCUB_SCRATCH0);
+       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
 
-       return is_hw_init != 0;
+       return is_enable != 0 && status.bits.dal_fw;
 }
 
 bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
index 3fea243..dc91eb6 100644 (file)
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07
 
 /* MP Apertures */
index 77f1911..3acb0b6 100644 (file)
@@ -138,7 +138,7 @@ void i915_globals_unpark(void)
        atomic_inc(&active);
 }
 
-static void __exit __i915_globals_flush(void)
+static void  __i915_globals_flush(void)
 {
        atomic_inc(&active); /* skip shrinking */
 
@@ -148,7 +148,7 @@ static void __exit __i915_globals_flush(void)
        atomic_dec(&active);
 }
 
-void __exit i915_globals_exit(void)
+void i915_globals_exit(void)
 {
        GEM_BUG_ON(atomic_read(&active));
 
index 83b500b..2880ec5 100644 (file)
@@ -1195,6 +1195,7 @@ static int __init i915_init(void)
        err = pci_register_driver(&i915_pci_driver);
        if (err) {
                i915_pmu_exit();
+               i915_globals_exit();
                return err;
        }
 
index 94fde5c..41186c1 100644 (file)
@@ -422,7 +422,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN12_HCP_SFC_LOCK_ACK_BIT           REG_BIT(1)
 #define   GEN12_HCP_SFC_USAGE_BIT                      REG_BIT(0)
 
-#define GEN12_SFC_DONE(n)              _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE(n)              _MMIO(0x1cc000 + (n) * 0x1000)
 #define GEN12_SFC_DONE_MAX             4
 
 #define RING_PP_DIR_BASE(base)         _MMIO((base) + 0x228)
index 96ea1a2..f54392e 100644 (file)
@@ -203,6 +203,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
        unsigned long status, val, val1;
        int plane_id, dma0_state, dma1_state;
        struct kmb_drm_private *kmb = to_kmb(dev);
+       u32 ctrl = 0;
 
        status = kmb_read_lcd(kmb, LCD_INT_STATUS);
 
@@ -227,6 +228,19 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
                                kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
                                                    kmb->plane_status[plane_id].ctrl);
 
+                               ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+                               if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
+                                   LCD_CTRL_VL2_ENABLE |
+                                   LCD_CTRL_GL1_ENABLE |
+                                   LCD_CTRL_GL2_ENABLE))) {
+                                       /* If no LCD layers are using DMA,
+                                        * then disable DMA pipelined AXI read
+                                        * transactions.
+                                        */
+                                       kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
+                                                           LCD_CTRL_PIPELINE_DMA);
+                               }
+
                                kmb->plane_status[plane_id].disable = false;
                        }
                }
@@ -411,10 +425,10 @@ static const struct drm_driver kmb_driver = {
        .fops = &fops,
        DRM_GEM_CMA_DRIVER_OPS_VMAP,
        .name = "kmb-drm",
-       .desc = "KEEMBAY DISPLAY DRIVER ",
-       .date = "20201008",
-       .major = 1,
-       .minor = 0,
+       .desc = "KEEMBAY DISPLAY DRIVER",
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
 };
 
 static int kmb_remove(struct platform_device *pdev)
index 02e8067..ebbaa5f 100644 (file)
 #define KMB_MAX_HEIGHT                 1080 /*Max height in pixels */
 #define KMB_MIN_WIDTH                   1920 /*Max width in pixels */
 #define KMB_MIN_HEIGHT                  1080 /*Max height in pixels */
+
+#define DRIVER_DATE                    "20210223"
+#define DRIVER_MAJOR                   1
+#define DRIVER_MINOR                   1
+
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
index d5b6195..ecee678 100644 (file)
@@ -427,8 +427,14 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 
        kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
 
-       /* FIXME no doc on how to set output format,these values are
-        * taken from the Myriadx tests
+       /* Enable pipeline AXI read transactions for the DMA
+        * after setting graphics layers. This must be done
+        * in a separate write cycle.
+        */
+       kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
+       /* FIXME no doc on how to set output format, these values are taken
+        * from the Myriadx tests
         */
        out_format |= LCD_OUTF_FORMAT_RGB888;
 
@@ -526,6 +532,11 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
                plane->id = i;
        }
 
+       /* Disable pipeline AXI read transactions for the DMA
+        * prior to setting graphics layers
+        */
+       kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
        return primary;
 cleanup:
        drmm_kfree(drm, plane);
index d1cef3b..5652d98 100644 (file)
@@ -492,7 +492,7 @@ struct vmw_private {
        resource_size_t vram_start;
        resource_size_t vram_size;
        resource_size_t prim_bb_mem;
-       void __iomem *rmmio;
+       u32 __iomem *rmmio;
        u32 *fifo_mem;
        resource_size_t fifo_mem_size;
        uint32_t fb_max_width;
index 515a7e9..5d3b8b8 100644 (file)
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
        return ret;
 }
 
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+       struct ib_qp_attr qp_attr;
+       int qp_attr_mask, ret;
+
+       qp_attr.qp_state = IB_QPS_INIT;
+       ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+       if (ret)
+               return ret;
+
+       return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
 {
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
-       int ret = 0;
+       int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 
        if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
+       else
+               ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto out_destroy;
 
index 6c8c910..c7e8d7b 100644 (file)
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+       if (refcount_dec_and_test(&chp->refcnt))
+               complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
        struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
        chp = to_c4iw_cq(ib_cq);
 
        xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-       refcount_dec(&chp->refcnt);
-       wait_event(chp->wait, !refcount_read(&chp->refcnt));
+       c4iw_cq_rem_ref(chp);
+       wait_for_completion(&chp->cq_rel_comp);
 
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        refcount_set(&chp->refcnt, 1);
-       init_waitqueue_head(&chp->wait);
+       init_completion(&chp->cq_rel_comp);
        ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
        if (ret)
                goto err_destroy_cq;
index 7798d09..34211a5 100644 (file)
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                break;
        }
 done:
-       if (refcount_dec_and_test(&chp->refcnt))
-               wake_up(&chp->wait);
+       c4iw_cq_rem_ref(chp);
        c4iw_qp_rem_ref(&qhp->ibqp);
 out:
        return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-               if (refcount_dec_and_test(&chp->refcnt))
-                       wake_up(&chp->wait);
+               c4iw_cq_rem_ref(chp);
        } else {
                pr_debug("unknown cqid 0x%x\n", qid);
                xa_unlock_irqrestore(&dev->cqs, flag);
index 3883af3..ac5f581 100644 (file)
@@ -428,7 +428,7 @@ struct c4iw_cq {
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion cq_rel_comp;
        struct c4iw_wr_wait *wr_waitp;
 };
 
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
index 8f68cc3..84f3f2b 100644 (file)
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 
        hr_cmd->context =
                kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
-       if (!hr_cmd->context)
+       if (!hr_cmd->context) {
+               hr_dev->cmd_mod = 0;
                return -ENOMEM;
+       }
 
        for (i = 0; i < hr_cmd->max_cmds; ++i) {
                hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
        spin_lock_init(&hr_cmd->context_lock);
 
        hr_cmd->use_events = 1;
-       down(&hr_cmd->poll_sem);
 
        return 0;
 }
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 
        kfree(hr_cmd->context);
        hr_cmd->use_events = 0;
-
-       up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
index 078a971..cc6eab1 100644 (file)
@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
        if (hr_dev->cmd_mod) {
                ret = hns_roce_cmd_use_events(hr_dev);
-               if (ret) {
+               if (ret)
                        dev_warn(dev,
                                 "Cmd event  mode failed, set back to poll!\n");
-                       hns_roce_cmd_use_polling(hr_dev);
-               }
        }
 
        ret = hns_roce_init_hem(hr_dev);
index 3263851..3f1c5a4 100644 (file)
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                 */
                spin_unlock_irq(&ent->lock);
                need_delay = need_resched() || someone_adding(cache) ||
-                            time_after(jiffies,
-                                       READ_ONCE(cache->last_add) + 300 * HZ);
+                            !time_after(jiffies,
+                                        READ_ONCE(cache->last_add) + 300 * HZ);
                spin_lock_irq(&ent->lock);
                if (ent->disabled)
                        goto out;
index dec9292..5ac27f2 100644 (file)
@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
 
        iph->version    =       IPVERSION;
        iph->ihl        =       sizeof(struct iphdr) >> 2;
+       iph->tot_len    =       htons(skb->len);
        iph->frag_off   =       df;
        iph->protocol   =       proto;
        iph->tos        =       tos;
index 3743dc3..360ec67 100644 (file)
@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
                pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                return RESPST_ERR_MALFORMED_WQE;
        }
-       size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+       size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);
 
        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
index 8a1e70e..7887941 100644 (file)
@@ -403,7 +403,7 @@ struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
 {
        struct icc_path **ptr, *path;
 
-       ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+       ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);
 
@@ -973,9 +973,14 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
        }
        node->avg_bw = node->init_avg;
        node->peak_bw = node->init_peak;
+
+       if (provider->pre_aggregate)
+               provider->pre_aggregate(node);
+
        if (provider->aggregate)
                provider->aggregate(node, 0, node->init_avg, node->init_peak,
                                    &node->avg_bw, &node->peak_bw);
+
        provider->set(node, node);
        node->avg_bw = 0;
        node->peak_bw = 0;
@@ -1106,6 +1111,8 @@ void icc_sync_state(struct device *dev)
                dev_dbg(p->dev, "interconnect provider is in synced state\n");
                list_for_each_entry(n, &p->nodes, node_list) {
                        if (n->init_avg || n->init_peak) {
+                               n->init_avg = 0;
+                               n->init_peak = 0;
                                aggregate_requests(n);
                                p->set(n, n);
                        }
index bf01d09..27cc5f0 100644 (file)
@@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
 {
        size_t i;
        struct qcom_icc_node *qn;
+       struct qcom_icc_provider *qp;
 
        qn = node->data;
+       qp = to_qcom_provider(node->provider);
 
        for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
                qn->sum_avg[i] = 0;
                qn->max_peak[i] = 0;
        }
+
+       for (i = 0; i < qn->num_bcms; i++)
+               qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
 }
 EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
 
@@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
 {
        size_t i;
        struct qcom_icc_node *qn;
-       struct qcom_icc_provider *qp;
 
        qn = node->data;
-       qp = to_qcom_provider(node->provider);
 
        if (!tag)
                tag = QCOM_ICC_TAG_ALWAYS;
@@ -57,14 +60,16 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
                        qn->sum_avg[i] += avg_bw;
                        qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
                }
+
+               if (node->init_avg || node->init_peak) {
+                       qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
+                       qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
+               }
        }
 
        *agg_avg += avg_bw;
        *agg_peak = max_t(u32, *agg_peak, peak_bw);
 
-       for (i = 0; i < qn->num_bcms; i++)
-               qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
@@ -79,7 +84,6 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
 int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 {
        struct qcom_icc_provider *qp;
-       struct qcom_icc_node *qn;
        struct icc_node *node;
 
        if (!src)
@@ -88,12 +92,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
                node = src;
 
        qp = to_qcom_provider(node->provider);
-       qn = node->data;
-
-       qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
-                                                node->avg_bw);
-       qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
-                                                 node->peak_bw);
 
        qcom_icc_bcm_voter_commit(qp->voter);
 
index 51f2547..3c44c4b 100644 (file)
@@ -474,8 +474,6 @@ static void raid1_end_write_request(struct bio *bio)
                /*
                 * When the device is faulty, it is not necessary to
                 * handle write error.
-                * For failfast, this is the only remaining device,
-                * We need to retry the write without FailFast.
                 */
                if (!test_bit(Faulty, &rdev->flags))
                        set_bit(R1BIO_WriteError, &r1_bio->state);
index 16977e8..07119d7 100644 (file)
@@ -471,12 +471,12 @@ static void raid10_end_write_request(struct bio *bio)
                        /*
                         * When the device is faulty, it is not necessary to
                         * handle write error.
-                        * For failfast, this is the only remaining device,
-                        * We need to retry the write without FailFast.
                         */
                        if (!test_bit(Faulty, &rdev->flags))
                                set_bit(R10BIO_WriteError, &r10_bio->state);
                        else {
+                               /* Fail the request */
+                               set_bit(R10BIO_Degraded, &r10_bio->state);
                                r10_bio->devs[slot].bio = NULL;
                                to_put = bio;
                                dec_rdev = 1;
index 02281d1..508ac29 100644 (file)
@@ -1573,6 +1573,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
                  struct media_request *req)
 {
        struct vb2_buffer *vb;
+       enum vb2_buffer_state orig_state;
        int ret;
 
        if (q->error) {
@@ -1673,6 +1674,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
         * Add to the queued buffers list, a buffer will stay on it until
         * dequeued in dqbuf.
         */
+       orig_state = vb->state;
        list_add_tail(&vb->queued_entry, &q->queued_list);
        q->queued_count++;
        q->waiting_for_buffers = false;
@@ -1703,8 +1705,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
        if (q->streaming && !q->start_streaming_called &&
            q->queued_count >= q->min_buffers_needed) {
                ret = vb2_start_streaming(q);
-               if (ret)
+               if (ret) {
+                       /*
+                        * Since vb2_core_qbuf will return with an error,
+                        * we should return it to state DEQUEUED since
+                        * the error indicates that the buffer wasn't queued.
+                        */
+                       list_del(&vb->queued_entry);
+                       q->queued_count--;
+                       vb->state = orig_state;
                        return ret;
+               }
        }
 
        dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
index 99b5121..dda2f27 100644 (file)
@@ -8,6 +8,7 @@ config VIDEO_ATMEL_ISC
        select VIDEOBUF2_DMA_CONTIG
        select REGMAP_MMIO
        select V4L2_FWNODE
+       select VIDEO_ATMEL_ISC_BASE
        help
           This module makes the ATMEL Image Sensor Controller available
           as a v4l2 device.
@@ -19,10 +20,17 @@ config VIDEO_ATMEL_XISC
        select VIDEOBUF2_DMA_CONTIG
        select REGMAP_MMIO
        select V4L2_FWNODE
+       select VIDEO_ATMEL_ISC_BASE
        help
           This module makes the ATMEL eXtended Image Sensor Controller
           available as a v4l2 device.
 
+config VIDEO_ATMEL_ISC_BASE
+       tristate
+       default n
+       help
+         ATMEL ISC and XISC common code base.
+
 config VIDEO_ATMEL_ISI
        tristate "ATMEL Image Sensor Interface (ISI) support"
        depends on VIDEO_V4L2 && OF
index c5c0155..46d264a 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
-atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
-atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o
+atmel-isc-objs = atmel-sama5d2-isc.o
+atmel-xisc-objs = atmel-sama7g5-isc.o
 
 obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o
 obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
 obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
index 19daa49..136ab7c 100644 (file)
@@ -378,6 +378,7 @@ int isc_clk_init(struct isc_device *isc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(isc_clk_init);
 
 void isc_clk_cleanup(struct isc_device *isc)
 {
@@ -392,6 +393,7 @@ void isc_clk_cleanup(struct isc_device *isc)
                        clk_unregister(isc_clk->clk);
        }
 }
+EXPORT_SYMBOL_GPL(isc_clk_cleanup);
 
 static int isc_queue_setup(struct vb2_queue *vq,
                            unsigned int *nbuffers, unsigned int *nplanes,
@@ -1578,6 +1580,7 @@ irqreturn_t isc_interrupt(int irq, void *dev_id)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(isc_interrupt);
 
 static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
 {
@@ -2212,6 +2215,7 @@ const struct v4l2_async_notifier_operations isc_async_ops = {
        .unbind = isc_async_unbind,
        .complete = isc_async_complete,
 };
+EXPORT_SYMBOL_GPL(isc_async_ops);
 
 void isc_subdev_cleanup(struct isc_device *isc)
 {
@@ -2224,6 +2228,7 @@ void isc_subdev_cleanup(struct isc_device *isc)
 
        INIT_LIST_HEAD(&isc->subdev_entities);
 }
+EXPORT_SYMBOL_GPL(isc_subdev_cleanup);
 
 int isc_pipeline_init(struct isc_device *isc)
 {
@@ -2264,6 +2269,7 @@ int isc_pipeline_init(struct isc_device *isc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(isc_pipeline_init);
 
 /* regmap configuration */
 #define ATMEL_ISC_REG_MAX    0xd5c
@@ -2273,4 +2279,9 @@ const struct regmap_config isc_regmap_config = {
        .val_bits       = 32,
        .max_register   = ATMEL_ISC_REG_MAX,
 };
+EXPORT_SYMBOL_GPL(isc_regmap_config);
 
+MODULE_AUTHOR("Songjun Wu");
+MODULE_AUTHOR("Eugen Hristev");
+MODULE_DESCRIPTION("Atmel ISC common code base");
+MODULE_LICENSE("GPL v2");
index 8370573..795a012 100644 (file)
@@ -37,7 +37,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
        } else {
                /* read */
                requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
-               pipe = usb_rcvctrlpipe(d->udev, 0);
+
+               /*
+                * Zero-length transfers must use usb_sndctrlpipe() and
+                * rtl28xxu_identify_state() uses a zero-length i2c read
+                * command to determine the chip type.
+                */
+               if (req->size)
+                       pipe = usb_rcvctrlpipe(d->udev, 0);
+               else
+                       pipe = usb_sndctrlpipe(d->udev, 0);
        }
 
        ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
@@ -612,9 +621,8 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d)
 static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
 {
        struct rtl28xxu_dev *dev = d_to_priv(d);
-       u8 buf[1];
        int ret;
-       struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf};
+       struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
 
        dev_dbg(&d->intf->dev, "\n");
 
index ca2ad77..6686192 100644 (file)
@@ -837,16 +837,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
                return 0;
        }
 
-       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+       /* In case of this switch we work with 32bit registers on top of 16bit
+        * bus. Some registers (for example access to forwarding database) have
+        * trigger bit on the first 16bit half of request, the result and
+        * configuration of request in the second half.
+        * To make it work properly, we should do the second part of transfer
+        * before the first one is done.
+        */
+       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+                                 val >> 16);
        if (ret < 0)
                goto error;
 
-       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
-                                 val >> 16);
+       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
        if (ret < 0)
                goto error;
 
        return 0;
+
 error:
        dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
        return ret;
index 56fead6..1477091 100644 (file)
@@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                        hostcmd = SJA1105_HOSTCMD_INVALIDATE;
        }
        sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+                                 enum packing_op op)
+{
+       int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+       sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 
        /* Hack - The hardware takes the 'index' field within
         * struct sja1105_l2_lookup_entry as the index on which this command
@@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
         * such that our API doesn't need to ask for a full-blown entry
         * structure when e.g. a delete is requested.
         */
-       sja1105_packing(buf, &cmd->index, 15, 6,
-                       SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
-}
-
-static void
-sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
-                                 enum packing_op op)
-{
-       int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
-
-       return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+       sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
 }
 
 static void
 sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                              enum packing_op op)
 {
-       int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+       int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+       sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 
-       return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+       sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
 }
 
 /* The switch is so retarded that it makes our command/entry abstraction
index e2dc997..8667c97 100644 (file)
@@ -1318,10 +1318,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                      const unsigned char *addr, u16 vid)
 {
-       struct sja1105_l2_lookup_entry l2_lookup = {0};
+       struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
        struct sja1105_private *priv = ds->priv;
        struct device *dev = ds->dev;
        int last_unused = -1;
+       int start, end, i;
        int bin, way, rc;
 
        bin = sja1105et_fdb_hash(priv, addr, vid);
@@ -1333,7 +1334,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                 * mask? If yes, we need to do nothing. If not, we need
                 * to rewrite the entry by adding this port to it.
                 */
-               if (l2_lookup.destports & BIT(port))
+               if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
                        return 0;
                l2_lookup.destports |= BIT(port);
        } else {
@@ -1364,6 +1365,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                                                     index, NULL, false);
                }
        }
+       l2_lookup.lockeds = true;
        l2_lookup.index = sja1105et_fdb_index(bin, way);
 
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
@@ -1372,6 +1374,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
        if (rc < 0)
                return rc;
 
+       /* Invalidate a dynamically learned entry if that exists */
+       start = sja1105et_fdb_index(bin, 0);
+       end = sja1105et_fdb_index(bin, way);
+
+       for (i = start; i < end; i++) {
+               rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+                                                i, &tmp);
+               if (rc == -ENOENT)
+                       continue;
+               if (rc)
+                       return rc;
+
+               if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+                       continue;
+
+               rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+                                                 i, NULL, false);
+               if (rc)
+                       return rc;
+
+               break;
+       }
+
        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
 
@@ -1413,32 +1438,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port,
 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid)
 {
-       struct sja1105_l2_lookup_entry l2_lookup = {0};
+       struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
        struct sja1105_private *priv = ds->priv;
        int rc, i;
 
        /* Search for an existing entry in the FDB table */
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
-       l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-       if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
-               l2_lookup.mask_vlanid = VLAN_VID_MASK;
-               l2_lookup.mask_iotag = BIT(0);
-       } else {
-               l2_lookup.mask_vlanid = 0;
-               l2_lookup.mask_iotag = 0;
-       }
+       l2_lookup.mask_vlanid = VLAN_VID_MASK;
        l2_lookup.destports = BIT(port);
 
+       tmp = l2_lookup;
+
        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
-                                        SJA1105_SEARCH, &l2_lookup);
-       if (rc == 0) {
-               /* Found and this port is already in the entry's
+                                        SJA1105_SEARCH, &tmp);
+       if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+               /* Found a static entry and this port is already in the entry's
                 * port mask => job done
                 */
-               if (l2_lookup.destports & BIT(port))
+               if ((tmp.destports & BIT(port)) && tmp.lockeds)
                        return 0;
+
+               l2_lookup = tmp;
+
                /* l2_lookup.index is populated by the switch in case it
                 * found something.
                 */
@@ -1460,16 +1483,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                dev_err(ds->dev, "FDB is full, cannot add entry.\n");
                return -EINVAL;
        }
-       l2_lookup.lockeds = true;
        l2_lookup.index = i;
 
 skip_finding_an_index:
+       l2_lookup.lockeds = true;
+
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                          l2_lookup.index, &l2_lookup,
                                          true);
        if (rc < 0)
                return rc;
 
+       /* The switch learns dynamic entries and looks up the FDB left to
+        * right. It is possible that our addition was concurrent with the
+        * dynamic learning of the same address, so now that the static entry
+        * has been installed, we are certain that address learning for this
+        * particular address has been turned off, so the dynamic entry either
+        * is in the FDB at an index smaller than the static one, or isn't (it
+        * can also be at a larger index, but in that case it is inactive
+        * because the static FDB entry will match first, and the dynamic one
+        * will eventually age out). Search for a dynamically learned address
+        * prior to our static one and invalidate it.
+        */
+       tmp = l2_lookup;
+
+       rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+                                        SJA1105_SEARCH, &tmp);
+       if (rc < 0) {
+               dev_err(ds->dev,
+                       "port %d failed to read back entry for %pM vid %d: %pe\n",
+                       port, addr, vid, ERR_PTR(rc));
+               return rc;
+       }
+
+       if (tmp.index < l2_lookup.index) {
+               rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+                                                 tmp.index, NULL, false);
+               if (rc < 0)
+                       return rc;
+       }
+
        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
 
@@ -1483,15 +1536,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
 
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
-       l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-       if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
-               l2_lookup.mask_vlanid = VLAN_VID_MASK;
-               l2_lookup.mask_iotag = BIT(0);
-       } else {
-               l2_lookup.mask_vlanid = 0;
-               l2_lookup.mask_iotag = 0;
-       }
+       l2_lookup.mask_vlanid = VLAN_VID_MASK;
        l2_lookup.destports = BIT(port);
 
        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
index 1a6ec1a..b5d954c 100644 (file)
@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        }
 
        /* Allocated memory for FW statistics  */
-       if (bnx2x_alloc_fw_stats_mem(bp))
+       rc = bnx2x_alloc_fw_stats_mem(bp);
+       if (rc)
                LOAD_ERROR_EXIT(bp, load_error0);
 
        /* request pf to initialize status blocks */
index 8aea707..7e4c498 100644 (file)
@@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev)
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
-       free_netdev(ndev);
 
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
+       free_netdev(ndev);
        return 0;
 }
 
index d12e21d..fa7a068 100644 (file)
@@ -530,6 +530,8 @@ err_trap_register:
                prestera_trap = &prestera_trap_items_arr[i];
                devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
        }
+       devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+                                      groups_count);
 err_groups_register:
        kfree(trap_data->trap_items_arr);
 err_trap_items_alloc:
index 9d485a9..cb68eaa 100644 (file)
  */
 #define VSTAX 73
 
-static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+#define ifh_encode_bitfield(ifh, value, pos, _width)                   \
+       ({                                                              \
+               u32 width = (_width);                                   \
+                                                                       \
+               /* Max width is 5 bytes - 40 bits. In worst case this will
+                * spread over 6 bytes - 48 bits
+                */                                                     \
+               compiletime_assert(width <= 40,                         \
+                                  "Unsupported width, must be <= 40"); \
+               __ifh_encode_bitfield((ifh), (value), (pos), width);    \
+       })
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
 {
        u8 *ifh_hdr = ifh;
        /* Calculate the Start IFH byte position of this IFH bit position */
        u32 byte = (35 - (pos / 8));
        /* Calculate the Start bit position in the Start IFH byte */
        u32 bit  = (pos % 8);
-       u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
-
-       /* Max width is 5 bytes - 40 bits. In worst case this will
-        * spread over 6 bytes - 48 bits
-        */
-       compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+       u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
 
        /* The b0-b7 goes into the start IFH byte */
        if (encode & 0xFF)
index 51b4b25..84f7dbe 100644 (file)
@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
                printk(version);
 #endif
 
-       i = pci_enable_device(pdev);
+       i = pcim_enable_device(pdev);
        if (i) return i;
 
        /* natsemi has a non-standard PM control register
@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        ioaddr = ioremap(iostart, iosize);
        if (!ioaddr) {
                i = -ENOMEM;
-               goto err_ioremap;
+               goto err_pci_request_regions;
        }
 
        /* Work around the dropped serial bit. */
@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
  err_register_netdev:
        iounmap(ioaddr);
 
- err_ioremap:
-       pci_release_regions(pdev);
-
  err_pci_request_regions:
        free_netdev(dev);
        return i;
@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
 
        NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
        unregister_netdev (dev);
-       pci_release_regions (pdev);
        iounmap(ioaddr);
        free_netdev (dev);
 }
index 82eef4c..7abd13e 100644 (file)
@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 
        kfree(vdev->vpaths);
 
-       /* we are safe to free it now */
-       free_netdev(dev);
-
        vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
                        buf);
        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
                             __func__, __LINE__);
+
+       /* we are safe to free it now */
+       free_netdev(dev);
 }
 
 /*
index 1b48244..8803faa 100644 (file)
@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
 
        /* Init to unknowns */
        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
        cmd->base.port = PORT_OTHER;
        cmd->base.speed = SPEED_UNKNOWN;
        cmd->base.duplex = DUPLEX_UNKNOWN;
index 2e62a2c..5630008 100644 (file)
@@ -501,6 +501,7 @@ struct qede_fastpath {
 #define QEDE_SP_HW_ERR                  4
 #define QEDE_SP_ARFS_CONFIG             5
 #define QEDE_SP_AER                    7
+#define QEDE_SP_DISABLE                        8
 
 #ifdef CONFIG_RFS_ACCEL
 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
index 01ac1e9..7c6064b 100644 (file)
@@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);
 
+       /* Disable execution of this deferred work once
+        * qede removal is in progress, this stop any future
+        * scheduling of sp_task.
+        */
+       if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+               return;
+
        /* The locking scheme depends on the specific flag:
         * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
         * ensure that ongoing flows are ended and new ones are not started.
@@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
 
        if (mode != QEDE_REMOVE_RECOVERY) {
+               set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
                unregister_netdev(ndev);
 
                cancel_delayed_work_sync(&edev->sp_task);
index 718539c..67a08cb 100644 (file)
@@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm
 
        for (i = 1; i <= common->port_num; i++) {
                struct am65_cpsw_port *port = am65_common_get_port(common, i);
-               struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+               struct am65_cpsw_ndev_priv *priv;
 
+               if (!port->ndev)
+                       continue;
+
+               priv = am65_ndev_to_priv(port->ndev);
                priv->offload_fwd_mark = set_val;
        }
 }
index e60e38c..11be6bc 100644 (file)
@@ -335,7 +335,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
        u64_stats_init(&mhi_netdev->stats.tx_syncp);
 
        /* Start MHI channels */
-       err = mhi_prepare_for_transfer(mhi_dev);
+       err = mhi_prepare_for_transfer(mhi_dev, 0);
        if (err)
                goto out_err;
 
index 4d53886..53bdd67 100644 (file)
@@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
 }
 
 static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
-                                           const u32 ksz_phy_id)
+                                           const bool ksz_8051)
 {
        int ret;
 
-       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
                return 0;
 
        ret = phy_read(phydev, MII_BMSR);
@@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
         * the switch does not.
         */
        ret &= BMSR_ERCAP;
-       if (ksz_phy_id == PHY_ID_KSZ8051)
+       if (ksz_8051)
                return ret;
        else
                return !ret;
@@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
 
 static int ksz8051_match_phy_device(struct phy_device *phydev)
 {
-       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+       return ksz8051_ksz8795_match_phy_device(phydev, true);
 }
 
 static int ksz8081_config_init(struct phy_device *phydev)
@@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev)
 
 static int ksz8795_match_phy_device(struct phy_device *phydev)
 {
-       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+       return ksz8051_ksz8795_match_phy_device(phydev, false);
 }
 
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
index 2548938..6d092d7 100644 (file)
@@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_link_ksettings ecmd;
-       int ladv, radv, ret;
+       int ladv, radv, ret, link;
        u32 buf;
 
        /* clear LAN78xx interrupt status */
@@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
        if (unlikely(ret < 0))
                return -EIO;
 
+       mutex_lock(&phydev->lock);
        phy_read_status(phydev);
+       link = phydev->link;
+       mutex_unlock(&phydev->lock);
 
-       if (!phydev->link && dev->link_on) {
+       if (!link && dev->link_on) {
                dev->link_on = false;
 
                /* reset MAC */
@@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                        return -EIO;
 
                del_timer(&dev->stat_monitor);
-       } else if (phydev->link && !dev->link_on) {
+       } else if (link && !dev->link_on) {
                dev->link_on = true;
 
                phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
 
 static u32 lan78xx_get_link(struct net_device *net)
 {
+       u32 link;
+
+       mutex_lock(&net->phydev->lock);
        phy_read_status(net->phydev);
+       link = net->phydev->link;
+       mutex_unlock(&net->phydev->lock);
 
-       return net->phydev->link;
+       return link;
 }
 
 static void lan78xx_get_drvinfo(struct net_device *net,
index 9a90718..652e9fc 100644 (file)
@@ -1,31 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *  Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
+ *  Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com)
  *
- *     ChangeLog:
- *             ....    Most of the time spent on reading sources & docs.
- *             v0.2.x  First official release for the Linux kernel.
- *             v0.3.0  Beutified and structured, some bugs fixed.
- *             v0.3.x  URBifying bulk requests and bugfixing. First relatively
- *                     stable release. Still can touch device's registers only
- *                     from top-halves.
- *             v0.4.0  Control messages remained unurbified are now URBs.
- *                     Now we can touch the HW at any time.
- *             v0.4.9  Control urbs again use process context to wait. Argh...
- *                     Some long standing bugs (enable_net_traffic) fixed.
- *                     Also nasty trick about resubmiting control urb from
- *                     interrupt context used. Please let me know how it
- *                     behaves. Pegasus II support added since this version.
- *                     TODO: suppressing HCD warnings spewage on disconnect.
- *             v0.4.13 Ethernet address is now set at probe(), not at open()
- *                     time as this seems to break dhcpd.
- *             v0.5.0  branch to 2.5.x kernels
- *             v0.5.1  ethtool support added
- *             v0.5.5  rx socket buffers are in a pool and the their allocation
- *                     is out of the interrupt routine.
- *             ...
- *             v0.9.3  simplified [get|set]_register(s), async update registers
- *                     logic revisited, receive skb_pool removed.
  */
 
 #include <linux/sched.h>
@@ -45,7 +21,6 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v0.9.3 (2013/04/25)"
 #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
 #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
 
@@ -132,9 +107,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
                         const void *data)
 {
-       return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
+       int ret;
+
+       ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
                                    PEGASUS_REQT_WRITE, 0, indx, data, size,
                                    1000, GFP_NOIO);
+       if (ret < 0)
+               netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+       return ret;
 }
 
 /*
@@ -145,10 +126,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
        void *buf = &data;
+       int ret;
 
-       return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
+       ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
                                    PEGASUS_REQT_WRITE, data, indx, buf, 1,
                                    1000, GFP_NOIO);
+       if (ret < 0)
+               netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+       return ret;
 }
 
 static int update_eth_regs_async(pegasus_t *pegasus)
@@ -188,10 +174,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
 
 static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
 {
-       int i;
-       __u8 data[4] = { phy, 0, 0, indx };
+       int i, ret;
        __le16 regdi;
-       int ret = -ETIMEDOUT;
+       __u8 data[4] = { phy, 0, 0, indx };
 
        if (cmd & PHY_WRITE) {
                __le16 *t = (__le16 *) & data[1];
@@ -207,12 +192,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
                if (data[0] & PHY_DONE)
                        break;
        }
-       if (i >= REG_TIMEOUT)
+       if (i >= REG_TIMEOUT) {
+               ret = -ETIMEDOUT;
                goto fail;
+       }
        if (cmd & PHY_READ) {
                ret = get_registers(p, PhyData, 2, &regdi);
+               if (ret < 0)
+                       goto fail;
                *regd = le16_to_cpu(regdi);
-               return ret;
        }
        return 0;
 fail:
@@ -235,9 +223,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
        pegasus_t *pegasus = netdev_priv(dev);
+       int ret;
        u16 res;
 
-       read_mii_word(pegasus, phy_id, loc, &res);
+       ret = read_mii_word(pegasus, phy_id, loc, &res);
+       if (ret < 0)
+               return ret;
+
        return (int)res;
 }
 
@@ -251,10 +243,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
-       int i;
-       __u8 tmp = 0;
+       int ret, i;
        __le16 retdatai;
-       int ret;
+       __u8 tmp = 0;
 
        set_register(pegasus, EpromCtrl, 0);
        set_register(pegasus, EpromOffset, index);
@@ -262,21 +253,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 
        for (i = 0; i < REG_TIMEOUT; i++) {
                ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
+               if (ret < 0)
+                       goto fail;
                if (tmp & EPROM_DONE)
                        break;
-               if (ret == -ESHUTDOWN)
-                       goto fail;
        }
-       if (i >= REG_TIMEOUT)
+       if (i >= REG_TIMEOUT) {
+               ret = -ETIMEDOUT;
                goto fail;
+       }
 
        ret = get_registers(pegasus, EpromData, 2, &retdatai);
+       if (ret < 0)
+               goto fail;
        *retdata = le16_to_cpu(retdatai);
        return ret;
 
 fail:
-       netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
-       return -ETIMEDOUT;
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       return ret;
 }
 
 #ifdef PEGASUS_WRITE_EEPROM
@@ -324,10 +319,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
        return ret;
 
 fail:
-       netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return -ETIMEDOUT;
 }
-#endif                         /* PEGASUS_WRITE_EEPROM */
+#endif /* PEGASUS_WRITE_EEPROM */
 
 static inline int get_node_id(pegasus_t *pegasus, u8 *id)
 {
@@ -367,19 +362,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
        return;
 err:
        eth_hw_addr_random(pegasus->net);
-       dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
+       netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
 
        return;
 }
 
 static inline int reset_mac(pegasus_t *pegasus)
 {
+       int ret, i;
        __u8 data = 0x8;
-       int i;
 
        set_register(pegasus, EthCtrl1, data);
        for (i = 0; i < REG_TIMEOUT; i++) {
-               get_registers(pegasus, EthCtrl1, 1, &data);
+               ret = get_registers(pegasus, EthCtrl1, 1, &data);
+               if (ret < 0)
+                       goto fail;
                if (~data & 0x08) {
                        if (loopback)
                                break;
@@ -402,22 +399,29 @@ static inline int reset_mac(pegasus_t *pegasus)
        }
        if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
                __u16 auxmode;
-               read_mii_word(pegasus, 3, 0x1b, &auxmode);
+               ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
+               if (ret < 0)
+                       goto fail;
                auxmode |= 4;
                write_mii_word(pegasus, 3, 0x1b, &auxmode);
        }
 
        return 0;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       return ret;
 }
 
 static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
 {
-       __u16 linkpart;
-       __u8 data[4];
        pegasus_t *pegasus = netdev_priv(dev);
        int ret;
+       __u16 linkpart;
+       __u8 data[4];
 
-       read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+       ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+       if (ret < 0)
+               goto fail;
        data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
        data[1] = 0;
        if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@@ -435,11 +439,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
            usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
            usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
                u16 auxmode;
-               read_mii_word(pegasus, 0, 0x1b, &auxmode);
+               ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
+               if (ret < 0)
+                       goto fail;
                auxmode |= 4;
                write_mii_word(pegasus, 0, 0x1b, &auxmode);
        }
 
+       return 0;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return ret;
 }
 
@@ -447,9 +456,9 @@ static void read_bulk_callback(struct urb *urb)
 {
        pegasus_t *pegasus = urb->context;
        struct net_device *net;
+       u8 *buf = urb->transfer_buffer;
        int rx_status, count = urb->actual_length;
        int status = urb->status;
-       u8 *buf = urb->transfer_buffer;
        __u16 pkt_len;
 
        if (!pegasus)
@@ -735,12 +744,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus)
        set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
 }
 
-static inline void get_interrupt_interval(pegasus_t *pegasus)
+static inline int get_interrupt_interval(pegasus_t *pegasus)
 {
        u16 data;
        u8 interval;
+       int ret;
+
+       ret = read_eprom_word(pegasus, 4, &data);
+       if (ret < 0)
+               return ret;
 
-       read_eprom_word(pegasus, 4, &data);
        interval = data >> 8;
        if (pegasus->usb->speed != USB_SPEED_HIGH) {
                if (interval < 0x80) {
@@ -755,6 +768,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus)
                }
        }
        pegasus->intr_interval = interval;
+
+       return 0;
 }
 
 static void set_carrier(struct net_device *net)
@@ -880,7 +895,6 @@ static void pegasus_get_drvinfo(struct net_device *dev,
        pegasus_t *pegasus = netdev_priv(dev);
 
        strlcpy(info->driver, driver_name, sizeof(info->driver));
-       strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
        usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
 }
 
@@ -998,8 +1012,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
                data[0] = pegasus->phy;
                fallthrough;
        case SIOCDEVPRIVATE + 1:
-               read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
-               res = 0;
+               res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
                break;
        case SIOCDEVPRIVATE + 2:
                if (!capable(CAP_NET_ADMIN))
@@ -1033,22 +1046,25 @@ static void pegasus_set_multicast(struct net_device *net)
 
 static __u8 mii_phy_probe(pegasus_t *pegasus)
 {
-       int i;
+       int i, ret;
        __u16 tmp;
 
        for (i = 0; i < 32; i++) {
-               read_mii_word(pegasus, i, MII_BMSR, &tmp);
+               ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
+               if (ret < 0)
+                       goto fail;
                if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
                        continue;
                else
                        return i;
        }
-
+fail:
        return 0xff;
 }
 
 static inline void setup_pegasus_II(pegasus_t *pegasus)
 {
+       int ret;
        __u8 data = 0xa5;
 
        set_register(pegasus, Reg1d, 0);
@@ -1060,7 +1076,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
                set_register(pegasus, Reg7b, 2);
 
        set_register(pegasus, 0x83, data);
-       get_registers(pegasus, 0x83, 1, &data);
+       ret = get_registers(pegasus, 0x83, 1, &data);
+       if (ret < 0)
+               goto fail;
 
        if (data == 0xa5)
                pegasus->chip = 0x8513;
@@ -1075,6 +1093,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
                set_register(pegasus, Reg81, 6);
        else
                set_register(pegasus, Reg81, 2);
+
+       return;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
 }
 
 static void check_carrier(struct work_struct *work)
@@ -1149,7 +1171,9 @@ static int pegasus_probe(struct usb_interface *intf,
                                | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 
        pegasus->features = usb_dev_id[dev_index].private;
-       get_interrupt_interval(pegasus);
+       res = get_interrupt_interval(pegasus);
+       if (res)
+               goto out2;
        if (reset_mac(pegasus)) {
                dev_err(&intf->dev, "can't reset MAC\n");
                res = -EIO;
@@ -1296,7 +1320,7 @@ static void __init parse_id(char *id)
 
 static int __init pegasus_init(void)
 {
-       pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
+       pr_info("%s: " DRIVER_DESC "\n", driver_name);
        if (devid)
                parse_id(devid);
        return usb_register(&pegasus_driver);
index 45e6923..f861994 100644 (file)
 #define IOSM_CP_VERSION 0x0100UL
 
 /* DL dir Aggregation support mask */
-#define DL_AGGR BIT(23)
+#define DL_AGGR BIT(9)
 
 /* UL dir Aggregation support mask */
-#define UL_AGGR BIT(22)
+#define UL_AGGR BIT(8)
 
 /* UL flow credit support mask */
 #define UL_FLOW_CREDIT BIT(21)
index 562de27..bdb2d32 100644 (file)
@@ -320,7 +320,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
                return;
        }
 
-       ul_credits = fct->vfl.nr_of_bytes;
+       ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
 
        dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
                if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
@@ -586,7 +586,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
                qlt->reserved[0] = 0;
                qlt->reserved[1] = 0;
 
-               qlt->vfl.nr_of_bytes = session->ul_list.qlen;
+               qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
 
                /* Add QLT to the transfer list. */
                skb_queue_tail(&ipc_mux->channel->ul_list,
index 4a74e3c..aae83db 100644 (file)
@@ -106,7 +106,7 @@ struct mux_lite_cmdh {
  * @nr_of_bytes:       Number of bytes available to transmit in the queue.
  */
 struct mux_lite_vfl {
-       u32 nr_of_bytes;
+       __le32 nr_of_bytes;
 };
 
 /**
index 91109e2..35d5907 100644 (file)
@@ -412,8 +412,8 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
        }
 
        if (p_td->buffer.address != IPC_CB(skb)->mapping) {
-               dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p",
-                       (void *)p_td->buffer.address, skb->data);
+               dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
+                       (unsigned long long)p_td->buffer.address, skb->data);
                ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
                skb = NULL;
                goto ret;
index b2357ad..b571d9c 100644 (file)
@@ -228,7 +228,7 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
 
        RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
        /* unregistering includes synchronize_net() */
-       unregister_netdevice(dev);
+       unregister_netdevice_queue(dev, head);
 
 unlock:
        mutex_unlock(&ipc_wwan->if_mutex);
index 1bc6b69..1e18420 100644 (file)
@@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port)
        int ret;
 
        /* Start mhi device's channel(s) */
-       ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
+       ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0);
        if (ret)
                return ret;
 
index 85887d8..192c904 100644 (file)
@@ -112,6 +112,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev,
        for (i = 0; i < socket_count; i++) {
                sockets[i].card_state = 1; /* 1 = present but empty */
                sockets[i].io_base = pci_resource_start(dev, 0);
+               sockets[i].dev = dev;
                sockets[i].socket.features |= SS_CAP_PCCARD;
                sockets[i].socket.map_size = 0x1000;
                sockets[i].socket.irq_mask = 0;
index 7d385c3..d12db6c 100644 (file)
@@ -508,6 +508,7 @@ config THINKPAD_ACPI
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
+       depends on I2C
        select ACPI_PLATFORM_PROFILE
        select HWMON
        select NVRAM
@@ -691,6 +692,7 @@ config INTEL_HID_EVENT
        tristate "INTEL HID Event"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel HID Event hotkey interface.
@@ -742,6 +744,7 @@ config INTEL_VBTN
        tristate "INTEL VIRTUAL BUTTON"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel Virtual Button interface.
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
new file mode 100644 (file)
index 0000000..a9eae17
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinges (yoga) style 2-in-1 devices using 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_i2c_serialbus *sb;
+       int *count = data;
+
+       if (i2c_acpi_get_i2c_resource(ares, &sb))
+               *count = *count + 1;
+
+       return 1;
+}
+
+static int dual_accel_i2c_client_count(struct acpi_device *adev)
+{
+       int ret, count = 0;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
+       if (ret < 0)
+               return ret;
+
+       acpi_dev_free_resource_list(&r);
+       return count;
+}
+
+static bool dual_accel_detect_bosc0200(void)
+{
+       struct acpi_device *adev;
+       int count;
+
+       adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+       if (!adev)
+               return false;
+
+       count = dual_accel_i2c_client_count(adev);
+
+       acpi_dev_put(adev);
+
+       return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+       /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+       if (acpi_dev_present("KIOX010A", NULL, -1) &&
+           acpi_dev_present("KIOX020A", NULL, -1))
+               return true;
+
+       /* Systems which use a single DUAL250E ACPI device to model 2 accels */
+       if (acpi_dev_present("DUAL250E", NULL, -1))
+               return true;
+
+       /* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+       if (dual_accel_detect_bosc0200())
+               return true;
+
+       return false;
+}
index e5fbe01..2e4e97a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */
 #define TABLET_MODE_FLAG BIT(6)
@@ -122,6 +123,7 @@ struct intel_hid_priv {
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
+       bool dual_accel;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -451,22 +453,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
         *
-        * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers
-        * to allow the OS to determine the angle between the display and the base
-        * of the device. On Windows these are read by a special HingeAngleService
-        * process which calls an ACPI DSM (Device Specific Method) on the
-        * ACPI KIOX010A device node for the sensor in the display, to let the
-        * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can
-        * disable the kbd and touchpad to avoid spurious input in tablet-mode.
-        *
-        * The linux kxcjk1013 driver calls the DSM for this once at probe time
-        * to ensure that the builtin kbd and touchpad work. On some devices this
-        * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case
-        * there is not a functional tablet-mode switch, so we should not register
-        * the tablet-mode switch device.
+        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && (event == 0xcc || event == 0xcd) &&
-           !acpi_dev_present("KIOX010A", NULL, -1)) {
+       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enable switches supports\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -607,6 +596,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel_detect();
+
        err = intel_hid_input_setup(device);
        if (err) {
                pr_err("Failed to setup Intel HID hotkeys\n");
index 888a764..3091664 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* Returned when NOT in tablet mode on some HP Stream x360 11 models */
 #define VGBS_TABLET_MODE_FLAG_ALT      0x10
@@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
 struct intel_vbtn_priv {
        struct input_dev *buttons_dev;
        struct input_dev *switches_dev;
+       bool dual_accel;
        bool has_buttons;
        bool has_switches;
        bool wakeup_mode;
@@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                input_dev = priv->buttons_dev;
        } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
                if (!priv->has_switches) {
+                       /* See dual_accel_detect.h for more info */
+                       if (priv->dual_accel)
+                               return;
+
                        dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
                        ret = input_register_device(priv->switches_dev);
                        if (ret)
@@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {} /* Array terminator */
 };
 
-static bool intel_vbtn_has_switches(acpi_handle handle)
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
 {
        unsigned long long vgbs;
        acpi_status status;
 
+       /* See dual_accel_detect.h for more info */
+       if (dual_accel)
+               return false;
+
        if (!dmi_check_system(dmi_switches_allow_list))
                return false;
 
@@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle)
 static int intel_vbtn_probe(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
-       bool has_buttons, has_switches;
+       bool dual_accel, has_buttons, has_switches;
        struct intel_vbtn_priv *priv;
        acpi_status status;
        int err;
 
+       dual_accel = dual_accel_detect();
        has_buttons = acpi_has_method(handle, "VBDL");
-       has_switches = intel_vbtn_has_switches(handle);
+       has_switches = intel_vbtn_has_switches(handle, dual_accel);
 
        if (!has_buttons && !has_switches) {
                dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
@@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel;
        priv->has_buttons = has_buttons;
        priv->has_switches = has_switches;
 
index c37349f..d063d91 100644 (file)
@@ -94,6 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = {
                                NULL, 1, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
                                NULL, 2, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
@@ -123,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = {
        .table = {
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
                                NULL, 0, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
index 603156a..50ff04c 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/uaccess.h>
 #include <acpi/battery.h>
 #include <acpi/video.h>
+#include "dual_accel_detect.h"
 
 /* ThinkPad CMOS commands */
 #define TP_CMOS_VOLUME_DOWN    0
@@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void)
                 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
                 * does not support this, so skip the hotkey on these models.
                 */
-               if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+               if (has_tablet_mode && !dual_accel_detect())
                        tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
                type = "GMMS";
        } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
index 0de1a46..fb5d815 100644 (file)
@@ -1004,15 +1004,23 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
 static void dasd_eckd_store_conf_data(struct dasd_device *device,
                                      struct dasd_conf_data *conf_data, int chp)
 {
+       struct dasd_eckd_private *private = device->private;
        struct channel_path_desc_fmt0 *chp_desc;
        struct subchannel_id sch_id;
+       void *cdp;
 
-       ccw_device_get_schid(device->cdev, &sch_id);
        /*
         * path handling and read_conf allocate data
         * free it before replacing the pointer
+        * also replace the old private->conf_data pointer
+        * with the new one if this points to the same data
         */
-       kfree(device->path[chp].conf_data);
+       cdp = device->path[chp].conf_data;
+       if (private->conf_data == cdp) {
+               private->conf_data = (void *)conf_data;
+               dasd_eckd_identify_conf_parts(private);
+       }
+       ccw_device_get_schid(device->cdev, &sch_id);
        device->path[chp].conf_data = conf_data;
        device->path[chp].cssid = sch_id.cssid;
        device->path[chp].ssid = sch_id.ssid;
@@ -1020,6 +1028,7 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
        if (chp_desc)
                device->path[chp].chpid = chp_desc->chpid;
        kfree(chp_desc);
+       kfree(cdp);
 }
 
 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
index 25f6e1a..66652ab 100644 (file)
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
                if (!h->ctlr)
                        err = SCSI_DH_RES_TEMP_UNAVAIL;
                else {
-                       list_add_rcu(&h->node, &h->ctlr->dh_list);
                        h->sdev = sdev;
+                       list_add_rcu(&h->node, &h->ctlr->dh_list);
                }
                spin_unlock(&list_lock);
                err = SCSI_DH_OK;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
        spin_lock(&list_lock);
        if (h->ctlr) {
                list_del_rcu(&h->node);
-               h->sdev = NULL;
                kref_put(&h->ctlr->kref, release_controller);
        }
        spin_unlock(&list_lock);
        sdev->handler_data = NULL;
+       synchronize_rcu();
        kfree(h);
 }
 
index bee1bec..935b01e 100644 (file)
@@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
        for (i = 0; i < size; ++i) {
                struct ibmvfc_event *evt = &pool->events[i];
 
+               /*
+                * evt->active states
+                *  1 = in flight
+                *  0 = being completed
+                * -1 = free/freed
+                */
+               atomic_set(&evt->active, -1);
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
                evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
@@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
 
        BUG_ON(!ibmvfc_valid_event(pool, evt));
        BUG_ON(atomic_inc_return(&evt->free) != 1);
+       BUG_ON(atomic_dec_and_test(&evt->active));
 
        spin_lock_irqsave(&evt->queue->l_lock, flags);
        list_add_tail(&evt->queue_list, &evt->queue->free);
@@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list)
  **/
 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
 {
+       /*
+        * Anything we are failing should still be active. Otherwise, it
+        * implies we already got a response for the command and are doing
+        * something bad like double completing it.
+        */
+       BUG_ON(!atomic_dec_and_test(&evt->active));
        if (evt->cmnd) {
                evt->cmnd->result = (error_code << 16);
                evt->done = ibmvfc_scsi_eh_done;
@@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
                evt->done(evt);
        } else {
+               atomic_set(&evt->active, 1);
                spin_unlock_irqrestore(&evt->queue->l_lock, flags);
                ibmvfc_trc_start(evt);
        }
@@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
                return;
        }
 
-       if (unlikely(atomic_read(&evt->free))) {
+       if (unlikely(atomic_dec_if_positive(&evt->active))) {
                dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
                        crq->ioba);
                return;
@@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
                return;
        }
 
-       if (unlikely(atomic_read(&evt->free))) {
+       if (unlikely(atomic_dec_if_positive(&evt->active))) {
                dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
                        crq->ioba);
                return;
index 4f0f3ba..92fb889 100644 (file)
@@ -745,6 +745,7 @@ struct ibmvfc_event {
        struct ibmvfc_target *tgt;
        struct scsi_cmnd *cmnd;
        atomic_t free;
+       atomic_t active;
        union ibmvfc_iu *xfer_iu;
        void (*done)(struct ibmvfc_event *evt);
        void (*_done)(struct ibmvfc_event *evt);
index abf7b40..c509440 100644 (file)
@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
        mimd_t          mimd;
        uint32_t        adapno;
        int             iterator;
-
+       bool            is_found;
 
        if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
                *rval = -EFAULT;
@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
 
        adapter = NULL;
        iterator = 0;
+       is_found = false;
 
        list_for_each_entry(adapter, &adapters_list_g, list) {
-               if (iterator++ == adapno) break;
+               if (iterator++ == adapno) {
+                       is_found = true;
+                       break;
+               }
        }
 
-       if (!adapter) {
+       if (!is_found) {
                *rval = -ENODEV;
                return NULL;
        }
@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
        uint32_t        adapno;
        int             iterator;
        mraid_mmadp_t*  adapter;
+       bool            is_found;
 
        /*
         * When the kioc returns from driver, make sure it still doesn't
@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
                iterator        = 0;
                adapter         = NULL;
                adapno          = kioc->adapno;
+               is_found        = false;
 
                con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
                                        "ioctl that was timedout before\n"));
 
                list_for_each_entry(adapter, &adapters_list_g, list) {
-                       if (iterator++ == adapno) break;
+                       if (iterator++ == adapno) {
+                               is_found = true;
+                               break;
+                       }
                }
 
                kioc->timedout = 0;
 
-               if (adapter) {
+               if (is_found)
                        mraid_mm_dealloc_kioc( adapter, kioc );
-               }
+
        }
        else {
                wake_up(&wait_q);
index 48548a9..32e60f0 100644 (file)
@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
 
 void pm8001_task_done(struct sas_task *task)
 {
-       if (!del_timer(&task->slow_task->timer))
-               return;
+       del_timer(&task->slow_task->timer);
        complete(&task->slow_task->completion);
 }
 
@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
 {
        struct sas_task_slow *slow = from_timer(slow, t, timer);
        struct sas_task *task = slow->task;
+       unsigned long flags;
 
-       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-       complete(&task->slow_task->completion);
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+               complete(&task->slow_task->completion);
+       }
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
 }
 
 #define PM8001_TASK_TIMEOUT 20
@@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                }
                res = -TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               pm8001_dbg(pm8001_ha, FAIL,
-                                          "TMF task[%x]timeout.\n",
-                                          tmf->tmf);
-                               goto ex_err;
-                       }
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
+                                  tmf->tmf);
+                       goto ex_err;
                }
 
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
                wait_for_completion(&task->slow_task->completion);
                res = TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               pm8001_dbg(pm8001_ha, FAIL,
-                                          "TMF task timeout.\n");
-                               goto ex_err;
-                       }
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
+                       goto ex_err;
                }
 
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
index b059bf2..5b6996a 100644 (file)
@@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
                error = shost->hostt->target_alloc(starget);
 
                if(error) {
-                       dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+                       if (error != -ENXIO)
+                               dev_err(dev, "target allocation failed, error %d\n", error);
                        /* don't want scsi_target_reap to do the final
                         * put because it will be under the host lock */
                        scsi_target_destroy(starget);
index 32489d2..ae9bfc6 100644 (file)
@@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
        /*
-        * If the device state changes to SDEV_RUNNING, we need to run
-        * the queue to avoid I/O hang.
+        * If the device state changes to SDEV_RUNNING, we need to
+        * rescan the device to revalidate it, and run the queue to
+        * avoid I/O hang.
         */
-       if (ret == 0 && state == SDEV_RUNNING)
+       if (ret == 0 && state == SDEV_RUNNING) {
+               scsi_rescan_device(dev);
                blk_mq_run_hw_queues(sdev->request_queue, true);
+       }
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
index 94c254e..a6d3ac0 100644 (file)
@@ -221,7 +221,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
        else if (med->media_event_code == 2)
                return DISK_EVENT_MEDIA_CHANGE;
        else if (med->media_event_code == 3)
-               return DISK_EVENT_EJECT_REQUEST;
+               return DISK_EVENT_MEDIA_CHANGE;
        return 0;
 }
 
index f678e4d..a05e9fb 100644 (file)
@@ -13,7 +13,7 @@ obj-$(CONFIG_MACH_DOVE)               += dove/
 obj-y                          += fsl/
 obj-$(CONFIG_ARCH_GEMINI)      += gemini/
 obj-y                          += imx/
-obj-$(CONFIG_ARCH_IXP4XX)      += ixp4xx/
+obj-y                          += ixp4xx/
 obj-$(CONFIG_SOC_XWAY)         += lantiq/
 obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
 obj-y                          += mediatek/
index 071e144..cc57a38 100644 (file)
@@ -5,8 +5,6 @@
 
 #include <linux/init.h>
 #include <linux/io.h>
-#include <linux/module.h>
-#include <linux/nvmem-consumer.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/sys_soc.h>
@@ -31,7 +29,7 @@
 
 struct imx8_soc_data {
        char *name;
-       u32 (*soc_revision)(struct device *dev);
+       u32 (*soc_revision)(void);
 };
 
 static u64 soc_uid;
@@ -52,7 +50,7 @@ static u32 imx8mq_soc_revision_from_atf(void)
 static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
 #endif
 
-static u32 __init imx8mq_soc_revision(struct device *dev)
+static u32 __init imx8mq_soc_revision(void)
 {
        struct device_node *np;
        void __iomem *ocotp_base;
@@ -77,20 +75,9 @@ static u32 __init imx8mq_soc_revision(struct device *dev)
                        rev = REV_B1;
        }
 
-       if (dev) {
-               int ret;
-
-               ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
-               if (ret) {
-                       iounmap(ocotp_base);
-                       of_node_put(np);
-                       return ret;
-               }
-       } else {
-               soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
-               soc_uid <<= 32;
-               soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
-       }
+       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+       soc_uid <<= 32;
+       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
 
        iounmap(ocotp_base);
        of_node_put(np);
@@ -120,7 +107,7 @@ static void __init imx8mm_soc_uid(void)
        of_node_put(np);
 }
 
-static u32 __init imx8mm_soc_revision(struct device *dev)
+static u32 __init imx8mm_soc_revision(void)
 {
        struct device_node *np;
        void __iomem *anatop_base;
@@ -138,15 +125,7 @@ static u32 __init imx8mm_soc_revision(struct device *dev)
        iounmap(anatop_base);
        of_node_put(np);
 
-       if (dev) {
-               int ret;
-
-               ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
-               if (ret)
-                       return ret;
-       } else {
-               imx8mm_soc_uid();
-       }
+       imx8mm_soc_uid();
 
        return rev;
 }
@@ -171,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
        .soc_revision = imx8mm_soc_revision,
 };
 
-static __maybe_unused const struct of_device_id imx8_machine_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
        { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
        { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
        { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
@@ -179,20 +158,12 @@ static __maybe_unused const struct of_device_id imx8_machine_match[] = {
        { }
 };
 
-static __maybe_unused const struct of_device_id imx8_soc_match[] = {
-       { .compatible = "fsl,imx8mq-soc", .data = &imx8mq_soc_data, },
-       { .compatible = "fsl,imx8mm-soc", .data = &imx8mm_soc_data, },
-       { .compatible = "fsl,imx8mn-soc", .data = &imx8mn_soc_data, },
-       { .compatible = "fsl,imx8mp-soc", .data = &imx8mp_soc_data, },
-       { }
-};
-
 #define imx8_revision(soc_rev) \
        soc_rev ? \
        kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf,  soc_rev & 0xf) : \
        "unknown"
 
-static int imx8_soc_info(struct platform_device *pdev)
+static int __init imx8_soc_init(void)
 {
        struct soc_device_attribute *soc_dev_attr;
        struct soc_device *soc_dev;
@@ -211,10 +182,7 @@ static int imx8_soc_info(struct platform_device *pdev)
        if (ret)
                goto free_soc;
 
-       if (pdev)
-               id = of_match_node(imx8_soc_match, pdev->dev.of_node);
-       else
-               id = of_match_node(imx8_machine_match, of_root);
+       id = of_match_node(imx8_soc_match, of_root);
        if (!id) {
                ret = -ENODEV;
                goto free_soc;
@@ -223,16 +191,8 @@ static int imx8_soc_info(struct platform_device *pdev)
        data = id->data;
        if (data) {
                soc_dev_attr->soc_id = data->name;
-               if (data->soc_revision) {
-                       if (pdev) {
-                               soc_rev = data->soc_revision(&pdev->dev);
-                               ret = soc_rev;
-                               if (ret < 0)
-                                       goto free_soc;
-                       } else {
-                               soc_rev = data->soc_revision(NULL);
-                       }
-               }
+               if (data->soc_revision)
+                       soc_rev = data->soc_revision();
        }
 
        soc_dev_attr->revision = imx8_revision(soc_rev);
@@ -270,24 +230,4 @@ free_soc:
        kfree(soc_dev_attr);
        return ret;
 }
-
-/* Retain device_initcall is for backward compatibility with DTS. */
-static int __init imx8_soc_init(void)
-{
-       if (of_find_matching_node_and_match(NULL, imx8_soc_match, NULL))
-               return 0;
-
-       return imx8_soc_info(NULL);
-}
 device_initcall(imx8_soc_init);
-
-static struct platform_driver imx8_soc_info_driver = {
-       .probe = imx8_soc_info,
-       .driver = {
-               .name = "imx8_soc_info",
-               .of_match_table = imx8_soc_match,
-       },
-};
-
-module_platform_driver(imx8_soc_info_driver);
-MODULE_LICENSE("GPL v2");
index 7bd1935..f490c4c 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ixp4xx/npe.h>
-#include <mach/hardware.h>
 #include <linux/soc/ixp4xx/cpu.h>
 
 #define DEBUG_MSG                      0
@@ -694,8 +693,8 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
 
                if (!(ixp4xx_read_feature_bits() &
                      (IXP4XX_FEATURE_RESET_NPEA << i))) {
-                       dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
-                                i, res->start, res->end);
+                       dev_info(dev, "NPE%d at %pR not available\n",
+                                i, res);
                        continue; /* NPE already disabled or not present */
                }
                npe->regs = devm_ioremap_resource(dev, res);
@@ -703,13 +702,12 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
                        return PTR_ERR(npe->regs);
 
                if (npe_reset(npe)) {
-                       dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
-                                i, res->start, res->end);
+                       dev_info(dev, "NPE%d at %pR does not reset\n",
+                                i, res);
                        continue;
                }
                npe->valid = 1;
-               dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
-                        i, res->start, res->end);
+               dev_info(dev, "NPE%d at %pR registered\n", i, res);
                found++;
        }
 
index 7149510..9154c70 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ixp4xx/qmgr.h>
-#include <mach/hardware.h>
 #include <linux/soc/ixp4xx/cpu.h>
 
 static struct qmgr_regs __iomem *qmgr_regs;
@@ -147,12 +146,12 @@ static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
        /* ACK - it may clear any bits so don't rely on it */
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
 
-       en_bitmap = qmgr_regs->irqen[0];
+       en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
        while (en_bitmap) {
                i = __fls(en_bitmap); /* number of the last "low" queue */
                en_bitmap &= ~BIT(i);
-               src = qmgr_regs->irqsrc[i >> 3];
-               stat = qmgr_regs->stat1[i >> 3];
+               src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
+               stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
                if (src & 4) /* the IRQ condition is inverted */
                        stat = ~stat;
                if (stat & BIT(src & 3)) {
@@ -172,7 +171,8 @@ static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
        /* ACK - it may clear any bits so don't rely on it */
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
 
-       req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+       req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
+                    __raw_readl(&qmgr_regs->statne_h);
        while (req_bitmap) {
                i = __fls(req_bitmap); /* number of the last "high" queue */
                req_bitmap &= ~BIT(i);
index 20ace65..8b53ed1 100644 (file)
@@ -15,7 +15,7 @@ config ARCH_TEGRA_2x_SOC
        select PL310_ERRATA_769419 if CACHE_L2X0
        select SOC_TEGRA_FLOWCTRL
        select SOC_TEGRA_PMC
-       select SOC_TEGRA20_VOLTAGE_COUPLER
+       select SOC_TEGRA20_VOLTAGE_COUPLER if REGULATOR
        select TEGRA_TIMER
        help
          Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -29,7 +29,7 @@ config ARCH_TEGRA_3x_SOC
        select PL310_ERRATA_769419 if CACHE_L2X0
        select SOC_TEGRA_FLOWCTRL
        select SOC_TEGRA_PMC
-       select SOC_TEGRA30_VOLTAGE_COUPLER
+       select SOC_TEGRA30_VOLTAGE_COUPLER if REGULATOR
        select TEGRA_TIMER
        help
          Support for NVIDIA Tegra T30 processor family, based on the
@@ -155,7 +155,9 @@ config SOC_TEGRA_POWERGATE_BPMP
 config SOC_TEGRA20_VOLTAGE_COUPLER
        bool "Voltage scaling support for Tegra20 SoCs"
        depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+       depends on REGULATOR
 
 config SOC_TEGRA30_VOLTAGE_COUPLER
        bool "Voltage scaling support for Tegra30 SoCs"
        depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+       depends on REGULATOR
index a2de235..101cc71 100644 (file)
@@ -325,7 +325,15 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
        f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
+
+       /*
+        * For an op to be DTR, cmd phase along with every other non-empty
+        * phase should have dtr field set to 1. If an op phase has zero
+        * nbytes, ignore its dtr field; otherwise, check its dtr field.
+        */
+       f_pdata->dtr = op->cmd.dtr &&
+                      (!op->addr.nbytes || op->addr.dtr) &&
+                      (!op->data.nbytes || op->data.dtr);
 
        switch (op->data.buswidth) {
        case 0:
@@ -1228,8 +1236,15 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
 {
        bool all_true, all_false;
 
-       all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr &&
-                  op->data.dtr;
+       /*
+        * op->dummy.dtr is required for converting nbytes into ncycles.
+        * Also, don't check the dtr field of the op phase having zero nbytes.
+        */
+       all_true = op->cmd.dtr &&
+                  (!op->addr.nbytes || op->addr.dtr) &&
+                  (!op->dummy.nbytes || op->dummy.dtr) &&
+                  (!op->data.nbytes || op->data.dtr);
+
        all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
                    !op->data.dtr;
 
index 4aee3db..fa68e98 100644 (file)
@@ -505,7 +505,9 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
                                      struct spi_message *msg)
 {
        struct spi_device *spi = msg->spi;
+       struct spi_transfer *xfer;
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
+       u32 min_speed_hz = ~0U;
        u32 testreg, delay;
        u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
@@ -577,9 +579,21 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
         * be asserted before the SCLK polarity changes, which would disrupt
         * the SPI communication as the device on the other end would consider
         * the change of SCLK polarity as a clock tick already.
+        *
+        * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
+        * callback, iterate over all the transfers in spi_message, find the
+        * one with lowest bus frequency, and use that bus frequency for the
+        * delay calculation. In case all transfers have speed_hz == 0, then
+        * min_speed_hz is ~0 and the resulting delay is zero.
         */
-       delay = (2 * 1000000) / spi_imx->spi_bus_clk;
-       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (!xfer->speed_hz)
+                       continue;
+               min_speed_hz = min(xfer->speed_hz, min_speed_hz);
+       }
+
+       delay = (2 * 1000000) / min_speed_hz;
+       if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
                udelay(delay);
        else                    /* SCLK is _very_ slow */
                usleep_range(delay, delay + 10);
index b2c4621..c208efe 100644 (file)
@@ -785,6 +785,8 @@ static int meson_spicc_remove(struct platform_device *pdev)
        clk_disable_unprepare(spicc->core);
        clk_disable_unprepare(spicc->pclk);
 
+       spi_master_put(spicc->master);
+
        return 0;
 }
 
index 68dca8c..7914255 100644 (file)
@@ -426,24 +426,15 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
        mtk_spi_prepare_transfer(master, xfer);
        mtk_spi_setup_packet(master);
 
-       cnt = xfer->len / 4;
-       if (xfer->tx_buf)
+       if (xfer->tx_buf) {
+               cnt = xfer->len / 4;
                iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
-
-       if (xfer->rx_buf)
-               ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
-
-       remainder = xfer->len % 4;
-       if (remainder > 0) {
-               reg_val = 0;
-               if (xfer->tx_buf) {
+               remainder = xfer->len % 4;
+               if (remainder > 0) {
+                       reg_val = 0;
                        memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
                        writel(reg_val, mdata->base + SPI_TX_DATA_REG);
                }
-               if (xfer->rx_buf) {
-                       reg_val = readl(mdata->base + SPI_RX_DATA_REG);
-                       memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
-               }
        }
 
        mtk_spi_enable_transfer(master);
index 37dfc6e..9708b78 100644 (file)
@@ -167,10 +167,17 @@ err_put_ctlr:
        return ret;
 }
 
+static const struct spi_device_id spi_mux_id[] = {
+       { "spi-mux" },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, spi_mux_id);
+
 static const struct of_device_id spi_mux_of_match[] = {
        { .compatible = "spi-mux" },
        { }
 };
+MODULE_DEVICE_TABLE(of, spi_mux_of_match);
 
 static struct spi_driver spi_mux_driver = {
        .probe  = spi_mux_probe,
@@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = {
                .name   = "spi-mux",
                .of_match_table = spi_mux_of_match,
        },
+       .id_table = spi_mux_id,
 };
 
 module_spi_driver(spi_mux_driver);
index c991811..e4dc593 100644 (file)
@@ -58,6 +58,10 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
        const struct spi_device *spi = to_spi_device(dev);
        int len;
 
+       len = of_device_modalias(dev, buf, PAGE_SIZE);
+       if (len != -ENODEV)
+               return len;
+
        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;
index 691030e..f9bdf4e 100644 (file)
@@ -422,7 +422,6 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
                        dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
                                slot);
                        mt7621_control_assert(port);
-                       clk_disable_unprepare(port->clk);
                        port->enabled = false;
 
                        if (slot == 0) {
index 2297427..4eff3fd 100644 (file)
 #define FWBUFF_ALIGN_SZ 512
 #define MAX_DUMP_FWSZ (48 * 1024)
 
+static void rtl871x_load_fw_fail(struct _adapter *adapter)
+{
+       struct usb_device *udev = adapter->dvobjpriv.pusbdev;
+       struct device *dev = &udev->dev;
+       struct device *parent = dev->parent;
+
+       complete(&adapter->rtl8712_fw_ready);
+
+       dev_err(&udev->dev, "r8712u: Firmware request failed\n");
+
+       if (parent)
+               device_lock(parent);
+
+       device_release_driver(dev);
+
+       if (parent)
+               device_unlock(parent);
+}
+
 static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
 {
        struct _adapter *adapter = context;
 
        if (!firmware) {
-               struct usb_device *udev = adapter->dvobjpriv.pusbdev;
-               struct usb_interface *usb_intf = adapter->pusb_intf;
-
-               dev_err(&udev->dev, "r8712u: Firmware request failed\n");
-               usb_put_dev(udev);
-               usb_set_intfdata(usb_intf, NULL);
-               r8712_free_drv_sw(adapter);
-               adapter->dvobj_deinit(adapter);
-               complete(&adapter->rtl8712_fw_ready);
-               free_netdev(adapter->pnetdev);
+               rtl871x_load_fw_fail(adapter);
                return;
        }
        adapter->fw = firmware;
index 5901026..d5fc902 100644 (file)
@@ -1820,3 +1820,11 @@ void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction)
                break;
        }
 }
+
+void r8712_flush_led_works(struct _adapter *padapter)
+{
+       struct led_priv *pledpriv = &padapter->ledpriv;
+
+       flush_work(&pledpriv->SwLed0.BlinkWorkItem);
+       flush_work(&pledpriv->SwLed1.BlinkWorkItem);
+}
index ee19c87..2f07681 100644 (file)
@@ -112,6 +112,7 @@ struct led_priv {
 void r8712_InitSwLeds(struct _adapter *padapter);
 void r8712_DeInitSwLeds(struct _adapter *padapter);
 void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction);
+void r8712_flush_led_works(struct _adapter *padapter);
 
 #endif
 
index 23cff43..cd6d9ff 100644 (file)
@@ -224,3 +224,11 @@ void r8712_unregister_cmd_alive(struct _adapter *padapter)
        }
        mutex_unlock(&pwrctrl->mutex_lock);
 }
+
+void r8712_flush_rwctrl_works(struct _adapter *padapter)
+{
+       struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
+
+       flush_work(&pwrctrl->SetPSModeWorkItem);
+       flush_work(&pwrctrl->rpwm_workitem);
+}
index bf6623c..b35b9c7 100644 (file)
@@ -108,5 +108,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
 void r8712_set_ps_mode(struct _adapter *padapter, uint ps_mode,
                        uint smart_ps);
 void r8712_set_rpwm(struct _adapter *padapter, u8 val8);
+void r8712_flush_rwctrl_works(struct _adapter *padapter);
 
 #endif  /* __RTL871X_PWRCTRL_H_ */
index 2434b13..505ebeb 100644 (file)
@@ -591,35 +591,30 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
 {
        struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
        struct usb_device *udev = interface_to_usbdev(pusb_intf);
+       struct _adapter *padapter = netdev_priv(pnetdev);
+
+       /* never exit with a firmware callback pending */
+       wait_for_completion(&padapter->rtl8712_fw_ready);
+       usb_set_intfdata(pusb_intf, NULL);
+       release_firmware(padapter->fw);
+       if (drvpriv.drv_registered)
+               padapter->surprise_removed = true;
+       if (pnetdev->reg_state != NETREG_UNINITIALIZED)
+               unregister_netdev(pnetdev); /* will call netdev_close() */
+       r8712_flush_rwctrl_works(padapter);
+       r8712_flush_led_works(padapter);
+       udelay(1);
+       /* Stop driver mlme relation timer */
+       r8712_stop_drv_timers(padapter);
+       r871x_dev_unload(padapter);
+       r8712_free_drv_sw(padapter);
+       free_netdev(pnetdev);
+
+       /* decrease the reference count of the usb device structure
+        * when disconnect
+        */
+       usb_put_dev(udev);
 
-       if (pnetdev) {
-               struct _adapter *padapter = netdev_priv(pnetdev);
-
-               /* never exit with a firmware callback pending */
-               wait_for_completion(&padapter->rtl8712_fw_ready);
-               pnetdev = usb_get_intfdata(pusb_intf);
-               usb_set_intfdata(pusb_intf, NULL);
-               if (!pnetdev)
-                       goto firmware_load_fail;
-               release_firmware(padapter->fw);
-               if (drvpriv.drv_registered)
-                       padapter->surprise_removed = true;
-               if (pnetdev->reg_state != NETREG_UNINITIALIZED)
-                       unregister_netdev(pnetdev); /* will call netdev_close() */
-               flush_scheduled_work();
-               udelay(1);
-               /* Stop driver mlme relation timer */
-               r8712_stop_drv_timers(padapter);
-               r871x_dev_unload(padapter);
-               r8712_free_drv_sw(padapter);
-               free_netdev(pnetdev);
-
-               /* decrease the reference count of the usb device structure
-                * when disconnect
-                */
-               usb_put_dev(udev);
-       }
-firmware_load_fail:
        /* If we didn't unplug usb dongle and remove/insert module, driver
         * fails on sitesurvey for the first time when device is up.
         * Reset usb port for sitesurvey fail issue.
index a884673..7eae820 100644 (file)
@@ -5,6 +5,7 @@ config RTL8723BS
        depends on m
        select WIRELESS_EXT
        select WEXT_PRIV
+       select CRYPTO_LIB_ARC4
        help
        This option enables support for RTL8723BS SDIO drivers, such as
        the wifi found on the 1st gen Intel Compute Stick, the CHIP
index 2dd251c..a545832 100644 (file)
@@ -909,6 +909,8 @@ void sd_int_dpc(struct adapter *adapter)
                                } else {
                                        rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
                                }
+                       } else {
+                               kfree(c2h_evt);
                        }
                } else {
                        /* Error handling for malloc fail */
index 6e6eb83..945f03d 100644 (file)
@@ -184,7 +184,7 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
        struct optee_msg_arg *ma;
 
        shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
-                           TEE_SHM_MAPPED);
+                           TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (IS_ERR(shm))
                return shm;
 
@@ -416,11 +416,13 @@ void optee_enable_shm_cache(struct optee *optee)
 }
 
 /**
- * optee_disable_shm_cache() - Disables caching of some shared memory allocation
- *                           in OP-TEE
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ *                               allocation in OP-TEE
  * @optee:     main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ *             kernel, are safe to dereference, and should be freed
  */
-void optee_disable_shm_cache(struct optee *optee)
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
 {
        struct optee_call_waiter w;
 
@@ -439,6 +441,13 @@ void optee_disable_shm_cache(struct optee *optee)
                if (res.result.status == OPTEE_SMC_RETURN_OK) {
                        struct tee_shm *shm;
 
+                       /*
+                        * Shared memory references that were not mapped by
+                        * this kernel must be ignored to prevent a crash.
+                        */
+                       if (!is_mapped)
+                               continue;
+
                        shm = reg_pair_to_ptr(res.result.shm_upper32,
                                              res.result.shm_lower32);
                        tee_shm_free(shm);
@@ -449,6 +458,27 @@ void optee_disable_shm_cache(struct optee *optee)
        optee_cq_wait_final(&optee->call_queue, &w);
 }
 
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ *                             allocations in OP-TEE
+ * @optee:     main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+       return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ *                                      allocations in OP-TEE which are not
+ *                                      currently mapped
+ * @optee:     main service struct
+ */
+void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+       return __optee_disable_shm_cache(optee, false);
+}
+
 #define PAGELIST_ENTRIES_PER_PAGE                              \
        ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
 
index ddb8f9e..5ce13b0 100644 (file)
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/arm-smccc.h>
+#include <linux/crash_dump.h>
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -277,7 +278,8 @@ static void optee_release(struct tee_context *ctx)
        if (!ctxdata)
                return;
 
-       shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+       shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
+                           TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (!IS_ERR(shm)) {
                arg = tee_shm_get_va(shm, 0);
                /*
@@ -572,6 +574,13 @@ static optee_invoke_fn *get_invoke_func(struct device *dev)
        return ERR_PTR(-EINVAL);
 }
 
+/* optee_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_remove is called by platform subsystem to alert the driver
+ * that it should release the device
+ */
+
 static int optee_remove(struct platform_device *pdev)
 {
        struct optee *optee = platform_get_drvdata(pdev);
@@ -602,6 +611,18 @@ static int optee_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* optee_shutdown - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * platform_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+       optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
 static int optee_probe(struct platform_device *pdev)
 {
        optee_invoke_fn *invoke_fn;
@@ -612,6 +633,16 @@ static int optee_probe(struct platform_device *pdev)
        u32 sec_caps;
        int rc;
 
+       /*
+        * The kernel may have crashed at the same time that all available
+        * secure world threads were suspended and we cannot reschedule the
+        * suspended threads without access to the crashed kernel's wait_queue.
+        * Therefore, we cannot reliably initialize the OP-TEE driver in the
+        * kdump kernel.
+        */
+       if (is_kdump_kernel())
+               return -ENODEV;
+
        invoke_fn = get_invoke_func(&pdev->dev);
        if (IS_ERR(invoke_fn))
                return PTR_ERR(invoke_fn);
@@ -686,6 +717,15 @@ static int optee_probe(struct platform_device *pdev)
        optee->memremaped_shm = memremaped_shm;
        optee->pool = pool;
 
+       /*
+        * Ensure that there are no pre-existing shm objects before enabling
+        * the shm cache so that there's no chance of receiving an invalid
+        * address during shutdown. This could occur, for example, if we're
+        * kexec booting from an older kernel that did not properly cleanup the
+        * shm cache.
+        */
+       optee_disable_unmapped_shm_cache(optee);
+
        optee_enable_shm_cache(optee);
 
        if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
@@ -728,6 +768,7 @@ MODULE_DEVICE_TABLE(of, optee_dt_match);
 static struct platform_driver optee_driver = {
        .probe  = optee_probe,
        .remove = optee_remove,
+       .shutdown = optee_shutdown,
        .driver = {
                .name = "optee",
                .of_match_table = optee_dt_match,
index e25b216..dbdd367 100644 (file)
@@ -159,6 +159,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
+void optee_disable_unmapped_shm_cache(struct optee *optee);
 
 int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                       struct page **pages, size_t num_pages,
index 1849180..efbaff7 100644 (file)
@@ -314,7 +314,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                shm = cmd_alloc_suppl(ctx, sz);
                break;
        case OPTEE_RPC_SHM_TYPE_KERNEL:
-               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
                break;
        default:
                arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -502,7 +502,8 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
 
        switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
        case OPTEE_SMC_RPC_FUNC_ALLOC:
-               shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+               shm = tee_shm_alloc(ctx, param->a1,
+                                   TEE_SHM_MAPPED | TEE_SHM_PRIV);
                if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
                        reg_pair_from_64(&param->a1, &param->a2, pa);
                        reg_pair_from_64(&param->a4, &param->a5,
index d767eeb..c41a9a5 100644 (file)
@@ -27,13 +27,19 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
        shm->paddr = page_to_phys(page);
        shm->size = PAGE_SIZE << order;
 
-       if (shm->flags & TEE_SHM_DMA_BUF) {
+       /*
+        * Shared memory private to the OP-TEE driver doesn't need
+        * to be registered with OP-TEE.
+        */
+       if (!(shm->flags & TEE_SHM_PRIV)) {
                unsigned int nr_pages = 1 << order, i;
                struct page **pages;
 
                pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
-               if (!pages)
-                       return -ENOMEM;
+               if (!pages) {
+                       rc = -ENOMEM;
+                       goto err;
+               }
 
                for (i = 0; i < nr_pages; i++) {
                        pages[i] = page;
@@ -44,15 +50,21 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
                rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
                                        (unsigned long)shm->kaddr);
                kfree(pages);
+               if (rc)
+                       goto err;
        }
 
+       return 0;
+
+err:
+       __free_pages(page, order);
        return rc;
 }
 
 static void pool_op_free(struct tee_shm_pool_mgr *poolm,
                         struct tee_shm *shm)
 {
-       if (shm->flags & TEE_SHM_DMA_BUF)
+       if (!(shm->flags & TEE_SHM_PRIV))
                optee_shm_unregister(shm->ctx, shm);
 
        free_pages((unsigned long)shm->kaddr, get_order(shm->size));
index 00472f5..8a9384a 100644 (file)
@@ -117,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                return ERR_PTR(-EINVAL);
        }
 
-       if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+       if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
                return ERR_PTR(-EINVAL);
        }
@@ -193,6 +193,24 @@ err_dev_put:
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
+/**
+ * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
+ * @ctx:       Context that allocates the shared memory
+ * @size:      Requested size of shared memory
+ *
+ * The returned memory registered in secure world and is suitable to be
+ * passed as a memory buffer in parameter argument to
+ * tee_client_invoke_func(). The memory allocated is later freed with a
+ * call to tee_shm_free().
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
+{
+       return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+
 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
 {
index 83b1ef3..10d6b22 100644 (file)
@@ -1875,18 +1875,6 @@ static struct attribute *switch_attrs[] = {
        NULL,
 };
 
-static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
-{
-       const struct tb_port *port;
-
-       tb_switch_for_each_port(sw, port) {
-               if (!port->disabled && port->config.type == type)
-                       return true;
-       }
-
-       return false;
-}
-
 static umode_t switch_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
 {
@@ -1895,8 +1883,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 
        if (attr == &dev_attr_authorized.attr) {
                if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
-                   sw->tb->security_level == TB_SECURITY_DPONLY ||
-                   !has_port(sw, TB_TYPE_PCIE_UP))
+                   sw->tb->security_level == TB_SECURITY_DPONLY)
                        return 0;
        } else if (attr == &dev_attr_device.attr) {
                if (!sw->device)
index 4caab87..2350fb3 100644 (file)
@@ -329,6 +329,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
        unsigned int iir, lsr;
+       unsigned long flags;
        unsigned int space, count;
 
        iir = serial_port_in(port, UART_IIR);
@@ -336,7 +337,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
        if (iir & UART_IIR_NO_INT)
                return 0;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        lsr = serial_port_in(port, UART_LSR);
 
@@ -370,7 +371,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
        if (lsr & UART_LSR_THRE)
                serial8250_tx_chars(up);
 
-       uart_unlock_and_check_sysrq(port);
+       uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
        return 1;
 }
index 4e75d2e..fc65a22 100644 (file)
@@ -30,10 +30,11 @@ struct fsl8250_data {
 int fsl8250_handle_irq(struct uart_port *port)
 {
        unsigned char lsr, orig_lsr;
+       unsigned long flags;
        unsigned int iir;
        struct uart_8250_port *up = up_to_u8250p(port);
 
-       spin_lock(&up->port.lock);
+       spin_lock_irqsave(&up->port.lock, flags);
 
        iir = port->serial_in(port, UART_IIR);
        if (iir & UART_IIR_NO_INT) {
@@ -82,7 +83,7 @@ int fsl8250_handle_irq(struct uart_port *port)
 
        up->lsr_saved_flags = orig_lsr;
 
-       uart_unlock_and_check_sysrq(&up->port);
+       uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
 
        return 1;
 }
index f7d3023..fb65dc6 100644 (file)
@@ -93,10 +93,13 @@ static void mtk8250_dma_rx_complete(void *param)
        struct dma_tx_state state;
        int copied, total, cnt;
        unsigned char *ptr;
+       unsigned long flags;
 
        if (data->rx_status == DMA_RX_SHUTDOWN)
                return;
 
+       spin_lock_irqsave(&up->port.lock, flags);
+
        dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
        total = dma->rx_size - state.residue;
        cnt = total;
@@ -120,6 +123,8 @@ static void mtk8250_dma_rx_complete(void *param)
        tty_flip_buffer_push(tty_port);
 
        mtk8250_rx_dma(up);
+
+       spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static void mtk8250_rx_dma(struct uart_8250_port *up)
index 75827b6..a808c28 100644 (file)
@@ -3836,6 +3836,12 @@ static const struct pci_device_id blacklist[] = {
        { PCI_VDEVICE(INTEL, 0x0f0c), },
        { PCI_VDEVICE(INTEL, 0x228a), },
        { PCI_VDEVICE(INTEL, 0x228c), },
+       { PCI_VDEVICE(INTEL, 0x4b96), },
+       { PCI_VDEVICE(INTEL, 0x4b97), },
+       { PCI_VDEVICE(INTEL, 0x4b98), },
+       { PCI_VDEVICE(INTEL, 0x4b99), },
+       { PCI_VDEVICE(INTEL, 0x4b9a), },
+       { PCI_VDEVICE(INTEL, 0x4b9b), },
        { PCI_VDEVICE(INTEL, 0x9ce3), },
        { PCI_VDEVICE(INTEL, 0x9ce4), },
 
@@ -3996,6 +4002,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
                if (pci_match_id(pci_use_msi, dev)) {
                        dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
                        pci_set_master(dev);
+                       uart.port.flags &= ~UPF_SHARE_IRQ;
                        rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
                } else {
                        dev_dbg(&dev->dev, "Using legacy interrupts\n");
index 2164290..1da29a2 100644 (file)
@@ -311,7 +311,11 @@ static const struct serial8250_config uart_config[] = {
 /* Uart divisor latch read */
 static int default_serial_dl_read(struct uart_8250_port *up)
 {
-       return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+       /* Assign these in pieces to truncate any bits above 7.  */
+       unsigned char dll = serial_in(up, UART_DLL);
+       unsigned char dlm = serial_in(up, UART_DLM);
+
+       return dll | dlm << 8;
 }
 
 /* Uart divisor latch write */
@@ -1297,9 +1301,11 @@ static void autoconfig(struct uart_8250_port *up)
        serial_out(up, UART_LCR, 0);
 
        serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-       scratch = serial_in(up, UART_IIR) >> 6;
 
-       switch (scratch) {
+       /* Assign this as it is to truncate any bits above 7.  */
+       scratch = serial_in(up, UART_IIR);
+
+       switch (scratch >> 6) {
        case 0:
                autoconfig_8250(up);
                break;
@@ -1893,11 +1899,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
        unsigned char status;
        struct uart_8250_port *up = up_to_u8250p(port);
        bool skip_rx = false;
+       unsigned long flags;
 
        if (iir & UART_IIR_NO_INT)
                return 0;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        status = serial_port_in(port, UART_LSR);
 
@@ -1923,7 +1930,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                (up->ier & UART_IER_THRI))
                serial8250_tx_chars(up);
 
-       uart_unlock_and_check_sysrq(port);
+       uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
        return 1;
 }
index 508128d..f0e5da7 100644 (file)
@@ -1415,7 +1415,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
 
 static unsigned int lpuart32_get_mctrl(struct uart_port *port)
 {
-       unsigned int mctrl = 0;
+       unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
        u32 reg;
 
        reg = lpuart32_read(port, UARTCTRL);
index 0c1e4df..ef11860 100644 (file)
@@ -1293,7 +1293,8 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
                freq = uartclk;
        if (freq == 0) {
                dev_err(dev, "Cannot get clock rate\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_clk;
        }
 
        if (xtal) {
index 2220327..eba5b9e 100644 (file)
@@ -1045,9 +1045,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
 
        if (tup->cdata->fifo_mode_enable_status) {
                ret = tegra_uart_wait_fifo_mode_enabled(tup);
-               dev_err(tup->uport.dev, "FIFO mode not enabled\n");
-               if (ret < 0)
+               if (ret < 0) {
+                       dev_err(tup->uport.dev,
+                               "Failed to enable FIFO mode: %d\n", ret);
                        return ret;
+               }
        } else {
                /*
                 * For all tegra devices (up to t210), there is a hardware
index 02ec7ab..e29989d 100644 (file)
@@ -731,6 +731,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
                request->actual = 0;
                priv_dev->status_completion_no_call = true;
                priv_dev->pending_status_request = request;
+               usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED);
                spin_unlock_irqrestore(&priv_dev->lock, flags);
 
                /*
index c23f53e..27df0c6 100644 (file)
@@ -1882,7 +1882,7 @@ static int __cdnsp_gadget_init(struct cdns *cdns)
        pdev->gadget.name = "cdnsp-gadget";
        pdev->gadget.speed = USB_SPEED_UNKNOWN;
        pdev->gadget.sg_supported = 1;
-       pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+       pdev->gadget.max_speed = max_speed;
        pdev->gadget.lpm_capable = 1;
 
        pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
index 783ca8f..f740fa6 100644 (file)
@@ -383,8 +383,8 @@ struct cdnsp_intr_reg {
 #define IMAN_IE                        BIT(1)
 #define IMAN_IP                        BIT(0)
 /* bits 2:31 need to be preserved */
-#define IMAN_IE_SET(p)         (((p) & IMAN_IE) | 0x2)
-#define IMAN_IE_CLEAR(p)       (((p) & IMAN_IE) & ~(0x2))
+#define IMAN_IE_SET(p)         ((p) | IMAN_IE)
+#define IMAN_IE_CLEAR(p)       ((p) & ~IMAN_IE)
 
 /* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
 /*
index 6897274..1b14384 100644 (file)
@@ -1932,15 +1932,13 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                }
 
                if (enqd_len + trb_buff_len >= full_len) {
-                       if (need_zero_pkt && zero_len_trb) {
-                               zero_len_trb = true;
-                       } else {
-                               field &= ~TRB_CHAIN;
-                               field |= TRB_IOC;
-                               more_trbs_coming = false;
-                               need_zero_pkt = false;
-                               preq->td.last_trb = ring->enqueue;
-                       }
+                       if (need_zero_pkt)
+                               zero_len_trb = !zero_len_trb;
+
+                       field &= ~TRB_CHAIN;
+                       field |= TRB_IOC;
+                       more_trbs_coming = false;
+                       preq->td.last_trb = ring->enqueue;
                }
 
                /* Only set interrupt on short packet for OUT endpoints. */
@@ -1955,7 +1953,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
-               cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+               cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
                                lower_32_bits(send_addr),
                                upper_32_bits(send_addr),
                                length_field,
index 74d5a9c..73f419a 100644 (file)
@@ -2324,17 +2324,10 @@ static void usbtmc_interrupt(struct urb *urb)
                dev_err(dev, "overflow with length %d, actual length is %d\n",
                        data->iin_wMaxPacketSize, urb->actual_length);
                fallthrough;
-       case -ECONNRESET:
-       case -ENOENT:
-       case -ESHUTDOWN:
-       case -EILSEQ:
-       case -ETIME:
-       case -EPIPE:
+       default:
                /* urb terminated, clean up */
                dev_dbg(dev, "urb terminated, status: %d\n", status);
                return;
-       default:
-               dev_err(dev, "unknown status received: %d\n", status);
        }
 exit:
        rv = usb_submit_urb(urb, GFP_ATOMIC);
index 3740cf9..0697fde 100644 (file)
@@ -193,7 +193,11 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm)
        if (!fsm->host_req_flag)
                return;
 
-       INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+       if (!fsm->hnp_work_inited) {
+               INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+               fsm->hnp_work_inited = true;
+       }
+
        schedule_delayed_work(&fsm->hnp_polling_work,
                                        msecs_to_jiffies(T_HOST_REQ_POLL));
 }
index 45f2bc0..b8d4b2d 100644 (file)
@@ -1741,9 +1741,13 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
 {
        struct dwc3_request             *req;
        struct dwc3_request             *tmp;
+       struct list_head                local;
        struct dwc3                     *dwc = dep->dwc;
 
-       list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
+restart:
+       list_replace_init(&dep->cancelled_list, &local);
+
+       list_for_each_entry_safe(req, tmp, &local, list) {
                dwc3_gadget_ep_skip_trbs(dep, req);
                switch (req->status) {
                case DWC3_REQUEST_STATUS_DISCONNECTED:
@@ -1761,6 +1765,9 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
                        break;
                }
        }
+
+       if (!list_empty(&dep->cancelled_list))
+               goto restart;
 }
 
 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
@@ -2249,6 +2256,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
                }
        }
 
+       /*
+        * Avoid issuing a runtime resume if the device is already in the
+        * suspended state during gadget disconnect.  DWC3 gadget was already
+        * halted/stopped during runtime suspend.
+        */
+       if (!is_on) {
+               pm_runtime_barrier(dwc->dev);
+               if (pm_runtime_suspended(dwc->dev))
+                       return 0;
+       }
+
        /*
         * Check the return value for successful resume, or error.  For a
         * successful resume, the DWC3 runtime PM resume routine will handle
@@ -2958,8 +2976,12 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
 {
        struct dwc3_request     *req;
        struct dwc3_request     *tmp;
+       struct list_head        local;
 
-       list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
+restart:
+       list_replace_init(&dep->started_list, &local);
+
+       list_for_each_entry_safe(req, tmp, &local, list) {
                int ret;
 
                ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
@@ -2967,6 +2989,9 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
                if (ret)
                        break;
        }
+
+       if (!list_empty(&dep->started_list))
+               goto restart;
 }
 
 static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
index 02683ac..bb476e1 100644 (file)
@@ -41,6 +41,7 @@ struct f_hidg {
        unsigned char                   bInterfaceSubClass;
        unsigned char                   bInterfaceProtocol;
        unsigned char                   protocol;
+       unsigned char                   idle;
        unsigned short                  report_desc_length;
        char                            *report_desc;
        unsigned short                  report_length;
@@ -338,6 +339,11 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
 
        spin_lock_irqsave(&hidg->write_spinlock, flags);
 
+       if (!hidg->req) {
+               spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+               return -ESHUTDOWN;
+       }
+
 #define WRITE_COND (!hidg->write_pending)
 try_again:
        /* write queue */
@@ -358,8 +364,14 @@ try_again:
        count  = min_t(unsigned, count, hidg->report_length);
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
-       status = copy_from_user(req->buf, buffer, count);
 
+       if (!req) {
+               ERROR(hidg->func.config->cdev, "hidg->req is NULL\n");
+               status = -ESHUTDOWN;
+               goto release_write_pending;
+       }
+
+       status = copy_from_user(req->buf, buffer, count);
        if (status != 0) {
                ERROR(hidg->func.config->cdev,
                        "copy_from_user error\n");
@@ -387,14 +399,17 @@ try_again:
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
+       if (!hidg->in_ep->enabled) {
+               ERROR(hidg->func.config->cdev, "in_ep is disabled\n");
+               status = -ESHUTDOWN;
+               goto release_write_pending;
+       }
+
        status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
-       if (status < 0) {
-               ERROR(hidg->func.config->cdev,
-                       "usb_ep_queue error on int endpoint %zd\n", status);
+       if (status < 0)
                goto release_write_pending;
-       } else {
+       else
                status = count;
-       }
 
        return status;
 release_write_pending:
@@ -523,6 +538,14 @@ static int hidg_setup(struct usb_function *f,
                goto respond;
                break;
 
+       case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+                 | HID_REQ_GET_IDLE):
+               VDBG(cdev, "get_idle\n");
+               length = min_t(unsigned int, length, 1);
+               ((u8 *) req->buf)[0] = hidg->idle;
+               goto respond;
+               break;
+
        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
                  | HID_REQ_SET_REPORT):
                VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
@@ -546,6 +569,14 @@ static int hidg_setup(struct usb_function *f,
                goto stall;
                break;
 
+       case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+                 | HID_REQ_SET_IDLE):
+               VDBG(cdev, "set_idle\n");
+               length = 0;
+               hidg->idle = value >> 8;
+               goto respond;
+               break;
+
        case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
                  | USB_REQ_GET_DESCRIPTOR):
                switch (value >> 8) {
@@ -773,6 +804,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
        hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
        hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
        hidg->protocol = HID_REPORT_PROTOCOL;
+       hidg->idle = 1;
        hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
        hidg_ss_in_comp_desc.wBytesPerInterval =
                                cpu_to_le16(hidg->report_length);
index 34f4db5..d2a2b20 100644 (file)
@@ -1255,12 +1255,14 @@ static int max3420_probe(struct spi_device *spi)
        err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
                               "max3420", udc);
        if (err < 0)
-               return err;
+               goto del_gadget;
 
        udc->thread_task = kthread_create(max3420_thread, udc,
                                          "max3420-thread");
-       if (IS_ERR(udc->thread_task))
-               return PTR_ERR(udc->thread_task);
+       if (IS_ERR(udc->thread_task)) {
+               err = PTR_ERR(udc->thread_task);
+               goto del_gadget;
+       }
 
        irq = of_irq_get_byname(spi->dev.of_node, "vbus");
        if (irq <= 0) { /* no vbus irq implies self-powered design */
@@ -1280,10 +1282,14 @@ static int max3420_probe(struct spi_device *spi)
                err = devm_request_irq(&spi->dev, irq,
                                       max3420_vbus_handler, 0, "vbus", udc);
                if (err < 0)
-                       return err;
+                       goto del_gadget;
        }
 
        return 0;
+
+del_gadget:
+       usb_del_gadget_udc(&udc->gadget);
+       return err;
 }
 
 static int max3420_remove(struct spi_device *spi)
index 9bbd7dd..a24aea3 100644 (file)
@@ -611,8 +611,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
        if (ohci_at91->wakeup)
                enable_irq_wake(hcd->irq);
 
-       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
-
        ret = ohci_suspend(hcd, ohci_at91->wakeup);
        if (ret) {
                if (ohci_at91->wakeup)
@@ -632,7 +630,10 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
                /* flush the writes */
                (void) ohci_readl (ohci, &ohci->regs->control);
                msleep(1);
+               ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
                at91_stop_clock(ohci_at91);
+       } else {
+               ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
        }
 
        return ret;
@@ -644,6 +645,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
 
+       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+
        if (ohci_at91->wakeup)
                disable_irq_wake(hcd->irq);
        else
@@ -651,8 +654,6 @@ ohci_hcd_at91_drv_resume(struct device *dev)
 
        ohci_resume(hcd, false);
 
-       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
-
        return 0;
 }
 
index 640a46f..f086960 100644 (file)
@@ -35,6 +35,7 @@ struct omap2430_glue {
        struct device           *control_otghs;
        unsigned int            is_runtime_suspended:1;
        unsigned int            needs_resume:1;
+       unsigned int            phy_suspended:1;
 };
 #define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
@@ -458,8 +459,10 @@ static int omap2430_runtime_suspend(struct device *dev)
 
        omap2430_low_level_exit(musb);
 
-       phy_power_off(musb->phy);
-       phy_exit(musb->phy);
+       if (!glue->phy_suspended) {
+               phy_power_off(musb->phy);
+               phy_exit(musb->phy);
+       }
 
        glue->is_runtime_suspended = 1;
 
@@ -474,8 +477,10 @@ static int omap2430_runtime_resume(struct device *dev)
        if (!musb)
                return 0;
 
-       phy_init(musb->phy);
-       phy_power_on(musb->phy);
+       if (!glue->phy_suspended) {
+               phy_init(musb->phy);
+               phy_power_on(musb->phy);
+       }
 
        omap2430_low_level_init(musb);
        musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -489,7 +494,21 @@ static int omap2430_runtime_resume(struct device *dev)
        return 0;
 }
 
+/* I2C and SPI PHYs need to be suspended before the glue layer */
 static int omap2430_suspend(struct device *dev)
+{
+       struct omap2430_glue *glue = dev_get_drvdata(dev);
+       struct musb *musb = glue_to_musb(glue);
+
+       phy_power_off(musb->phy);
+       phy_exit(musb->phy);
+       glue->phy_suspended = 1;
+
+       return 0;
+}
+
+/* Glue layer needs to be suspended after musb_suspend() */
+static int omap2430_suspend_late(struct device *dev)
 {
        struct omap2430_glue *glue = dev_get_drvdata(dev);
 
@@ -501,7 +520,7 @@ static int omap2430_suspend(struct device *dev)
        return omap2430_runtime_suspend(dev);
 }
 
-static int omap2430_resume(struct device *dev)
+static int omap2430_resume_early(struct device *dev)
 {
        struct omap2430_glue *glue = dev_get_drvdata(dev);
 
@@ -513,10 +532,24 @@ static int omap2430_resume(struct device *dev)
        return omap2430_runtime_resume(dev);
 }
 
+static int omap2430_resume(struct device *dev)
+{
+       struct omap2430_glue *glue = dev_get_drvdata(dev);
+       struct musb *musb = glue_to_musb(glue);
+
+       phy_init(musb->phy);
+       phy_power_on(musb->phy);
+       glue->phy_suspended = 0;
+
+       return 0;
+}
+
 static const struct dev_pm_ops omap2430_pm_ops = {
        .runtime_suspend = omap2430_runtime_suspend,
        .runtime_resume = omap2430_runtime_resume,
        .suspend = omap2430_suspend,
+       .suspend_late = omap2430_suspend_late,
+       .resume_early = omap2430_resume_early,
        .resume = omap2430_resume,
 };
 
index 2db917e..8a521b5 100644 (file)
@@ -851,6 +851,7 @@ static struct usb_serial_driver ch341_device = {
                .owner  = THIS_MODULE,
                .name   = "ch341-uart",
        },
+       .bulk_in_size      = 512,
        .id_table          = id_table,
        .num_ports         = 1,
        .open              = ch341_open,
index 4a1f3a9..33bbb34 100644 (file)
@@ -219,6 +219,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
        { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
index add602b..755858c 100644 (file)
 /* Vardaan Enterprises Serial Interface VEUSB422R3 */
 #define FTDI_VARDAAN_PID       0xF070
 
+/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */
+#define FTDI_AUTO_M3_OP_COM_V2_PID     0x4f50
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
index 0fbe253..0394500 100644 (file)
@@ -1203,6 +1203,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),    /* Telit FN980 (PCIe) */
          .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff),    /* Telit FD980 */
+         .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
index 2f2f504..930b3d5 100644 (file)
@@ -418,24 +418,34 @@ static int pl2303_detect_type(struct usb_serial *serial)
        bcdDevice = le16_to_cpu(desc->bcdDevice);
        bcdUSB = le16_to_cpu(desc->bcdUSB);
 
-       switch (bcdDevice) {
-       case 0x100:
-               /*
-                * Assume it's an HXN-type if the device doesn't support the old read
-                * request value.
-                */
-               if (bcdUSB == 0x200 && !pl2303_supports_hx_status(serial))
-                       return TYPE_HXN;
+       switch (bcdUSB) {
+       case 0x110:
+               switch (bcdDevice) {
+               case 0x300:
+                       return TYPE_HX;
+               case 0x400:
+                       return TYPE_HXD;
+               default:
+                       return TYPE_HX;
+               }
                break;
-       case 0x300:
-               if (bcdUSB == 0x200)
+       case 0x200:
+               switch (bcdDevice) {
+               case 0x100:
+               case 0x305:
+                       /*
+                        * Assume it's an HXN-type if the device doesn't
+                        * support the old read request value.
+                        */
+                       if (!pl2303_supports_hx_status(serial))
+                               return TYPE_HXN;
+                       break;
+               case 0x300:
                        return TYPE_TA;
-
-               return TYPE_HX;
-       case 0x400:
-               return TYPE_HXD;
-       case 0x500:
-               return TYPE_TB;
+               case 0x500:
+                       return TYPE_TB;
+               }
+               break;
        }
 
        dev_err(&serial->interface->dev,
index 5b22a1c..b9bb63d 100644 (file)
@@ -5369,7 +5369,7 @@ EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
 void tcpm_sink_frs(struct tcpm_port *port)
 {
        spin_lock(&port->pd_event_lock);
-       port->pd_events = TCPM_FRS_EVENT;
+       port->pd_events |= TCPM_FRS_EVENT;
        spin_unlock(&port->pd_event_lock);
        kthread_queue_work(port->wq, &port->event_work);
 }
@@ -5378,7 +5378,7 @@ EXPORT_SYMBOL_GPL(tcpm_sink_frs);
 void tcpm_sourcing_vbus(struct tcpm_port *port)
 {
        spin_lock(&port->pd_event_lock);
-       port->pd_events = TCPM_SOURCING_VBUS;
+       port->pd_events |= TCPM_SOURCING_VBUS;
        spin_unlock(&port->pd_event_lock);
        kthread_queue_work(port->wq, &port->event_work);
 }
index 0d002a3..fbc9f10 100644 (file)
@@ -64,6 +64,14 @@ int acrn_vm_destroy(struct acrn_vm *vm)
            test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
                return 0;
 
+       ret = hcall_destroy_vm(vm->vmid);
+       if (ret < 0) {
+               dev_err(acrn_dev.this_device,
+                       "Failed to destroy VM %u\n", vm->vmid);
+               clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
+               return ret;
+       }
+
        /* Remove from global VM list */
        write_lock_bh(&acrn_vm_list_lock);
        list_del_init(&vm->list);
@@ -78,14 +86,6 @@ int acrn_vm_destroy(struct acrn_vm *vm)
                vm->monitor_page = NULL;
        }
 
-       ret = hcall_destroy_vm(vm->vmid);
-       if (ret < 0) {
-               dev_err(acrn_dev.this_device,
-                       "Failed to destroy VM %u\n", vm->vmid);
-               clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
-               return ret;
-       }
-
        acrn_vm_all_ram_unmap(vm);
 
        dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
index b96ecba..b60f015 100644 (file)
@@ -244,9 +244,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
- *
- * If the handle isn't valid we're not journaling, but we still need to
- * call into ext4_journal_revoke() to put the buffer head.
  */
 int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
                  int is_metadata, struct inode *inode,
index bc364c1..cebea42 100644 (file)
@@ -138,7 +138,7 @@ static int kmmpd(void *data)
        unsigned mmp_check_interval;
        unsigned long last_update_time;
        unsigned long diff;
-       int retval;
+       int retval = 0;
 
        mmp_block = le64_to_cpu(es->s_mmp_block);
        mmp = (struct mmp_struct *)(bh->b_data);
index 5fd56f6..f3bbcd4 100644 (file)
@@ -2517,7 +2517,7 @@ again:
                                goto journal_error;
                        err = ext4_handle_dirty_dx_node(handle, dir,
                                                        frame->bh);
-                       if (err)
+                       if (restart || err)
                                goto journal_error;
                } else {
                        struct dx_root *dxroot;
index cf086b0..12fc193 100644 (file)
@@ -130,6 +130,7 @@ struct io_cb_cancel_data {
 };
 
 static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
+static void io_wqe_dec_running(struct io_worker *worker);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -168,26 +169,21 @@ static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-       unsigned flags;
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
        wait_for_completion(&worker->ref_done);
 
-       preempt_disable();
-       current->flags &= ~PF_IO_WORKER;
-       flags = worker->flags;
-       worker->flags = 0;
-       if (flags & IO_WORKER_F_RUNNING)
-               atomic_dec(&acct->nr_running);
-       worker->flags = 0;
-       preempt_enable();
-
        raw_spin_lock_irq(&wqe->lock);
-       if (flags & IO_WORKER_F_FREE)
+       if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
        acct->nr_workers--;
+       preempt_disable();
+       io_wqe_dec_running(worker);
+       worker->flags = 0;
+       current->flags &= ~PF_IO_WORKER;
+       preempt_enable();
        raw_spin_unlock_irq(&wqe->lock);
 
        kfree_rcu(worker, rcu);
@@ -214,15 +210,19 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
        struct hlist_nulls_node *n;
        struct io_worker *worker;
 
-       n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
-       if (is_a_nulls(n))
-               return false;
-
-       worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
-       if (io_worker_get(worker)) {
-               wake_up_process(worker->task);
+       /*
+        * Iterate free_list and see if we can find an idle worker to
+        * activate. If a given worker is on the free_list but in the process
+        * of exiting, keep trying.
+        */
+       hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
+               if (!io_worker_get(worker))
+                       continue;
+               if (wake_up_process(worker->task)) {
+                       io_worker_release(worker);
+                       return true;
+               }
                io_worker_release(worker);
-               return true;
        }
 
        return false;
@@ -247,10 +247,19 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
        ret = io_wqe_activate_free_worker(wqe);
        rcu_read_unlock();
 
-       if (!ret && acct->nr_workers < acct->max_workers) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               create_io_worker(wqe->wq, wqe, acct->index);
+       if (!ret) {
+               bool do_create = false;
+
+               raw_spin_lock_irq(&wqe->lock);
+               if (acct->nr_workers < acct->max_workers) {
+                       atomic_inc(&acct->nr_running);
+                       atomic_inc(&wqe->wq->worker_refs);
+                       acct->nr_workers++;
+                       do_create = true;
+               }
+               raw_spin_unlock_irq(&wqe->lock);
+               if (do_create)
+                       create_io_worker(wqe->wq, wqe, acct->index);
        }
 }
 
@@ -271,9 +280,17 @@ static void create_worker_cb(struct callback_head *cb)
 {
        struct create_worker_data *cwd;
        struct io_wq *wq;
+       struct io_wqe *wqe;
+       struct io_wqe_acct *acct;
 
        cwd = container_of(cb, struct create_worker_data, work);
-       wq = cwd->wqe->wq;
+       wqe = cwd->wqe;
+       wq = wqe->wq;
+       acct = &wqe->acct[cwd->index];
+       raw_spin_lock_irq(&wqe->lock);
+       if (acct->nr_workers < acct->max_workers)
+               acct->nr_workers++;
+       raw_spin_unlock_irq(&wqe->lock);
        create_io_worker(wq, cwd->wqe, cwd->index);
        kfree(cwd);
 }
@@ -635,6 +652,9 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
                kfree(worker);
 fail:
                atomic_dec(&acct->nr_running);
+               raw_spin_lock_irq(&wqe->lock);
+               acct->nr_workers--;
+               raw_spin_unlock_irq(&wqe->lock);
                io_worker_ref_put(wq);
                return;
        }
@@ -650,9 +670,8 @@ fail:
        worker->flags |= IO_WORKER_F_FREE;
        if (index == IO_WQ_ACCT_BOUND)
                worker->flags |= IO_WORKER_F_BOUND;
-       if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+       if ((acct->nr_workers == 1) && (worker->flags & IO_WORKER_F_BOUND))
                worker->flags |= IO_WORKER_F_FIXED;
-       acct->nr_workers++;
        raw_spin_unlock_irq(&wqe->lock);
        wake_up_new_task(tsk);
 }
index ab4174a..f79d947 100644 (file)
@@ -1938,6 +1938,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
        namespace_unlock();
 }
 
+static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+       struct mount *child;
+
+       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+               if (!is_subdir(child->mnt_mountpoint, dentry))
+                       continue;
+
+               if (child->mnt.mnt_flags & MNT_LOCKED)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * clone_private_mount - create a private clone of a path
  * @path: path to clone
@@ -1953,10 +1967,19 @@ struct vfsmount *clone_private_mount(const struct path *path)
        struct mount *old_mnt = real_mount(path->mnt);
        struct mount *new_mnt;
 
+       down_read(&namespace_sem);
        if (IS_MNT_UNBINDABLE(old_mnt))
-               return ERR_PTR(-EINVAL);
+               goto invalid;
+
+       if (!check_mnt(old_mnt))
+               goto invalid;
+
+       if (has_locked_children(old_mnt, path->dentry))
+               goto invalid;
 
        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+       up_read(&namespace_sem);
+
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);
 
@@ -1964,6 +1987,10 @@ struct vfsmount *clone_private_mount(const struct path *path)
        new_mnt->mnt_ns = MNT_NS_INTERNAL;
 
        return &new_mnt->mnt;
+
+invalid:
+       up_read(&namespace_sem);
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(clone_private_mount);
 
@@ -2315,19 +2342,6 @@ static int do_change_type(struct path *path, int ms_flags)
        return err;
 }
 
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
-{
-       struct mount *child;
-       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
-               if (!is_subdir(child->mnt_mountpoint, dentry))
-                       continue;
-
-               if (child->mnt.mnt_flags & MNT_LOCKED)
-                       return true;
-       }
-       return false;
-}
-
 static struct mount *__do_loopback(struct path *old_path, int recurse)
 {
        struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
index 41ebf52..ebde05c 100644 (file)
@@ -392,6 +392,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
         */
        take_dentry_name_snapshot(&name, real);
        this = lookup_one_len(name.name.name, connected, name.name.len);
+       release_dentry_name_snapshot(&name);
        err = PTR_ERR(this);
        if (IS_ERR(this)) {
                goto fail;
@@ -406,7 +407,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
        }
 
 out:
-       release_dentry_name_snapshot(&name);
        dput(parent);
        inode_unlock(dir);
        return this;
index 4d53d3b..d081faa 100644 (file)
@@ -392,6 +392,51 @@ out_unlock:
        return ret;
 }
 
+/*
+ * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
+ * due to lock order inversion between pipe->mutex in iter_file_splice_write()
+ * and file_start_write(real.file) in ovl_write_iter().
+ *
+ * So do everything ovl_write_iter() does and call iter_file_splice_write() on
+ * the real file.
+ */
+static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+                               loff_t *ppos, size_t len, unsigned int flags)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       struct inode *inode = file_inode(out);
+       struct inode *realinode = ovl_inode_real(inode);
+       ssize_t ret;
+
+       inode_lock(inode);
+       /* Update mode */
+       ovl_copyattr(realinode, inode);
+       ret = file_remove_privs(out);
+       if (ret)
+               goto out_unlock;
+
+       ret = ovl_real_fdget(out, &real);
+       if (ret)
+               goto out_unlock;
+
+       old_cred = ovl_override_creds(inode->i_sb);
+       file_start_write(real.file);
+
+       ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+
+       file_end_write(real.file);
+       /* Update size */
+       ovl_copyattr(realinode, inode);
+       revert_creds(old_cred);
+       fdput(real);
+
+out_unlock:
+       inode_unlock(inode);
+
+       return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct fd real;
@@ -603,7 +648,7 @@ const struct file_operations ovl_file_operations = {
        .fadvise        = ovl_fadvise,
        .flush          = ovl_flush,
        .splice_read    = generic_file_splice_read,
-       .splice_write   = iter_file_splice_write,
+       .splice_write   = ovl_splice_write,
 
        .copy_file_range        = ovl_copy_file_range,
        .remap_file_range       = ovl_remap_file_range,
index e8ad2c2..150fdf3 100644 (file)
@@ -481,6 +481,8 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
        }
        this = lookup_one_len(p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
+               /* Mark a stale entry */
+               p->is_whiteout = true;
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        this = NULL;
@@ -776,6 +778,9 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                                if (err)
                                        goto out;
                        }
+               }
+               /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+               if (!p->is_whiteout) {
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
                }
index 9ef4231..8e6ef62 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
 
 #include "internal.h"
 
+/*
+ * New pipe buffers will be restricted to this size while the user is exceeding
+ * their pipe buffer quota. The general pipe use case needs at least two
+ * buffers: one for data yet to be read, and one for new data. If this is less
+ * than two, then a write to a non-empty pipe may block even if the pipe is not
+ * full. This can occur with GNU make jobserver or similar uses of pipes as
+ * semaphores: multiple processes may be waiting to write tokens back to the
+ * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
+ *
+ * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
+ * own risk, namely: pipe writes to non-full pipes may block until the pipe is
+ * emptied.
+ */
+#define PIPE_MIN_DEF_BUFFERS 2
+
 /*
  * The max size that a non-root user is allowed to grow the pipe. Can
  * be set by root in /proc/sys/fs/pipe-max-size
@@ -781,8 +796,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
 
        if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
-               user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
-               pipe_bufs = 1;
+               user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
+               pipe_bufs = PIPE_MIN_DEF_BUFFERS;
        }
 
        if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
index 944aa3a..5e08468 100644 (file)
@@ -719,8 +719,13 @@ void mhi_device_put(struct mhi_device *mhi_dev);
  *                            host and device execution environments match and
  *                            channels are in a DISABLED state.
  * @mhi_dev: Device associated with the channels
+ * @flags: MHI channel flags
  */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev,
+                            unsigned int flags);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
 
 /**
  * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
index 52d7fb9..c58cc14 100644 (file)
@@ -518,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
        if (sysrq_ch)
                handle_sysrq(sysrq_ch);
 }
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+               unsigned long flags)
+{
+       int sysrq_ch;
+
+       if (!port->has_sysrq) {
+               spin_unlock_irqrestore(&port->lock, flags);
+               return;
+       }
+
+       sysrq_ch = port->sysrq_ch;
+       port->sysrq_ch = 0;
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       if (sysrq_ch)
+               handle_sysrq(sysrq_ch);
+}
 #else  /* CONFIG_MAGIC_SYSRQ_SERIAL */
 static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 {
@@ -531,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
 {
        spin_unlock(&port->lock);
 }
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+               unsigned long flags)
+{
+       spin_unlock_irqrestore(&port->lock, flags);
+}
 #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
 
 /*
index 54269e4..3ebfea0 100644 (file)
@@ -27,6 +27,7 @@
 #define TEE_SHM_USER_MAPPED    BIT(4)  /* Memory mapped in user space */
 #define TEE_SHM_POOL           BIT(5)  /* Memory allocated from pool */
 #define TEE_SHM_KERNEL_MAPPED  BIT(6)  /* Memory mapped in kernel space */
+#define TEE_SHM_PRIV           BIT(7)  /* Memory private to TEE driver */
 
 struct device;
 struct tee_device;
@@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev);
  * @returns a pointer to 'struct tee_shm'
  */
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
 
 /**
  * tee_shm_register() - Register shared memory buffer
index 3aee78d..784659d 100644 (file)
@@ -196,6 +196,7 @@ struct otg_fsm {
        struct mutex lock;
        u8 *host_req_flag;
        struct delayed_work hnp_polling_work;
+       bool hnp_work_inited;
        bool state_changed;
 };
 
index a53e944..db4312e 100644 (file)
@@ -1230,6 +1230,7 @@ struct hci_dev *hci_alloc_dev(void);
 void hci_free_dev(struct hci_dev *hdev);
 int hci_register_dev(struct hci_dev *hdev);
 void hci_unregister_dev(struct hci_dev *hdev);
+void hci_cleanup_dev(struct hci_dev *hdev);
 int hci_suspend_dev(struct hci_dev *hdev);
 int hci_resume_dev(struct hci_dev *hdev);
 int hci_reset_dev(struct hci_dev *hdev);
index 69c9eab..f3c2841 100644 (file)
@@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
 }
 
 /**
- * flow_action_has_one_action() - check if exactly one action is present
+ * flow_offload_has_one_action() - check if exactly one action is present
  * @action: tc filter flow offload action
  *
  * Returns true if exactly one action is present.
index 625a38c..0bf09a9 100644 (file)
@@ -265,7 +265,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
-       int mtu;
+       unsigned int mtu;
 
        struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
                                inet6_sk(skb->sk) : NULL;
index e946366..1f4e181 100644 (file)
@@ -75,6 +75,7 @@ struct netns_xfrm {
 #endif
        spinlock_t              xfrm_state_lock;
        seqcount_spinlock_t     xfrm_state_hash_generation;
+       seqcount_spinlock_t     xfrm_policy_hash_generation;
 
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
index ec78239..298a8d1 100644 (file)
@@ -337,6 +337,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
 
 /**
  * struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
  */
 struct tcf_pkt_info {
        unsigned char *         ptr;
@@ -355,6 +358,7 @@ struct tcf_ematch_ops;
  * @ops: the operations lookup table of the corresponding ematch module
  * @datalen: length of the ematch specific configuration data
  * @data: ematch specific data
+ * @net: the network namespace
  */
 struct tcf_ematch {
        struct tcf_ematch_ops * ops;
index 7f0e589..b264ab5 100644 (file)
@@ -347,19 +347,20 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 }
 
 static struct cgroup_rstat_cpu *
-cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
 {
        struct cgroup_rstat_cpu *rstatc;
 
        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
-       u64_stats_update_begin(&rstatc->bsync);
+       *flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
        return rstatc;
 }
 
 static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
-                                                struct cgroup_rstat_cpu *rstatc)
+                                                struct cgroup_rstat_cpu *rstatc,
+                                                unsigned long flags)
 {
-       u64_stats_update_end(&rstatc->bsync);
+       u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
        cgroup_rstat_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
 }
@@ -367,18 +368,20 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
        rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
 
        switch (index) {
        case CPUTIME_USER:
@@ -394,7 +397,7 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
                break;
        }
 
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 /*
index 4649170..1cb1f9b 100644 (file)
@@ -11917,6 +11917,37 @@ again:
        return gctx;
 }
 
+static bool
+perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
+{
+       unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
+       bool is_capable = perfmon_capable();
+
+       if (attr->sigtrap) {
+               /*
+                * perf_event_attr::sigtrap sends signals to the other task.
+                * Require the current task to also have CAP_KILL.
+                */
+               rcu_read_lock();
+               is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
+               rcu_read_unlock();
+
+               /*
+                * If the required capabilities aren't available, checks for
+                * ptrace permissions: upgrade to ATTACH, since sending signals
+                * can effectively change the target task.
+                */
+               ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
+       }
+
+       /*
+        * Preserve ptrace permission check for backwards compatibility. The
+        * ptrace check also includes checks that the current task and other
+        * task have matching uids, and is therefore not done here explicitly.
+        */
+       return is_capable || ptrace_may_access(task, ptrace_mode);
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -12163,15 +12194,13 @@ SYSCALL_DEFINE5(perf_event_open,
                        goto err_file;
 
                /*
-                * Preserve ptrace permission check for backwards compatibility.
-                *
                 * We must hold exec_update_lock across this and any potential
                 * perf_install_in_context() call for this new event to
                 * serialize against exec() altering our credentials (and the
                 * perf_event_exit_task() that could imply).
                 */
                err = -EACCES;
-               if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+               if (!perf_check_permission(&attr, task))
                        goto err_cred;
        }
 
index 2d9ff40..20ffcc0 100644 (file)
@@ -1981,12 +1981,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
        dequeue_task(rq, p, flags);
 }
 
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
-       return p->static_prio;
+       int prio;
+
+       if (dl_policy(policy))
+               prio = MAX_DL_PRIO - 1;
+       else if (rt_policy(policy))
+               prio = MAX_RT_PRIO - 1 - rt_prio;
+       else
+               prio = NICE_TO_PRIO(nice);
+
+       return prio;
 }
 
 /*
@@ -1998,15 +2004,7 @@ static inline int __normal_prio(struct task_struct *p)
  */
 static inline int normal_prio(struct task_struct *p)
 {
-       int prio;
-
-       if (task_has_dl_policy(p))
-               prio = MAX_DL_PRIO-1;
-       else if (task_has_rt_policy(p))
-               prio = MAX_RT_PRIO-1 - p->rt_priority;
-       else
-               prio = __normal_prio(p);
-       return prio;
+       return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
 }
 
 /*
@@ -4099,7 +4097,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->prio = p->normal_prio = p->static_prio;
                set_load_weight(p, false);
 
                /*
@@ -6341,6 +6339,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+       if (dl_prio(prio))
+               p->sched_class = &dl_sched_class;
+       else if (rt_prio(prio))
+               p->sched_class = &rt_sched_class;
+       else
+               p->sched_class = &fair_sched_class;
+
+       p->prio = prio;
+}
+
 #ifdef CONFIG_RT_MUTEXES
 
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
@@ -6456,22 +6466,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
                } else {
                        p->dl.pi_se = &p->dl;
                }
-               p->sched_class = &dl_sched_class;
        } else if (rt_prio(prio)) {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (oldprio < prio)
                        queue_flag |= ENQUEUE_HEAD;
-               p->sched_class = &rt_sched_class;
        } else {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (rt_prio(oldprio))
                        p->rt.timeout = 0;
-               p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       __setscheduler_prio(p, prio);
 
        if (queued)
                enqueue_task(rq, p, queue_flag);
@@ -6824,35 +6831,6 @@ static void __setscheduler_params(struct task_struct *p,
        set_load_weight(p, true);
 }
 
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr, bool keep_boost)
-{
-       /*
-        * If params can't change scheduling class changes aren't allowed
-        * either.
-        */
-       if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
-               return;
-
-       __setscheduler_params(p, attr);
-
-       /*
-        * Keep a potential priority boosting if called from
-        * sched_setscheduler().
-        */
-       p->prio = normal_prio(p);
-       if (keep_boost)
-               p->prio = rt_effective_prio(p, p->prio);
-
-       if (dl_prio(p->prio))
-               p->sched_class = &dl_sched_class;
-       else if (rt_prio(p->prio))
-               p->sched_class = &rt_sched_class;
-       else
-               p->sched_class = &fair_sched_class;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -6873,10 +6851,8 @@ static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user, bool pi)
 {
-       int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
-                     MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, queued, running;
-       int new_effective_prio, policy = attr->sched_policy;
+       int oldpolicy = -1, policy = attr->sched_policy;
+       int retval, oldprio, newprio, queued, running;
        const struct sched_class *prev_class;
        struct callback_head *head;
        struct rq_flags rf;
@@ -7074,6 +7050,7 @@ change:
        p->sched_reset_on_fork = reset_on_fork;
        oldprio = p->prio;
 
+       newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
        if (pi) {
                /*
                 * Take priority boosted tasks into account. If the new
@@ -7082,8 +7059,8 @@ change:
                 * the runqueue. This will be done when the task deboost
                 * itself.
                 */
-               new_effective_prio = rt_effective_prio(p, newprio);
-               if (new_effective_prio == oldprio)
+               newprio = rt_effective_prio(p, newprio);
+               if (newprio == oldprio)
                        queue_flags &= ~DEQUEUE_MOVE;
        }
 
@@ -7096,7 +7073,10 @@ change:
 
        prev_class = p->sched_class;
 
-       __setscheduler(rq, p, attr, pi);
+       if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+               __setscheduler_params(p, attr);
+               __setscheduler_prio(p, newprio);
+       }
        __setscheduler_uclamp(p, attr);
 
        if (queued) {
index 9eb11c2..e3d2c23 100644 (file)
@@ -1265,8 +1265,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
 static void timer_sync_wait_running(struct timer_base *base)
 {
        if (atomic_read(&base->timer_waiters)) {
+               raw_spin_unlock_irq(&base->lock);
                spin_unlock(&base->expiry_lock);
                spin_lock(&base->expiry_lock);
+               raw_spin_lock_irq(&base->lock);
        }
 }
 
@@ -1457,14 +1459,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                if (timer->flags & TIMER_IRQSAFE) {
                        raw_spin_unlock(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
-                       base->running_timer = NULL;
                        raw_spin_lock(&base->lock);
+                       base->running_timer = NULL;
                } else {
                        raw_spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
+                       raw_spin_lock_irq(&base->lock);
                        base->running_timer = NULL;
                        timer_sync_wait_running(base);
-                       raw_spin_lock_irq(&base->lock);
                }
        }
 }
index c59dd35..33899a7 100644 (file)
@@ -9135,8 +9135,10 @@ static int trace_array_create_dir(struct trace_array *tr)
                return -EINVAL;
 
        ret = event_trace_add_tracer(tr->dir, tr);
-       if (ret)
+       if (ret) {
                tracefs_remove(tr->dir);
+               return ret;
+       }
 
        init_tracer_tracefs(tr, tr->dir);
        __update_tracer_options(tr);
index 34325f4..949ef09 100644 (file)
@@ -65,7 +65,8 @@
        C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),               \
        C(EMPTY_SORT_FIELD,     "Empty sort field"),                    \
        C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"),      \
-       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),
+       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),   \
+       C(INVALID_STR_OPERAND,  "String type can not be an operand in expression"),
 
 #undef C
 #define C(a, b)                HIST_ERR_##a
@@ -2156,6 +2157,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
                ret = PTR_ERR(operand1);
                goto free;
        }
+       if (operand1->flags & HIST_FIELD_FL_STRING) {
+               /* String type can not be the operand of unary operator. */
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+               destroy_hist_field(operand1, 0);
+               ret = -EINVAL;
+               goto free;
+       }
 
        expr->flags |= operand1->flags &
                (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
@@ -2257,6 +2265,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                operand1 = NULL;
                goto free;
        }
+       if (operand1->flags & HIST_FIELD_FL_STRING) {
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
+               ret = -EINVAL;
+               goto free;
+       }
 
        /* rest of string could be another expression e.g. b+c in a+b+c */
        operand_flags = 0;
@@ -2266,6 +2279,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                operand2 = NULL;
                goto free;
        }
+       if (operand2->flags & HIST_FIELD_FL_STRING) {
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+               ret = -EINVAL;
+               goto free;
+       }
 
        ret = check_expr_operands(file->tr, operand1, operand2);
        if (ret)
@@ -2287,6 +2305,10 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 
        expr->operands[0] = operand1;
        expr->operands[1] = operand2;
+
+       /* The operand sizes should be the same, so just pick one */
+       expr->size = operand1->size;
+
        expr->operator = field_op;
        expr->name = expr_str(expr, 0);
        expr->type = kstrdup(operand1->type, GFP_KERNEL);
index a6c0cda..14f46aa 100644 (file)
@@ -327,7 +327,7 @@ static void move_to_next_cpu(void)
 
        get_online_cpus();
        cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
-       next_cpu = cpumask_next(smp_processor_id(), current_mask);
+       next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
        put_online_cpus();
 
        if (next_cpu >= nr_cpu_ids)
index fc32821..efd14c7 100644 (file)
 #include <linux/sched/task.h>
 #include <linux/static_key.h>
 
+enum tp_func_state {
+       TP_FUNC_0,
+       TP_FUNC_1,
+       TP_FUNC_2,
+       TP_FUNC_N,
+};
+
 extern tracepoint_ptr_t __start___tracepoints_ptrs[];
 extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
 
 DEFINE_SRCU(tracepoint_srcu);
 EXPORT_SYMBOL_GPL(tracepoint_srcu);
 
+enum tp_transition_sync {
+       TP_TRANSITION_SYNC_1_0_1,
+       TP_TRANSITION_SYNC_N_2_1,
+
+       _NR_TP_TRANSITION_SYNC,
+};
+
+struct tp_transition_snapshot {
+       unsigned long rcu;
+       unsigned long srcu;
+       bool ongoing;
+};
+
+/* Protected by tracepoints_mutex */
+static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
+
+static void tp_rcu_get_state(enum tp_transition_sync sync)
+{
+       struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+       /* Keep the latest get_state snapshot. */
+       snapshot->rcu = get_state_synchronize_rcu();
+       snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
+       snapshot->ongoing = true;
+}
+
+static void tp_rcu_cond_sync(enum tp_transition_sync sync)
+{
+       struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+       if (!snapshot->ongoing)
+               return;
+       cond_synchronize_rcu(snapshot->rcu);
+       if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
+               synchronize_srcu(&tracepoint_srcu);
+       snapshot->ongoing = false;
+}
+
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
@@ -246,26 +291,29 @@ static void *func_remove(struct tracepoint_func **funcs,
        return old;
 }
 
-static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
+/*
+ * Count the number of functions (enum tp_func_state) in a tp_funcs array.
+ */
+static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
+{
+       if (!tp_funcs)
+               return TP_FUNC_0;
+       if (!tp_funcs[1].func)
+               return TP_FUNC_1;
+       if (!tp_funcs[2].func)
+               return TP_FUNC_2;
+       return TP_FUNC_N;       /* 3 or more */
+}
+
+static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
 {
        void *func = tp->iterator;
 
        /* Synthetic events do not have static call sites */
        if (!tp->static_call_key)
                return;
-
-       if (!tp_funcs[1].func) {
+       if (nr_func_state(tp_funcs) == TP_FUNC_1)
                func = tp_funcs[0].func;
-               /*
-                * If going from the iterator back to a single caller,
-                * we need to synchronize with __DO_TRACE to make sure
-                * that the data passed to the callback is the one that
-                * belongs to that callback.
-                */
-               if (sync)
-                       tracepoint_synchronize_unregister();
-       }
-
        __static_call_update(tp->static_call_key, tp->static_call_tramp, func);
 }
 
@@ -299,9 +347,41 @@ static int tracepoint_add_func(struct tracepoint *tp,
         * a pointer to it.  This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h using rcu_dereference_sched().
         */
-       tracepoint_update_call(tp, tp_funcs, false);
-       rcu_assign_pointer(tp->funcs, tp_funcs);
-       static_key_enable(&tp->key);
+       switch (nr_func_state(tp_funcs)) {
+       case TP_FUNC_1:         /* 0->1 */
+               /*
+                * Make sure new static func never uses old data after a
+                * 1->0->1 transition sequence.
+                */
+               tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
+               /* Set static call to first function */
+               tracepoint_update_call(tp, tp_funcs);
+               /* Both iterator and static call handle NULL tp->funcs */
+               rcu_assign_pointer(tp->funcs, tp_funcs);
+               static_key_enable(&tp->key);
+               break;
+       case TP_FUNC_2:         /* 1->2 */
+               /* Set iterator static call */
+               tracepoint_update_call(tp, tp_funcs);
+               /*
+                * Iterator callback installed before updating tp->funcs.
+                * Requires ordering between RCU assign/dereference and
+                * static call update/call.
+                */
+               fallthrough;
+       case TP_FUNC_N:         /* N->N+1 (N>1) */
+               rcu_assign_pointer(tp->funcs, tp_funcs);
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>1) transition sequence.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
 
        release_probes(old);
        return 0;
@@ -328,17 +408,52 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                /* Failed allocating new tp_funcs, replaced func with stub */
                return 0;
 
-       if (!tp_funcs) {
+       switch (nr_func_state(tp_funcs)) {
+       case TP_FUNC_0:         /* 1->0 */
                /* Removed last function */
                if (tp->unregfunc && static_key_enabled(&tp->key))
                        tp->unregfunc();
 
                static_key_disable(&tp->key);
+               /* Set iterator static call */
+               tracepoint_update_call(tp, tp_funcs);
+               /* Both iterator and static call handle NULL tp->funcs */
+               rcu_assign_pointer(tp->funcs, NULL);
+               /*
+                * Make sure new static func never uses old data after a
+                * 1->0->1 transition sequence.
+                */
+               tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
+               break;
+       case TP_FUNC_1:         /* 2->1 */
                rcu_assign_pointer(tp->funcs, tp_funcs);
-       } else {
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>2) transition sequence. If the first
+                * element's data has changed, then force the synchronization
+                * to prevent current readers that have loaded the old data
+                * from calling the new function.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
+               /* Set static call to first function */
+               tracepoint_update_call(tp, tp_funcs);
+               break;
+       case TP_FUNC_2:         /* N->N-1 (N>2) */
+               fallthrough;
+       case TP_FUNC_N:
                rcu_assign_pointer(tp->funcs, tp_funcs);
-               tracepoint_update_call(tp, tp_funcs,
-                                      tp_funcs[0].func != old[0].func);
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>2) transition sequence.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
        }
        release_probes(old);
        return 0;
index 87799e2..77be3bb 100644 (file)
@@ -160,6 +160,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
+       long overflow;
 
        spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
@@ -184,8 +185,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
                        return new;
                }
        }
+       overflow = atomic_add_negative(1, &ucounts->count);
        spin_unlock_irq(&ucounts_lock);
-       ucounts = get_ucounts(ucounts);
+       if (overflow) {
+               put_ucounts(ucounts);
+               return NULL;
+       }
        return ucounts;
 }
 
@@ -193,8 +198,7 @@ void put_ucounts(struct ucounts *ucounts)
 {
        unsigned long flags;
 
-       if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock_irqsave(&ucounts_lock, flags);
+       if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
                hlist_del_init(&ucounts->node);
                spin_unlock_irqrestore(&ucounts_lock, flags);
                kfree(ucounts);
index 2560ed2..e1a545c 100644 (file)
@@ -3996,14 +3996,10 @@ EXPORT_SYMBOL(hci_register_dev);
 /* Unregister HCI device */
 void hci_unregister_dev(struct hci_dev *hdev)
 {
-       int id;
-
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
        hci_dev_set_flag(hdev, HCI_UNREGISTER);
 
-       id = hdev->id;
-
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);
@@ -4038,7 +4034,14 @@ void hci_unregister_dev(struct hci_dev *hdev)
        }
 
        device_del(&hdev->dev);
+       /* Actual cleanup is deferred until hci_cleanup_dev(). */
+       hci_dev_put(hdev);
+}
+EXPORT_SYMBOL(hci_unregister_dev);
 
+/* Cleanup HCI device */
+void hci_cleanup_dev(struct hci_dev *hdev)
+{
        debugfs_remove_recursive(hdev->debugfs);
        kfree_const(hdev->hw_info);
        kfree_const(hdev->fw_info);
@@ -4063,11 +4066,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
        hci_blocked_keys_clear(hdev);
        hci_dev_unlock(hdev);
 
-       hci_dev_put(hdev);
-
-       ida_simple_remove(&hci_index_ida, id);
+       ida_simple_remove(&hci_index_ida, hdev->id);
 }
-EXPORT_SYMBOL(hci_unregister_dev);
 
 /* Suspend HCI device */
 int hci_suspend_dev(struct hci_dev *hdev)
index b04a5a0..f1128c2 100644 (file)
@@ -59,6 +59,17 @@ struct hci_pinfo {
        char              comm[TASK_COMM_LEN];
 };
 
+static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
+{
+       struct hci_dev *hdev = hci_pi(sk)->hdev;
+
+       if (!hdev)
+               return ERR_PTR(-EBADFD);
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               return ERR_PTR(-EPIPE);
+       return hdev;
+}
+
 void hci_sock_set_flag(struct sock *sk, int nr)
 {
        set_bit(nr, &hci_pi(sk)->flags);
@@ -759,19 +770,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
        if (event == HCI_DEV_UNREG) {
                struct sock *sk;
 
-               /* Detach sockets from device */
+               /* Wake up sockets using this dead device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
-                       lock_sock(sk);
                        if (hci_pi(sk)->hdev == hdev) {
-                               hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
-                               sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);
-
-                               hci_dev_put(hdev);
                        }
-                       release_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
@@ -930,10 +935,10 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
 {
-       struct hci_dev *hdev = hci_pi(sk)->hdev;
+       struct hci_dev *hdev = hci_hdev_from_sock(sk);
 
-       if (!hdev)
-               return -EBADFD;
+       if (IS_ERR(hdev))
+               return PTR_ERR(hdev);
 
        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;
@@ -1103,6 +1108,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
        lock_sock(sk);
 
+       /* Allow detaching from dead device and attaching to alive device, if
+        * the caller wants to re-bind (instead of close) this socket in
+        * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
+        */
+       hdev = hci_pi(sk)->hdev;
+       if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+               hci_pi(sk)->hdev = NULL;
+               sk->sk_state = BT_OPEN;
+               hci_dev_put(hdev);
+       }
+       hdev = NULL;
+
        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
@@ -1379,9 +1396,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        lock_sock(sk);
 
-       hdev = hci_pi(sk)->hdev;
-       if (!hdev) {
-               err = -EBADFD;
+       hdev = hci_hdev_from_sock(sk);
+       if (IS_ERR(hdev)) {
+               err = PTR_ERR(hdev);
                goto done;
        }
 
@@ -1743,9 +1760,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                goto done;
        }
 
-       hdev = hci_pi(sk)->hdev;
-       if (!hdev) {
-               err = -EBADFD;
+       hdev = hci_hdev_from_sock(sk);
+       if (IS_ERR(hdev)) {
+               err = PTR_ERR(hdev);
                goto done;
        }
 
index 9874844..b69d88b 100644 (file)
@@ -83,6 +83,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 static void bt_host_release(struct device *dev)
 {
        struct hci_dev *hdev = to_hci_dev(dev);
+
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               hci_cleanup_dev(hdev);
        kfree(hdev);
        module_put(THIS_MODULE);
 }
index ef743f9..bbab998 100644 (file)
@@ -166,7 +166,8 @@ static int br_switchdev_event(struct notifier_block *unused,
        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                fdb_info = ptr;
                err = br_fdb_external_learn_add(br, p, fdb_info->addr,
-                                               fdb_info->vid, false);
+                                               fdb_info->vid,
+                                               fdb_info->is_local, false);
                if (err) {
                        err = notifier_from_errno(err);
                        break;
index a16191d..835cec1 100644 (file)
@@ -1019,7 +1019,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 
 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                        struct net_bridge_port *p, const unsigned char *addr,
-                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
+                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+                       struct netlink_ext_ack *extack)
 {
        int err = 0;
 
@@ -1038,7 +1039,15 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                rcu_read_unlock();
                local_bh_enable();
        } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
-               err = br_fdb_external_learn_add(br, p, addr, vid, true);
+               if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "FDB entry towards bridge must be permanent");
+                       return -EINVAL;
+               }
+
+               err = br_fdb_external_learn_add(br, p, addr, vid,
+                                               ndm->ndm_state & NUD_PERMANENT,
+                                               true);
        } else {
                spin_lock_bh(&br->hash_lock);
                err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
@@ -1110,9 +1119,11 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                }
 
                /* VID was specified, so use it. */
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+                                  extack);
        } else {
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+                                  extack);
                if (err || !vg || !vg->num_vlans)
                        goto out;
 
@@ -1124,7 +1135,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                        if (!br_vlan_should_use(v))
                                continue;
                        err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
-                                          nfea_tb);
+                                          nfea_tb, extack);
                        if (err)
                                goto out;
                }
@@ -1264,7 +1275,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 }
 
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-                             const unsigned char *addr, u16 vid,
+                             const unsigned char *addr, u16 vid, bool is_local,
                              bool swdev_notify)
 {
        struct net_bridge_fdb_entry *fdb;
@@ -1281,6 +1292,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 
                if (swdev_notify)
                        flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+               if (is_local)
+                       flags |= BIT(BR_FDB_LOCAL);
+
                fdb = fdb_create(br, p, addr, vid, flags);
                if (!fdb) {
                        err = -ENOMEM;
@@ -1307,6 +1322,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                if (swdev_notify)
                        set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
+               if (is_local)
+                       set_bit(BR_FDB_LOCAL, &fdb->flags);
+
                if (modified)
                        fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
        }
index 2b48b20..aa64d8d 100644 (file)
@@ -711,7 +711,7 @@ int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-                             const unsigned char *addr, u16 vid,
+                             const unsigned char *addr, u16 vid, bool is_local,
                              bool swdev_notify);
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid,
index e09147a..fc61cd3 100644 (file)
@@ -298,6 +298,9 @@ int tcp_gro_complete(struct sk_buff *skb)
        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
+       if (skb->encapsulation)
+               skb->inner_transport_header = skb->transport_header;
+
        return 0;
 }
 EXPORT_SYMBOL(tcp_gro_complete);
index 9dde1e5..1380a6b 100644 (file)
@@ -624,6 +624,10 @@ static int udp_gro_complete_segment(struct sk_buff *skb)
 
        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+
+       if (skb->encapsulation)
+               skb->inner_transport_header = skb->transport_header;
+
        return 0;
 }
 
index d2591eb..56263c2 100644 (file)
@@ -27,7 +27,6 @@ struct mptcp_pm_addr_entry {
        struct mptcp_addr_info  addr;
        u8                      flags;
        int                     ifindex;
-       struct rcu_head         rcu;
        struct socket           *lsk;
 };
 
index fa61167..1dc955c 100644 (file)
@@ -15,6 +15,7 @@ struct qrtr_mhi_dev {
        struct qrtr_endpoint ep;
        struct mhi_device *mhi_dev;
        struct device *dev;
+       struct completion ready;
 };
 
 /* From MHI to QRTR */
@@ -50,6 +51,10 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
        struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
        int rc;
 
+       rc = wait_for_completion_interruptible(&qdev->ready);
+       if (rc)
+               goto free_skb;
+
        if (skb->sk)
                sock_hold(skb->sk);
 
@@ -79,7 +84,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        int rc;
 
        /* start channels */
-       rc = mhi_prepare_for_transfer(mhi_dev);
+       rc = mhi_prepare_for_transfer(mhi_dev, 0);
        if (rc)
                return rc;
 
@@ -96,6 +101,15 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        if (rc)
                return rc;
 
+       /* start channels */
+       rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+       if (rc) {
+               qrtr_endpoint_unregister(&qdev->ep);
+               dev_set_drvdata(&mhi_dev->dev, NULL);
+               return rc;
+       }
+
+       complete_all(&qdev->ready);
        dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
 
        return 0;
index d9ac60f..a8dd06c 100644 (file)
@@ -913,7 +913,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 
        /* seqlock has the same scope of busylock, for NOLOCK qdisc */
        spin_lock_init(&sch->seqlock);
-       lockdep_set_class(&sch->busylock,
+       lockdep_set_class(&sch->seqlock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
        seqcount_init(&sch->running);
index 07b30d0..9c79374 100644 (file)
@@ -1739,8 +1739,6 @@ static void taprio_attach(struct Qdisc *sch)
                if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                        qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
                        old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-                       if (ntx < dev->real_num_tx_queues)
-                               qdisc_hash_add(qdisc, false);
                } else {
                        old = dev_graft_qdisc(qdisc->dev_queue, sch);
                        qdisc_refcount_inc(sch);
index fe74c5f..db6b737 100644 (file)
@@ -857,14 +857,18 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
        memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
        cur_key->key = key;
 
-       if (replace) {
-               list_del_init(&shkey->key_list);
-               sctp_auth_shkey_release(shkey);
-               if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
-                       sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+       if (!replace) {
+               list_add(&cur_key->key_list, sh_keys);
+               return 0;
        }
+
+       list_del_init(&shkey->key_list);
+       sctp_auth_shkey_release(shkey);
        list_add(&cur_key->key_list, sh_keys);
 
+       if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+               sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+
        return 0;
 }
 
index 169ba8b..081e7ae 100644 (file)
@@ -1079,6 +1079,9 @@ virtio_transport_recv_connected(struct sock *sk,
                virtio_transport_recv_enqueue(vsk, pkt);
                sk->sk_data_ready(sk);
                return err;
+       case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+               virtio_transport_send_credit_update(vsk);
+               break;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
                sk->sk_write_space(sk);
                break;
index a20aec9..2bf2693 100644 (file)
@@ -298,8 +298,16 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
        len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
 
        nla_for_each_attr(nla, attrs, len, remaining) {
-               int err = xfrm_xlate64_attr(dst, nla);
+               int err;
 
+               switch (type) {
+               case XFRM_MSG_NEWSPDINFO:
+                       err = xfrm_nla_cpy(dst, nla, nla_len(nla));
+                       break;
+               default:
+                       err = xfrm_xlate64_attr(dst, nla);
+                       break;
+               }
                if (err)
                        return err;
        }
@@ -341,7 +349,8 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
 
 /* Calculates len of translated 64-bit message. */
 static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
-                                           struct nlattr *attrs[XFRMA_MAX+1])
+                                           struct nlattr *attrs[XFRMA_MAX + 1],
+                                           int maxtype)
 {
        size_t len = nlmsg_len(src);
 
@@ -358,10 +367,20 @@ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
        case XFRM_MSG_POLEXPIRE:
                len += 8;
                break;
+       case XFRM_MSG_NEWSPDINFO:
+       /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+               return len;
        default:
                break;
        }
 
+       /* Unexpected for anything but XFRM_MSG_NEWSPDINFO; please
+        * correct both 64=>32-bit and 32=>64-bit translators to copy
+        * new attributes.
+        */
+       if (WARN_ON_ONCE(maxtype))
+               return len;
+
        if (attrs[XFRMA_SA])
                len += 4;
        if (attrs[XFRMA_POLICY])
@@ -440,7 +459,8 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
 
 static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
                        struct nlattr *attrs[XFRMA_MAX+1],
-                       size_t size, u8 type, struct netlink_ext_ack *extack)
+                       size_t size, u8 type, int maxtype,
+                       struct netlink_ext_ack *extack)
 {
        size_t pos;
        int i;
@@ -520,6 +540,25 @@ static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
        }
        pos = dst->nlmsg_len;
 
+       if (maxtype) {
+               /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+               WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO);
+
+               for (i = 1; i <= maxtype; i++) {
+                       int err;
+
+                       if (!attrs[i])
+                               continue;
+
+                       /* just copy - no need for translation */
+                       err = xfrm_attr_cpy32(dst, &pos, attrs[i], size,
+                                       nla_len(attrs[i]), nla_len(attrs[i]));
+                       if (err)
+                               return err;
+               }
+               return 0;
+       }
+
        for (i = 1; i < XFRMA_MAX + 1; i++) {
                int err;
 
@@ -564,7 +603,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
        if (err < 0)
                return ERR_PTR(err);
 
-       len = xfrm_user_rcv_calculate_len64(h32, attrs);
+       len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype);
        /* The message doesn't need translation */
        if (len == nlmsg_len(h32))
                return NULL;
@@ -574,7 +613,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
        if (!h64)
                return ERR_PTR(-ENOMEM);
 
-       err = xfrm_xlate32(h64, h32, attrs, len, type, extack);
+       err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack);
        if (err < 0) {
                kvfree(h64);
                return ERR_PTR(err);
index 2e8afe0..cb40ff0 100644 (file)
@@ -241,7 +241,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
                        break;
        }
 
-       WARN_ON(!pos);
+       WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
 
        if (--pos->users)
                return;
index 827d842..7f881f5 100644 (file)
@@ -155,7 +155,6 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
                                                __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
 
 static struct rhashtable xfrm_policy_inexact_table;
 static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -585,7 +584,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
                return;
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
-       write_seqcount_begin(&xfrm_policy_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 
        odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
                                lockdep_is_held(&net->xfrm.xfrm_policy_lock));
@@ -596,7 +595,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
        rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
        net->xfrm.policy_bydst[dir].hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_policy_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        synchronize_rcu();
@@ -1245,7 +1244,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
        } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
-       write_seqcount_begin(&xfrm_policy_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 
        /* make sure that we can insert the indirect policies again before
         * we start with destructive action.
@@ -1354,7 +1353,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
 out_unlock:
        __xfrm_policy_inexact_flush(net);
-       write_seqcount_end(&xfrm_policy_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        mutex_unlock(&hash_resize_mutex);
@@ -2091,15 +2090,12 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        if (unlikely(!daddr || !saddr))
                return NULL;
 
- retry:
-       sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
        rcu_read_lock();
-
-       chain = policy_hash_direct(net, daddr, saddr, family, dir);
-       if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
-               rcu_read_unlock();
-               goto retry;
-       }
+ retry:
+       do {
+               sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
+               chain = policy_hash_direct(net, daddr, saddr, family, dir);
+       } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
 
        ret = NULL;
        hlist_for_each_entry_rcu(pol, chain, bydst) {
@@ -2130,15 +2126,11 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        }
 
 skip_inexact:
-       if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
-               rcu_read_unlock();
+       if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
                goto retry;
-       }
 
-       if (ret && !xfrm_pol_hold_rcu(ret)) {
-               rcu_read_unlock();
+       if (ret && !xfrm_pol_hold_rcu(ret))
                goto retry;
-       }
 fail:
        rcu_read_unlock();
 
@@ -4089,6 +4081,7 @@ static int __net_init xfrm_net_init(struct net *net)
        /* Initialize the per-net locks here */
        spin_lock_init(&net->xfrm.xfrm_state_lock);
        spin_lock_init(&net->xfrm.xfrm_policy_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
        mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
        rv = xfrm_statistics_init(net);
@@ -4133,7 +4126,6 @@ void __init xfrm_init(void)
 {
        register_pernet_subsys(&xfrm_net_ops);
        xfrm_dev_init();
-       seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
        xfrm_input_init();
 
 #ifdef CONFIG_XFRM_ESPINTCP
index b47d613..7aff641 100644 (file)
@@ -2811,6 +2811,16 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = link->doit(skb, nlh, attrs);
 
+       /* We need to free skb allocated in xfrm_alloc_compat() before
+        * returning from this function, because consume_skb() won't take
+        * care of frag_list since netlink destructor sets
+        * skb->head to NULL. (see netlink_skb_destructor())
+        */
+       if (skb_has_frag_list(skb)) {
+               kfree_skb(skb_shinfo(skb)->frag_list);
+               skb_shinfo(skb)->frag_list = NULL;
+       }
+
 err:
        kvfree(nlh64);
        return err;
index f67b125..94cd49e 100755 (executable)
@@ -1,10 +1,10 @@
 #! /usr/bin/env perl
 # SPDX-License-Identifier: GPL-2.0
 #
-# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION
-# without including <linux/version.h>, or cases of
-# including <linux/version.h> that don't need it.
-# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net>
+# checkversion finds uses of all macros in <linux/version.h>
+# where the source files do not #include <linux/version.h>; or cases
+# of including <linux/version.h> where it is not needed.
+# Copyright (C) 2003, Randy Dunlap <rdunlap@infradead.org>
 
 use strict;
 
@@ -13,7 +13,8 @@ $| = 1;
 my $debugging;
 
 foreach my $file (@ARGV) {
-    next if $file =~ "include/linux/version\.h";
+    next if $file =~ "include/generated/uapi/linux/version\.h";
+    next if $file =~ "usr/include/linux/version\.h";
     # Open this file.
     open( my $f, '<', $file )
       or die "Can't open $file: $!\n";
@@ -41,8 +42,11 @@ foreach my $file (@ARGV) {
            $iLinuxVersion      = $. if m/^\s*#\s*include\s*<linux\/version\.h>/o;
        }
 
-       # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE
-       if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) {
+       # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION,
+       # LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL
+       if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) ||
+           ($_ =~ /LINUX_VERSION_MAJOR/) || ($_ =~ /LINUX_VERSION_PATCHLEVEL/) ||
+           ($_ =~ /LINUX_VERSION_SUBLEVEL/)) {
            $fUseVersion = 1;
             last if $iLinuxVersion;
         }
index c17e480..8f6b13a 100755 (executable)
@@ -173,39 +173,6 @@ my $mcount_regex;  # Find the call site to mcount (return offset)
 my $mcount_adjust;     # Address adjustment to mcount offset
 my $alignment;         # The .align value to use for $mcount_section
 my $section_type;      # Section header plus possible alignment command
-my $can_use_local = 0;         # If we can use local function references
-
-# Shut up recordmcount if user has older objcopy
-my $quiet_recordmcount = ".tmp_quiet_recordmcount";
-my $print_warning = 1;
-$print_warning = 0 if ( -f $quiet_recordmcount);
-
-##
-# check_objcopy - whether objcopy supports --globalize-symbols
-#
-#  --globalize-symbols came out in 2.17, we must test the version
-#  of objcopy, and if it is less than 2.17, then we can not
-#  record local functions.
-sub check_objcopy
-{
-    open (IN, "$objcopy --version |") or die "error running $objcopy";
-    while (<IN>) {
-       if (/objcopy.*\s(\d+)\.(\d+)/) {
-           $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17));
-           last;
-       }
-    }
-    close (IN);
-
-    if (!$can_use_local && $print_warning) {
-       print STDERR "WARNING: could not find objcopy version or version " .
-           "is less than 2.17.\n" .
-           "\tLocal function references are disabled.\n";
-       open (QUIET, ">$quiet_recordmcount");
-       printf QUIET "Disables the warning from recordmcount.pl\n";
-       close QUIET;
-    }
-}
 
 if ($arch =~ /(x86(_64)?)|(i386)/) {
     if ($bits == 64) {
@@ -434,8 +401,6 @@ if ($filename =~ m,^(.*)(\.\S),) {
 my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
 my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
 
-check_objcopy();
-
 #
 # Step 1: find all the local (static functions) and weak symbols.
 #         't' is local, 'w/W' is weak
@@ -473,11 +438,6 @@ sub update_funcs
 
     # is this function static? If so, note this fact.
     if (defined $locals{$ref_func}) {
-
-       # only use locals if objcopy supports globalize-symbols
-       if (!$can_use_local) {
-           return;
-       }
        $convert{$ref_func} = 1;
     }
 
index 74f8aad..7011fbe 100755 (executable)
@@ -17,7 +17,7 @@ Usage:
        $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
        Wait some times but not too much, the script is a bit slow.
        Break the pipe (Ctrl + Z)
-       $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
+       $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace
        Then you have your drawn trace in draw_functrace
 """
 
@@ -103,10 +103,10 @@ def parseLine(line):
        line = line.strip()
        if line.startswith("#"):
                raise CommentLineException
-       m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
+       m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line)
        if m is None:
                raise BrokenLineException
-       return (m.group(1), m.group(2), m.group(3))
+       return (m.group(2), m.group(3), m.group(4))
 
 
 def main():
index defc5ef..0ae1b71 100644 (file)
@@ -874,7 +874,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
        rc = sidtab_init(s);
        if (rc) {
                pr_err("SELinux:  out of memory on SID table init\n");
-               goto out;
+               return rc;
        }
 
        head = p->ocontexts[OCON_ISID];
@@ -885,7 +885,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                if (sid == SECSID_NULL) {
                        pr_err("SELinux:  SID 0 was assigned a context.\n");
                        sidtab_destroy(s);
-                       goto out;
+                       return -EINVAL;
                }
 
                /* Ignore initial SIDs unused by this kernel. */
@@ -897,12 +897,10 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                        pr_err("SELinux:  unable to load initial SID %s.\n",
                               name);
                        sidtab_destroy(s);
-                       goto out;
+                       return rc;
                }
        }
-       rc = 0;
-out:
-       return rc;
+       return 0;
 }
 
 int policydb_class_isvalid(struct policydb *p, unsigned int class)
index 83b79ed..439a358 100644 (file)
@@ -215,7 +215,7 @@ static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
                                   struct vm_area_struct *area)
 {
        return remap_pfn_range(area, area->vm_start,
-                              dmab->addr >> PAGE_SHIFT,
+                              page_to_pfn(virt_to_page(dmab->area)),
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
 }
index 6a2971a..09c0e2a 100644 (file)
@@ -246,7 +246,7 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
                return false;
 
-       if (substream->ops->mmap)
+       if (substream->ops->mmap || substream->ops->page)
                return true;
 
        switch (substream->dma_buffer.dev.type) {
index b9c2ce2..84d7863 100644 (file)
@@ -514,10 +514,11 @@ static int check_and_subscribe_port(struct snd_seq_client *client,
        return err;
 }
 
-static void delete_and_unsubscribe_port(struct snd_seq_client *client,
-                                       struct snd_seq_client_port *port,
-                                       struct snd_seq_subscribers *subs,
-                                       bool is_src, bool ack)
+/* called with grp->list_mutex held */
+static void __delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                         struct snd_seq_client_port *port,
+                                         struct snd_seq_subscribers *subs,
+                                         bool is_src, bool ack)
 {
        struct snd_seq_port_subs_info *grp;
        struct list_head *list;
@@ -525,7 +526,6 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
 
        grp = is_src ? &port->c_src : &port->c_dest;
        list = is_src ? &subs->src_list : &subs->dest_list;
-       down_write(&grp->list_mutex);
        write_lock_irq(&grp->list_lock);
        empty = list_empty(list);
        if (!empty)
@@ -535,6 +535,18 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
 
        if (!empty)
                unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack)
+{
+       struct snd_seq_port_subs_info *grp;
+
+       grp = is_src ? &port->c_src : &port->c_dest;
+       down_write(&grp->list_mutex);
+       __delete_and_unsubscribe_port(client, port, subs, is_src, ack);
        up_write(&grp->list_mutex);
 }
 
@@ -590,27 +602,30 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_client_port *dest_port,
                            struct snd_seq_port_subscribe *info)
 {
-       struct snd_seq_port_subs_info *src = &src_port->c_src;
+       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
        struct snd_seq_subscribers *subs;
        int err = -ENOENT;
 
-       down_write(&src->list_mutex);
+       /* always start from deleting the dest port for avoiding concurrent
+        * deletions
+        */
+       down_write(&dest->list_mutex);
        /* look for the connection */
-       list_for_each_entry(subs, &src->list_head, src_list) {
+       list_for_each_entry(subs, &dest->list_head, dest_list) {
                if (match_subs_info(info, &subs->info)) {
-                       atomic_dec(&subs->ref_count); /* mark as not ready */
+                       __delete_and_unsubscribe_port(dest_client, dest_port,
+                                                     subs, false,
+                                                     connector->number != dest_client->number);
                        err = 0;
                        break;
                }
        }
-       up_write(&src->list_mutex);
+       up_write(&dest->list_mutex);
        if (err < 0)
                return err;
 
        delete_and_unsubscribe_port(src_client, src_port, subs, true,
                                    connector->number != src_client->number);
-       delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
-                                   connector->number != dest_client->number);
        kfree(subs);
        return 0;
 }
index caaf0e8..21c5215 100644 (file)
@@ -8274,9 +8274,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
index 2f6a624..a1f8c3a 100644 (file)
@@ -907,7 +907,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
                }
        }
 
-       if (chip->quirk_type & QUIRK_SETUP_DISABLE_AUTOSUSPEND)
+       if (chip->quirk_type == QUIRK_SETUP_DISABLE_AUTOSUSPEND)
                usb_enable_autosuspend(interface_to_usbdev(intf));
 
        chip->num_interfaces--;
index 52de522..14456f6 100644 (file)
@@ -324,6 +324,12 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
                                              sources[ret - 1],
                                              visited, validate);
                if (ret > 0) {
+                       /*
+                        * For Samsung USBC Headset (AKG), setting clock selector again
+                        * will result in incorrect default clock setting problems
+                        */
+                       if (chip->usb_id == USB_ID(0x04e8, 0xa051))
+                               return ret;
                        err = uac_clock_selector_set_val(chip, entity_id, cur);
                        if (err < 0)
                                return err;
index f4cdaf1..9b713b4 100644 (file)
@@ -1816,6 +1816,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
                strlcat(name, " - Output Jack", name_size);
 }
 
+/* get connector value to "wake up" the USB audio */
+static int connector_mixer_resume(struct usb_mixer_elem_list *list)
+{
+       struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
+
+       get_connector_value(cval, NULL, NULL);
+       return 0;
+}
+
 /* Build a mixer control for a UAC connector control (jack-detect) */
 static void build_connector_control(struct usb_mixer_interface *mixer,
                                    const struct usbmix_name_map *imap,
@@ -1833,6 +1842,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
        if (!cval)
                return;
        snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id);
+
+       /* set up a specific resume callback */
+       cval->head.resume = connector_mixer_resume;
+
        /*
         * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the
         * number of channels connected.
@@ -3642,23 +3655,15 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
        return 0;
 }
 
-static int default_mixer_resume(struct usb_mixer_elem_list *list)
-{
-       struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
-
-       /* get connector value to "wake up" the USB audio */
-       if (cval->val_type == USB_MIXER_BOOLEAN && cval->channels == 1)
-               get_connector_value(cval, NULL, NULL);
-
-       return 0;
-}
-
 static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
 {
-       int err = default_mixer_resume(list);
+       int err;
 
-       if (err < 0)
-               return err;
+       if (list->resume) {
+               err = list->resume(list);
+               if (err < 0)
+                       return err;
+       }
        return restore_mixer_value(list);
 }
 
@@ -3697,7 +3702,7 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
        list->id = unitid;
        list->dump = snd_usb_mixer_dump_cval;
 #ifdef CONFIG_PM
-       list->resume = default_mixer_resume;
+       list->resume = NULL;
        list->reset_resume = default_mixer_reset_resume;
 #endif
 }
index f9d698a..3d5848d 100644 (file)
@@ -228,7 +228,7 @@ enum {
 };
 
 static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
-       "Mute", "Dim"
+       "Mute Playback Switch", "Dim Playback Switch"
 };
 
 /* Description of each hardware port type:
@@ -1856,9 +1856,15 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
                                        struct snd_ctl_elem_value *ucontrol)
 {
        struct usb_mixer_elem_info *elem = kctl->private_data;
-       struct scarlett2_data *private = elem->head.mixer->private_data;
+       struct usb_mixer_interface *mixer = elem->head.mixer;
+       struct scarlett2_data *private = mixer->private_data;
        int index = line_out_remap(private, elem->control);
 
+       mutex_lock(&private->data_mutex);
+       if (private->vol_updated)
+               scarlett2_update_volumes(mixer);
+       mutex_unlock(&private->data_mutex);
+
        ucontrol->value.integer.value[0] = private->mute_switch[index];
        return 0;
 }
@@ -1955,10 +1961,12 @@ static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
                        ~SNDRV_CTL_ELEM_ACCESS_WRITE;
        }
 
-       /* Notify of write bit change */
-       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+       /* Notify of write bit and possible value change */
+       snd_ctl_notify(card,
+                      SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &private->vol_ctls[index]->id);
-       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+       snd_ctl_notify(card,
+                      SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &private->mute_ctls[index]->id);
 }
 
@@ -2530,14 +2538,18 @@ static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
 {
        struct scarlett2_data *private = mixer->private_data;
        const struct scarlett2_device_info *info = private->info;
+       const char *s;
 
        if (!info->direct_monitor)
                return 0;
 
+       s = info->direct_monitor == 1
+             ? "Direct Monitor Playback Switch"
+             : "Direct Monitor Playback Enum";
+
        return scarlett2_add_new_ctl(
                mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
-               0, 1, "Direct Monitor Playback Switch",
-               &private->direct_monitor_ctl);
+               0, 1, s, &private->direct_monitor_ctl);
 }
 
 /*** Speaker Switching Control ***/
@@ -2589,7 +2601,9 @@ static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
 
                /* disable the line out SW/HW switch */
                scarlett2_sw_hw_ctl_ro(private, i);
-               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+               snd_ctl_notify(card,
+                              SNDRV_CTL_EVENT_MASK_VALUE |
+                                SNDRV_CTL_EVENT_MASK_INFO,
                               &private->sw_hw_ctls[i]->id);
        }
 
@@ -2913,7 +2927,7 @@ static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
                        if (private->vol_sw_hw_switch[line_index]) {
                                private->mute_switch[line_index] = val;
                                snd_ctl_notify(mixer->chip->card,
-                                              SNDRV_CTL_EVENT_MASK_INFO,
+                                              SNDRV_CTL_EVENT_MASK_VALUE,
                                               &private->mute_ctls[i]->id);
                        }
                }
@@ -3455,7 +3469,7 @@ static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
 
        /* Add MSD control */
        return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
-                                    0, 1, "MSD Mode", NULL);
+                                    0, 1, "MSD Mode Switch", NULL);
 }
 
 /*** Cleanup/Suspend Callbacks ***/
index e7accd8..326d1b0 100644 (file)
@@ -1899,6 +1899,7 @@ static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
        REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),     /* JBL Quantum 600 */
        REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),     /* JBL Quantum 400 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2),     /* JBL Quantum 600 */
        REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),     /* JBL Quantum 800 */
        { 0 }                                   /* terminator */
 };
index 412eaee..b669107 100644 (file)
 #define HV_X64_GUEST_DEBUGGING_AVAILABLE               BIT(1)
 #define HV_X64_PERF_MONITOR_AVAILABLE                  BIT(2)
 #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE      BIT(3)
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE          BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE           BIT(4)
 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE              BIT(5)
 #define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE            BIT(8)
 #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE           BIT(10)
 #define HV_STATUS_INVALID_CONNECTION_ID                18
 #define HV_STATUS_INSUFFICIENT_BUFFERS         19
 
+/* hypercall options */
+#define HV_HYPERCALL_FAST_BIT          BIT(16)
+
 #endif /* !SELFTEST_KVM_HYPERV_H */
index bab10ae..e0b2bb1 100644 (file)
@@ -215,7 +215,7 @@ int main(void)
        vcpu_set_hv_cpuid(vm, VCPU_ID);
 
        tsc_page_gva = vm_vaddr_alloc_page(vm);
-       memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
+       memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
        TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
                "TSC page has to be page aligned\n");
        vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
index af27c7e..91d88aa 100644 (file)
@@ -47,6 +47,7 @@ static void do_wrmsr(u32 idx, u64 val)
 }
 
 static int nr_gp;
+static int nr_ud;
 
 static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
                            vm_vaddr_t output_address)
@@ -80,6 +81,12 @@ static void guest_gp_handler(struct ex_regs *regs)
                regs->rip = (uint64_t)&wrmsr_end;
 }
 
+static void guest_ud_handler(struct ex_regs *regs)
+{
+       nr_ud++;
+       regs->rip += 3;
+}
+
 struct msr_data {
        uint32_t idx;
        bool available;
@@ -90,6 +97,7 @@ struct msr_data {
 struct hcall_data {
        uint64_t control;
        uint64_t expect;
+       bool ud_expected;
 };
 
 static void guest_msr(struct msr_data *msr)
@@ -117,13 +125,26 @@ static void guest_msr(struct msr_data *msr)
 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
 {
        int i = 0;
+       u64 res, input, output;
 
        wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
        wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
 
        while (hcall->control) {
-               GUEST_ASSERT(hypercall(hcall->control, pgs_gpa,
-                                      pgs_gpa + 4096) == hcall->expect);
+               nr_ud = 0;
+               if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
+                       input = pgs_gpa;
+                       output = pgs_gpa + 4096;
+               } else {
+                       input = output = 0;
+               }
+
+               res = hypercall(hcall->control, input, output);
+               if (hcall->ud_expected)
+                       GUEST_ASSERT(nr_ud == 1);
+               else
+                       GUEST_ASSERT(res == hcall->expect);
+
                GUEST_SYNC(i++);
        }
 
@@ -552,8 +573,18 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
                        recomm.ebx = 0xfff;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;
-
                case 17:
+                       /* XMM fast hypercall */
+                       hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
+                       hcall->ud_expected = true;
+                       break;
+               case 18:
+                       feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+                       hcall->ud_expected = false;
+                       hcall->expect = HV_STATUS_SUCCESS;
+                       break;
+
+               case 19:
                        /* END */
                        hcall->control = 0;
                        break;
@@ -625,6 +656,10 @@ int main(void)
        /* Test hypercalls */
        vm = vm_create_default(VCPU_ID, 0, guest_hcall);
 
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+       vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
        /* Hypercall input/output */
        hcall_page = vm_vaddr_alloc_pages(vm, 2);
        memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
index f23438d..3d7dde2 100644 (file)
@@ -484,13 +484,16 @@ enum desc_type {
        MONITOR_ACQUIRE,
        EXPIRE_STATE,
        EXPIRE_POLICY,
+       SPDINFO_ATTRS,
 };
 const char *desc_name[] = {
        "create tunnel",
        "alloc spi",
        "monitor acquire",
        "expire state",
-       "expire policy"
+       "expire policy",
+       "spdinfo attributes",
+       ""
 };
 struct xfrm_desc {
        enum desc_type  type;
@@ -1593,6 +1596,155 @@ out_close:
        return ret;
 }
 
+static int xfrm_spdinfo_set_thresh(int xfrm_sock, uint32_t *seq,
+               unsigned thresh4_l, unsigned thresh4_r,
+               unsigned thresh6_l, unsigned thresh6_r,
+               bool add_bad_attr)
+
+{
+       struct {
+               struct nlmsghdr         nh;
+               union {
+                       uint32_t        unused;
+                       int             error;
+               };
+               char                    attrbuf[MAX_PAYLOAD];
+       } req;
+       struct xfrmu_spdhthresh thresh;
+
+       memset(&req, 0, sizeof(req));
+       req.nh.nlmsg_len        = NLMSG_LENGTH(sizeof(req.unused));
+       req.nh.nlmsg_type       = XFRM_MSG_NEWSPDINFO;
+       req.nh.nlmsg_flags      = NLM_F_REQUEST | NLM_F_ACK;
+       req.nh.nlmsg_seq        = (*seq)++;
+
+       thresh.lbits = thresh4_l;
+       thresh.rbits = thresh4_r;
+       if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV4_HTHRESH, &thresh, sizeof(thresh)))
+               return -1;
+
+       thresh.lbits = thresh6_l;
+       thresh.rbits = thresh6_r;
+       if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV6_HTHRESH, &thresh, sizeof(thresh)))
+               return -1;
+
+       if (add_bad_attr) {
+               BUILD_BUG_ON(XFRMA_IF_ID <= XFRMA_SPD_MAX + 1);
+               if (rtattr_pack(&req.nh, sizeof(req), XFRMA_IF_ID, NULL, 0)) {
+                       pr_err("adding attribute failed: no space");
+                       return -1;
+               }
+       }
+
+       if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+               pr_err("send()");
+               return -1;
+       }
+
+       if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+               pr_err("recv()");
+               return -1;
+       } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+               printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+               return -1;
+       }
+
+       if (req.error) {
+               printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+               return -1;
+       }
+
+       return 0;
+}
+
+static int xfrm_spdinfo_attrs(int xfrm_sock, uint32_t *seq)
+{
+       struct {
+               struct nlmsghdr                 nh;
+               union {
+                       uint32_t        unused;
+                       int             error;
+               };
+               char                    attrbuf[MAX_PAYLOAD];
+       } req;
+
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 31, 120, 16, false)) {
+               pr_err("Can't set SPD HTHRESH");
+               return KSFT_FAIL;
+       }
+
+       memset(&req, 0, sizeof(req));
+
+       req.nh.nlmsg_len        = NLMSG_LENGTH(sizeof(req.unused));
+       req.nh.nlmsg_type       = XFRM_MSG_GETSPDINFO;
+       req.nh.nlmsg_flags      = NLM_F_REQUEST;
+       req.nh.nlmsg_seq        = (*seq)++;
+       if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+               pr_err("send()");
+               return KSFT_FAIL;
+       }
+
+       if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+               pr_err("recv()");
+               return KSFT_FAIL;
+       } else if (req.nh.nlmsg_type == XFRM_MSG_NEWSPDINFO) {
+               size_t len = NLMSG_PAYLOAD(&req.nh, sizeof(req.unused));
+               struct rtattr *attr = (void *)req.attrbuf;
+               int got_thresh = 0;
+
+               for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len)) {
+                       if (attr->rta_type == XFRMA_SPD_IPV4_HTHRESH) {
+                               struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+                               got_thresh++;
+                               if (t->lbits != 32 || t->rbits != 31) {
+                                       pr_err("thresh differ: %u, %u",
+                                                       t->lbits, t->rbits);
+                                       return KSFT_FAIL;
+                               }
+                       }
+                       if (attr->rta_type == XFRMA_SPD_IPV6_HTHRESH) {
+                               struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+                               got_thresh++;
+                               if (t->lbits != 120 || t->rbits != 16) {
+                                       pr_err("thresh differ: %u, %u",
+                                                       t->lbits, t->rbits);
+                                       return KSFT_FAIL;
+                               }
+                       }
+               }
+               if (got_thresh != 2) {
+                       pr_err("only %d thresh returned by XFRM_MSG_GETSPDINFO", got_thresh);
+                       return KSFT_FAIL;
+               }
+       } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+               printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+               return KSFT_FAIL;
+       } else {
+               printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+               return -1;
+       }
+
+       /* Restore the default */
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, false)) {
+               pr_err("Can't restore SPD HTHRESH");
+               return KSFT_FAIL;
+       }
+
+       /*
+        * At this moment xfrm uses nlmsg_parse_deprecated(), which
+        * implies NL_VALIDATE_LIBERAL - ignoring attributes with
+        * (type > maxtype). nla_parse_depricated_strict() would enforce
+        * it. Or even stricter nla_parse().
+        * Right now it's not expected to fail, but to be ignored.
+        */
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, true))
+               return KSFT_PASS;
+
+       return KSFT_PASS;
+}
+
 static int child_serv(int xfrm_sock, uint32_t *seq,
                unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc)
 {
@@ -1717,6 +1869,9 @@ static int child_f(unsigned int nr, int test_desc_fd, int cmd_fd, void *buf)
                case EXPIRE_POLICY:
                        ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc);
                        break;
+               case SPDINFO_ATTRS:
+                       ret = xfrm_spdinfo_attrs(xfrm_sock, &seq);
+                       break;
                default:
                        printk("Unknown desc type %d", desc.type);
                        exit(KSFT_FAIL);
@@ -1994,8 +2149,10 @@ static int write_proto_plan(int fd, int proto)
  *   sizeof(xfrm_user_polexpire)  = 168  |  sizeof(xfrm_user_polexpire)  = 176
  *
  * Check the affected by the UABI difference structures.
+ * Also, check translation for xfrm_set_spdinfo: it has it's own attributes
+ * which needs to be correctly copied, but not translated.
  */
-const unsigned int compat_plan = 4;
+const unsigned int compat_plan = 5;
 static int write_compat_struct_tests(int test_desc_fd)
 {
        struct xfrm_desc desc = {};
@@ -2019,6 +2176,10 @@ static int write_compat_struct_tests(int test_desc_fd)
        if (__write_desc(test_desc_fd, &desc))
                return -1;
 
+       desc.type = SPDINFO_ATTRS;
+       if (__write_desc(test_desc_fd, &desc))
+               return -1;
+
        return 0;
 }
 
index d20fba0..b50dbe2 100644 (file)
@@ -892,6 +892,8 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
 
 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 {
+       static DEFINE_MUTEX(kvm_debugfs_lock);
+       struct dentry *dent;
        char dir_name[ITOA_MAX_LEN * 2];
        struct kvm_stat_data *stat_data;
        const struct _kvm_stats_desc *pdesc;
@@ -903,8 +905,20 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
                return 0;
 
        snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
-       kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+       mutex_lock(&kvm_debugfs_lock);
+       dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
+       if (dent) {
+               pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
+               dput(dent);
+               mutex_unlock(&kvm_debugfs_lock);
+               return 0;
+       }
+       dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+       mutex_unlock(&kvm_debugfs_lock);
+       if (IS_ERR(dent))
+               return 0;
 
+       kvm->debugfs_dentry = dent;
        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
                                         GFP_KERNEL_ACCOUNT);
@@ -5201,7 +5215,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
+       if (kvm->debugfs_dentry) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {