Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 3 Apr 2016 11:32:28 +0000 (06:32 -0500)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 3 Apr 2016 11:32:28 +0000 (06:32 -0500)
Pull x86 fixes from Thomas Gleixner:
 "This lot contains:

   - Some fixups for the fallout of the topology consolidation which
     unearthed AMD/Intel inconsistencies
   - Documentation for the x86 topology management
   - Support for AMD advanced power management bits
   - Two simple cleanups removing duplicated code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Add advanced power management bits
  x86/thread_info: Merge two !__ASSEMBLY__ sections
  x86/cpufreq: Remove duplicated TDP MSR macro definitions
  x86/Documentation: Start documenting x86 topology
  x86/cpu: Get rid of compute_unit_id
  perf/x86/amd: Cleanup Fam10h NB event constraints
  x86/topology: Fix AMD core count

204 files changed:
.mailmap
Documentation/networking/switchdev.txt
MAINTAINERS
arch/arm64/configs/defconfig
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/kvm_perf_event.h [deleted file]
arch/arm64/include/asm/opcodes.h
arch/arm64/include/asm/perf_event.h
arch/arm64/kernel/perf_event.c
arch/nios2/kernel/prom.c
arch/parisc/Kconfig
arch/parisc/include/asm/compat.h
arch/parisc/include/asm/syscall.h
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/signal32.c
arch/parisc/kernel/syscall.S
arch/powerpc/include/asm/processor.h
arch/powerpc/kernel/process.c
arch/powerpc/mm/hugetlbpage.c
arch/s390/Kconfig
arch/s390/crypto/prng.c
arch/s390/include/asm/cache.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/syscalls.S
arch/s390/mm/gup.c
arch/s390/mm/init.c
arch/s390/pci/pci_clp.c
arch/sparc/include/asm/compat_signal.h
arch/sparc/include/asm/obio.h
arch/sparc/include/asm/openprom.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/processor_64.h
arch/sparc/include/asm/sigcontext.h
arch/sparc/include/asm/tsb.h
arch/sparc/include/uapi/asm/stat.h
arch/sparc/kernel/audit.c
arch/sparc/kernel/compat_audit.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/ioport.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/sysfs.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/mm/fault_32.c
arch/sparc/net/bpf_jit_comp.c
arch/tile/include/hv/drv_mpipe_intf.h
arch/tile/kernel/kgdb.c
arch/tile/kernel/pci_gx.c
arch/x86/include/asm/pmem.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/mm/tlb.c
crypto/asymmetric_keys/pkcs7_trust.c
drivers/acpi/acpi_processor.c
drivers/acpi/bus.c
drivers/acpi/internal.h
drivers/clk/mediatek/reset.c
drivers/clk/mmp/reset.c
drivers/clk/qcom/gcc-ipq4019.c
drivers/clk/qcom/reset.c
drivers/clk/qcom/reset.h
drivers/clk/rockchip/softrst.c
drivers/clk/sirf/clk-atlas7.c
drivers/clk/sunxi/clk-a10-ve.c
drivers/clk/sunxi/clk-sun9i-mmc.c
drivers/clk/sunxi/clk-usb.c
drivers/clk/tegra/clk.c
drivers/gpio/gpio-menz127.c
drivers/gpio/gpio-xgene.c
drivers/gpu/drm/amd/acp/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_gem.c
drivers/hwmon/max1111.c
drivers/ide/icside.c
drivers/ide/palm_bk3710.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/isdn/hisax/isac.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/phy/bcm7xxx.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/plusb.c
drivers/net/usb/qmi_wwan.c
drivers/nvdimm/pmem.c
drivers/platform/goldfish/goldfish_pipe.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/remoteproc/st_remoteproc.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_eckd.h
drivers/s390/block/dasd_int.h
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/target_core_fabric_configfs.c
fs/btrfs/disk-io.c
fs/dlm/config.c
fs/namei.c
fs/orangefs/dir.c
fs/orangefs/protocol.h
include/linux/brcmphy.h
include/linux/configfs.h
include/linux/filter.h
include/linux/huge_mm.h
include/linux/netfilter/ipset/ip_set.h
include/linux/pmem.h
include/linux/stmmac.h
include/target/target_core_fabric.h
include/trace/events/page_isolation.h
include/uapi/linux/bpf.h
init/Kconfig
kernel/bpf/syscall.c
mm/kasan/kasan.c
mm/oom_kill.c
mm/page_isolation.c
mm/rmap.c
net/bridge/br_stp.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_reject_bridge.c
net/core/filter.c
net/core/netpoll.c
net/core/rtnetlink.c
net/ipv4/fou.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/udp.c
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nfnetlink_queue.c
net/openvswitch/Kconfig
net/openvswitch/conntrack.c
net/sctp/output.c
net/switchdev/switchdev.c
net/xfrm/xfrm_input.c
sound/core/timer.c
sound/core/timer_compat.c
sound/firewire/dice/dice-stream.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
sound/usb/stream.c

index 7e6c533..90c0aef 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -33,6 +33,7 @@ Björn Steinbrink <B.Steinbrink@gmx.de>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
+Christophe Ricard <christophe.ricard@gmail.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 David Brownell <david-b@pacbell.net>
index fad6313..2f65912 100644 (file)
@@ -386,7 +386,7 @@ used. First phase is to "prepare" anything needed, including various checks,
 memory allocation, etc. The goal is to handle the stuff that is not unlikely
 to fail here. The second phase is to "commit" the actual changes.
 
-Switchdev provides an inftrastructure for sharing items (for example memory
+Switchdev provides an infrastructure for sharing items (for example memory
 allocations) between the two phases.
 
 The object created by a driver in "prepare" phase and it is queued up by:
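
The two-phase flow documented above can be sketched roughly as follows. This is an illustrative sketch only, not part of the patch: the foo_* driver names and struct foo_entry are invented, and the switchdev_trans helpers used are assumed from the contemporaneous include/net/switchdev.h.

#include <linux/slab.h>
#include <net/switchdev.h>

struct foo_entry {
	struct switchdev_trans_item tritem;	/* links the allocation to the transaction */
	u32 hw_index;				/* hypothetical hardware handle */
};

static int foo_port_obj_add(struct net_device *dev,
			    const struct switchdev_obj *obj,
			    struct switchdev_trans *trans)
{
	struct foo_entry *e;

	if (switchdev_trans_ph_prepare(trans)) {
		/* Phase 1: allocate and validate; nothing touches hardware yet. */
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;
		switchdev_trans_item_enqueue(trans, e, kfree, &e->tritem);
		return 0;
	}

	/* Phase 2: reuse the item queued during prepare and commit it to hardware. */
	e = switchdev_trans_item_dequeue(trans);
	/* ... program the device using obj and e->hw_index ... */
	return 0;
}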
index 03e00c7..fbd6f52 100644 (file)
@@ -5042,6 +5042,7 @@ F:        include/linux/hw_random.h
 HARDWARE SPINLOCK CORE
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
 F:     Documentation/hwspinlock.txt
@@ -8253,7 +8254,7 @@ F:        Documentation/filesystems/overlayfs.txt
 
 ORANGEFS FILESYSTEM
 M:     Mike Marshall <hubcap@omnibond.com>
-L:     pvfs2-developers@beowulf-underground.org
+L:     pvfs2-developers@beowulf-underground.org (subscribers-only)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
 S:     Supported
 F:     fs/orangefs/
@@ -9314,6 +9315,7 @@ F:        include/linux/regmap.h
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+L:     linux-remoteproc@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
 S:     Maintained
 F:     drivers/remoteproc/
@@ -9323,6 +9325,7 @@ F:        include/linux/remoteproc.h
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+L:     linux-remoteproc@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
 S:     Maintained
 F:     drivers/rpmsg/
@@ -11137,8 +11140,8 @@ F:      include/uapi/linux/tipc*.h
 F:     net/tipc/
 
 TILE ARCHITECTURE
-M:     Chris Metcalf <cmetcalf@ezchip.com>
-W:     http://www.ezchip.com/scm/
+M:     Chris Metcalf <cmetcalf@mellanox.com>
+W:     http://www.mellanox.com/repository/solutions/tile-scm/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
 S:     Supported
 F:     arch/tile/
index f705051..a44ef99 100644 (file)
@@ -68,11 +68,13 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
 CONFIG_XEN=y
-CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -80,7 +82,6 @@ CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_BPF_JIT=y
 # CONFIG_WIRELESS is not set
@@ -144,16 +145,18 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_TEGRA=y
 CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
 CONFIG_SPMI=y
+CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
@@ -196,6 +199,7 @@ CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC2=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
@@ -205,19 +209,20 @@ CONFIG_USB_MSM_OTG=y
 CONFIG_USB_ULPI=y
 CONFIG_USB_GADGET=y
 CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
-CONFIG_MMC_SUNXI=y
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_EXYNOS=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_SUNXI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_SYSCON=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
@@ -229,8 +234,8 @@ CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
-CONFIG_QCOM_BAM_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
 CONFIG_RCAR_DMAC=y
 CONFIG_VFIO=y
 CONFIG_VFIO_PCI=y
@@ -239,20 +244,26 @@ CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_XEN_GNTDEV=y
 CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_SCPI=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_MHU=y
+CONFIG_HI6220_MBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
 CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
-CONFIG_HISILICON_IRQ_MBIGEN=y
 CONFIG_EXTCON_USB_GPIO=y
+CONFIG_COMMON_RESET_HI6220=y
 CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_XGENE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_FANOTIFY=y
@@ -264,6 +275,7 @@ CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_EFIVAR_FS=y
 CONFIG_SQUASHFS=y
 CONFIG_NFS_FS=y
index 227ed47..b7e82a7 100644 (file)
@@ -27,7 +27,6 @@
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
-#include <asm/kvm_perf_event.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
index a46b019..44eaff7 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_perf_event.h>
 #include <asm/sysreg.h>
 
 #define __hyp_text __section(.hyp.text) notrace
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
deleted file mode 100644 (file)
index c18fdeb..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ASM_KVM_PERF_EVENT_H
-#define __ASM_KVM_PERF_EVENT_H
-
-#define        ARMV8_PMU_MAX_COUNTERS  32
-#define        ARMV8_PMU_COUNTER_MASK  (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMU_PMCR_E       (1 << 0) /* Enable all counters */
-#define ARMV8_PMU_PMCR_P       (1 << 1) /* Reset all counters */
-#define ARMV8_PMU_PMCR_C       (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMU_PMCR_D       (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMU_PMCR_X       (1 << 4) /* Export to ETM */
-#define ARMV8_PMU_PMCR_DP      (1 << 5) /* Disable CCNT if non-invasive debug*/
-/* Determines which bit of PMCCNTR_EL0 generates an overflow */
-#define ARMV8_PMU_PMCR_LC      (1 << 6)
-#define        ARMV8_PMU_PMCR_N_SHIFT  11       /* Number of counters supported */
-#define        ARMV8_PMU_PMCR_N_MASK   0x1f
-#define        ARMV8_PMU_PMCR_MASK     0x7f     /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define        ARMV8_PMU_OVSR_MASK             0xffffffff      /* Mask for writable bits */
-#define        ARMV8_PMU_OVERFLOWED_MASK       ARMV8_PMU_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define        ARMV8_PMU_EVTYPE_MASK   0xc80003ff      /* Mask for writable bits */
-#define        ARMV8_PMU_EVTYPE_EVENT  0x3ff           /* Mask for EVENT bits */
-
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0       /* Software increment event */
-
-/*
- * Event filters for PMUv3
- */
-#define        ARMV8_PMU_EXCLUDE_EL1   (1 << 31)
-#define        ARMV8_PMU_EXCLUDE_EL0   (1 << 30)
-#define        ARMV8_PMU_INCLUDE_EL2   (1 << 27)
-
-/*
- * PMUSERENR: user enable reg
- */
-#define ARMV8_PMU_USERENR_MASK 0xf             /* Mask for writable bits */
-#define ARMV8_PMU_USERENR_EN   (1 << 0) /* PMU regs can be accessed at EL0 */
-#define ARMV8_PMU_USERENR_SW   (1 << 1) /* PMSWINC can be written at EL0 */
-#define ARMV8_PMU_USERENR_CR   (1 << 2) /* Cycle counter can be read at EL0 */
-#define ARMV8_PMU_USERENR_ER   (1 << 3) /* Event counter can be read at EL0 */
-
-#endif
index 4e603ea..123f45d 100644 (file)
@@ -1 +1,5 @@
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
+#endif
+
 #include <../../arm/include/asm/opcodes.h>
index 7bd3cdb..2065f46 100644 (file)
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#define        ARMV8_PMU_MAX_COUNTERS  32
+#define        ARMV8_PMU_COUNTER_MASK  (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E       (1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P       (1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C       (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D       (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X       (1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP      (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMU_PMCR_LC      (1 << 6) /* Overflow on 64 bit cycle counter */
+#define        ARMV8_PMU_PMCR_N_SHIFT  11       /* Number of counters supported */
+#define        ARMV8_PMU_PMCR_N_MASK   0x1f
+#define        ARMV8_PMU_PMCR_MASK     0x7f     /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define        ARMV8_PMU_OVSR_MASK             0xffffffff      /* Mask for writable bits */
+#define        ARMV8_PMU_OVERFLOWED_MASK       ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define        ARMV8_PMU_EVTYPE_MASK   0xc800ffff      /* Mask for writable bits */
+#define        ARMV8_PMU_EVTYPE_EVENT  0xffff          /* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0       /* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define        ARMV8_PMU_EXCLUDE_EL1   (1 << 31)
+#define        ARMV8_PMU_EXCLUDE_EL0   (1 << 30)
+#define        ARMV8_PMU_INCLUDE_EL2   (1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK 0xf             /* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN   (1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW   (1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR   (1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER   (1 << 3) /* Event counter can be read at EL0 */
+
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
index 767c4f6..f419a7c 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <asm/irq_regs.h>
+#include <asm/perf_event.h>
 #include <asm/virt.h>
 
 #include <linux/of.h>
@@ -384,9 +385,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 #define        ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
-#define        ARMV8_MAX_COUNTERS      32
-#define        ARMV8_COUNTER_MASK      (ARMV8_MAX_COUNTERS - 1)
-
 /*
  * ARMv8 low level PMU access
  */
@@ -395,40 +393,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
  * Perf Event to low level counters mapping
  */
 #define        ARMV8_IDX_TO_COUNTER(x) \
-       (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E           (1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P           (1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C           (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D           (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X           (1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP          (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_LC          (1 << 6) /* Overflow on 64 bit cycle counter */
-#define        ARMV8_PMCR_N_SHIFT      11       /* Number of counters supported */
-#define        ARMV8_PMCR_N_MASK       0x1f
-#define        ARMV8_PMCR_MASK         0x7f     /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define        ARMV8_OVSR_MASK         0xffffffff      /* Mask for writable bits */
-#define        ARMV8_OVERFLOWED_MASK   ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define        ARMV8_EVTYPE_MASK       0xc800ffff      /* Mask for writable bits */
-#define        ARMV8_EVTYPE_EVENT      0xffff          /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define        ARMV8_EXCLUDE_EL1       (1 << 31)
-#define        ARMV8_EXCLUDE_EL0       (1 << 30)
-#define        ARMV8_INCLUDE_EL2       (1 << 27)
+       (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
 
 static inline u32 armv8pmu_pmcr_read(void)
 {
@@ -439,14 +404,14 @@ static inline u32 armv8pmu_pmcr_read(void)
 
 static inline void armv8pmu_pmcr_write(u32 val)
 {
-       val &= ARMV8_PMCR_MASK;
+       val &= ARMV8_PMU_PMCR_MASK;
        isb();
        asm volatile("msr pmcr_el0, %0" :: "r" (val));
 }
 
 static inline int armv8pmu_has_overflowed(u32 pmovsr)
 {
-       return pmovsr & ARMV8_OVERFLOWED_MASK;
+       return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
 }
 
 static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
@@ -512,7 +477,7 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
        if (armv8pmu_select_counter(idx) == idx) {
-               val &= ARMV8_EVTYPE_MASK;
+               val &= ARMV8_PMU_EVTYPE_MASK;
                asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
        }
 }
@@ -558,7 +523,7 @@ static inline u32 armv8pmu_getreset_flags(void)
        asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
 
        /* Write to clear flags */
-       value &= ARMV8_OVSR_MASK;
+       value &= ARMV8_PMU_OVSR_MASK;
        asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
 
        return value;
@@ -696,7 +661,7 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
-       armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+       armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
@@ -707,7 +672,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
 
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
-       armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+       armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
@@ -717,7 +682,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
-       unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+       unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
 
        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
@@ -754,11 +719,11 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
            attr->exclude_kernel != attr->exclude_hv)
                return -EINVAL;
        if (attr->exclude_user)
-               config_base |= ARMV8_EXCLUDE_EL0;
+               config_base |= ARMV8_PMU_EXCLUDE_EL0;
        if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-               config_base |= ARMV8_EXCLUDE_EL1;
+               config_base |= ARMV8_PMU_EXCLUDE_EL1;
        if (!attr->exclude_hv)
-               config_base |= ARMV8_INCLUDE_EL2;
+               config_base |= ARMV8_PMU_INCLUDE_EL2;
 
        /*
         * Install the filter into config_base as this is used to
@@ -784,35 +749,36 @@ static void armv8pmu_reset(void *info)
         * Initialize & Reset PMNC. Request overflow interrupt for
         * 64 bit cycle counter but cheat in armv8pmu_write_counter().
         */
-       armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
+       armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
+                           ARMV8_PMU_PMCR_LC);
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
 {
        return armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                &armv8_pmuv3_perf_cache_map,
-                               ARMV8_EVTYPE_EVENT);
+                               ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_a53_map_event(struct perf_event *event)
 {
        return armpmu_map_event(event, &armv8_a53_perf_map,
                                &armv8_a53_perf_cache_map,
-                               ARMV8_EVTYPE_EVENT);
+                               ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_a57_map_event(struct perf_event *event)
 {
        return armpmu_map_event(event, &armv8_a57_perf_map,
                                &armv8_a57_perf_cache_map,
-                               ARMV8_EVTYPE_EVENT);
+                               ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_thunder_map_event(struct perf_event *event)
 {
        return armpmu_map_event(event, &armv8_thunder_perf_map,
                                &armv8_thunder_perf_cache_map,
-                               ARMV8_EVTYPE_EVENT);
+                               ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static void armv8pmu_read_num_pmnc_events(void *info)
@@ -820,7 +786,7 @@ static void armv8pmu_read_num_pmnc_events(void *info)
        int *nb_cnt = info;
 
        /* Read the nb of CNTx counters supported from PMNC */
-       *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+       *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 
        /* Add the CPU cycles counter */
        *nb_cnt += 1;
index 718dd19..367c542 100644 (file)
@@ -97,8 +97,7 @@ static int __init early_init_dt_scan_serial(unsigned long node,
                return 0;
 #endif
 
-       *addr64 = fdt_translate_address((const void *)initial_boot_params,
-               node);
+       *addr64 = of_flat_dt_translate_address(node);
 
        return *addr64 == OF_BAD_ADDR ? 0 : 1;
 }
index 989fa14..bd3c873 100644 (file)
@@ -30,6 +30,7 @@ config PARISC
        select TTY # Needed for pdc_cons.c
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_SECCOMP_FILTER
        select ARCH_NO_COHERENT_DMA_MMAP
 
        help
index 0448a2c..3387307 100644 (file)
@@ -183,6 +183,13 @@ typedef struct compat_siginfo {
                        int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
+
+               /* SIGSYS */
+               struct {
+                       compat_uptr_t _call_addr; /* calling user insn */
+                       int _syscall;   /* triggering system call number */
+                       compat_uint_t _arch;    /* AUDIT_ARCH_* of syscall */
+               } _sigsys;
        } _sifields;
 } compat_siginfo_t;
 
index a5eba95..637ce8d 100644 (file)
@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
        }
 }
 
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       regs->gr[28] = error ? error : val;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+       /* do nothing */
+}
+
 static inline int syscall_get_arch(void)
 {
        int arch = AUDIT_ARCH_PARISC;
index ce0b2b4..8fb81a3 100644 (file)
@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
        /* Do the secure computing check first. */
-       secure_computing_strict(regs->gr[20]);
+       if (secure_computing() == -1)
+               return -1;
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs)) {
@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
                        regs->gr[23] & 0xffffffff);
 
 out:
-       return regs->gr[20];
+       /*
+        * Sign extend the syscall number to 64bit since it may have been
+        * modified by a compat ptrace call
+        */
+       return (int) ((u32) regs->gr[20]);
 }
 
 void do_syscall_trace_exit(struct pt_regs *regs)
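
A quick worked example of the cast chain above (editorial sketch, not from the patch): truncating to 32 bits and then sign-extending back to the register width means a -1 stored by a 32-bit tracer compares equal to -1 again, which the later -1 check on the syscall path relies on.

	/* Mirrors return (int) ((u32) regs->gr[20]); */
	unsigned long gr20 = 0xffffffffUL;	/* -1 as written by a compat (32-bit) ptrace */
	long ret = (int)((unsigned int) gr20);	/* ret == -1, not 0xffffffff */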
index 984abbe..c342b2e 100644 (file)
@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
                        val = (compat_int_t)from->si_int;
                        err |= __put_user(val, &to->si_int);
                        break;
+               case __SI_SYS >> 16:
+                       err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
+                       err |= __put_user(from->si_syscall, &to->si_syscall);
+                       err |= __put_user(from->si_arch, &to->si_arch);
+                       break;
                }
        }
        return err;
index fbafa0d..c976ebf 100644 (file)
@@ -329,6 +329,7 @@ tracesys_next:
 
        ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
        LDREG   TI_TASK(%r1), %r1
+       LDREG   TASK_PT_GR28(%r1), %r28         /* Restore return value */
        LDREG   TASK_PT_GR26(%r1), %r26         /* Restore the users args */
        LDREG   TASK_PT_GR25(%r1), %r25
        LDREG   TASK_PT_GR24(%r1), %r24
@@ -342,6 +343,7 @@ tracesys_next:
        stw     %r21, -56(%r30)                 /* 6th argument */
 #endif
 
+       cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
        comiclr,>>=     __NR_Linux_syscalls, %r20, %r0
        b,n     .Ltracesys_nosys
 
index 8ab8a1a..009fab1 100644 (file)
@@ -246,7 +246,7 @@ struct thread_struct {
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
        /* VSR status */
-       int             used_vsr;       /* set if process has used altivec */
+       int             used_vsr;       /* set if process has used VSX */
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
        unsigned long   evr[32];        /* upper 32-bits of SPE regs */
index 612df30..b8500b4 100644 (file)
@@ -983,7 +983,7 @@ void restore_tm_state(struct pt_regs *regs)
 static inline void save_sprs(struct thread_struct *t)
 {
 #ifdef CONFIG_ALTIVEC
-       if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
                t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
index 6dd272b..d991b9e 100644 (file)
@@ -413,13 +413,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
        struct hugepd_freelist **batchp;
 
-       batchp = this_cpu_ptr(&hugepd_freelist_cur);
+       batchp = &get_cpu_var(hugepd_freelist_cur);
 
        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
-        put_cpu_var(hugepd_freelist_cur);
+               put_cpu_var(hugepd_freelist_cur);
                return;
        }
 
index b9df8d1..aad23e3 100644 (file)
@@ -59,6 +59,9 @@ config PCI_QUIRKS
 config ARCH_SUPPORTS_UPROBES
        def_bool y
 
+config DEBUG_RODATA
+       def_bool y
+
 config S390
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
index b8045b9..d750cc0 100644 (file)
@@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = {
 static struct miscdevice prng_sha512_dev = {
        .name   = "prandom",
        .minor  = MISC_DYNAMIC_MINOR,
+       .mode   = 0644,
        .fops   = &prng_sha512_fops,
 };
 static struct miscdevice prng_tdes_dev = {
        .name   = "prandom",
        .minor  = MISC_DYNAMIC_MINOR,
+       .mode   = 0644,
        .fops   = &prng_tdes_fops,
 };
 
index 4d7ccac..22da3b3 100644 (file)
@@ -15,4 +15,7 @@
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
+/* Read-only memory is marked before mark_rodata_ro() is called. */
+#define __ro_after_init __read_mostly
+
 #endif
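
As a usage note (illustrative, not part of the patch): a variable carrying the annotation above is written during early boot and treated as read-only afterwards; on s390 it currently maps to __read_mostly as the hunk states. boot_feature_mask and detect_features() below are hypothetical names.

static unsigned long boot_feature_mask __ro_after_init;

static int __init boot_features_setup(void)
{
	boot_feature_mask = detect_features();	/* hypothetical probe; last legitimate write */
	return 0;
}
early_initcall(boot_features_setup);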
index ab3aa68..4384bc7 100644 (file)
 #define __NR_shutdown          373
 #define __NR_mlock2            374
 #define __NR_copy_file_range   375
-#define NR_syscalls 376
+#define __NR_preadv2           376
+#define __NR_pwritev2          377
+#define NR_syscalls 378
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index 58bf457..62f066b 100644 (file)
@@ -670,6 +670,7 @@ static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
                flags = PMC_INIT;
                smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
                break;
index 1a43474..eaab9a7 100644 (file)
@@ -1521,7 +1521,7 @@ static int cpumf_pmu_notifier(struct notifier_block *self,
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
+       case CPU_DOWN_FAILED:
                flags = PMC_INIT;
                smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
                break;
index 293d8b9..9b59e62 100644 (file)
@@ -384,3 +384,5 @@ SYSCALL(sys_recvmsg,compat_sys_recvmsg)
 SYSCALL(sys_shutdown,sys_shutdown)
 SYSCALL(sys_mlock2,compat_sys_mlock2)
 SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
+SYSCALL(sys_preadv2,compat_sys_preadv2)
+SYSCALL(sys_pwritev2,compat_sys_pwritev2)
index 49a1c84..a8a6765 100644 (file)
@@ -20,9 +20,9 @@
 static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
+       struct page *head, *page;
        unsigned long mask;
        pte_t *ptep, pte;
-       struct page *page;
 
        mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
-               if (!page_cache_get_speculative(page))
+               head = compound_head(page);
+               if (!page_cache_get_speculative(head))
                        return 0;
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-                       put_page(page);
+                       put_page(head);
                        return 0;
                }
+               VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                (*nr)++;
 
index 73e2903..c7b0451 100644 (file)
@@ -108,6 +108,13 @@ void __init paging_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
 
+void mark_rodata_ro(void)
+{
+       /* Text and rodata are already protected. Nothing to do here. */
+       pr_info("Write protecting the kernel read-only data: %luk\n",
+               ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+}
+
 void __init mem_init(void)
 {
        if (MACHINE_HAS_TLB_LC)
@@ -126,9 +133,6 @@ void __init mem_init(void)
        setup_zero_pages();     /* Setup zeroed pages. */
 
        mem_init_print_info(NULL);
-       printk("Write protected kernel read-only data: %#lx - %#lx\n",
-              (unsigned long)&_stext,
-              PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
 void free_initmem(void)
index 21591dd..1a4512c 100644 (file)
@@ -176,8 +176,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
                rc = clp_store_query_pci_fn(zdev, &rrb->response);
                if (rc)
                        goto out;
-               if (rrb->response.pfgid)
-                       rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
+               rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
                zpci_err("Q PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
index 9ed1f12..4b027b1 100644 (file)
@@ -6,17 +6,17 @@
 
 #ifdef CONFIG_COMPAT
 struct __new_sigaction32 {
-       unsigned                sa_handler;
+       unsigned int            sa_handler;
        unsigned int            sa_flags;
-       unsigned                sa_restorer;     /* not used by Linux/SPARC yet */
+       unsigned int            sa_restorer;     /* not used by Linux/SPARC yet */
        compat_sigset_t         sa_mask;
 };
 
 struct __old_sigaction32 {
-       unsigned                sa_handler;
+       unsigned int            sa_handler;
        compat_old_sigset_t     sa_mask;
        unsigned int            sa_flags;
-       unsigned                sa_restorer;     /* not used by Linux/SPARC yet */
+       unsigned int            sa_restorer;     /* not used by Linux/SPARC yet */
 };
 #endif
 
index 910c1d9..426ad75 100644 (file)
@@ -117,9 +117,9 @@ static inline void bw_clear_intr_mask(int sbus_level, int mask)
                              "i" (ASI_M_CTL));
 }
 
-static inline unsigned bw_get_prof_limit(int cpu)
+static inline unsigned int bw_get_prof_limit(int cpu)
 {
-       unsigned limit;
+       unsigned int limit;
        
        __asm__ __volatile__ ("lda [%1] %2, %0" :
                              "=r" (limit) :
@@ -128,7 +128,7 @@ static inline unsigned bw_get_prof_limit(int cpu)
        return limit;
 }
 
-static inline void bw_set_prof_limit(int cpu, unsigned limit)
+static inline void bw_set_prof_limit(int cpu, unsigned int limit)
 {
        __asm__ __volatile__ ("sta %0, [%1] %2" : :
                              "r" (limit),
@@ -136,9 +136,9 @@ static inline void bw_set_prof_limit(int cpu, unsigned limit)
                              "i" (ASI_M_CTL));
 }
 
-static inline unsigned bw_get_ctrl(int cpu)
+static inline unsigned int bw_get_ctrl(int cpu)
 {
-       unsigned ctrl;
+       unsigned int ctrl;
        
        __asm__ __volatile__ ("lda [%1] %2, %0" :
                              "=r" (ctrl) :
@@ -147,7 +147,7 @@ static inline unsigned bw_get_ctrl(int cpu)
        return ctrl;
 }
 
-static inline void bw_set_ctrl(int cpu, unsigned ctrl)
+static inline void bw_set_ctrl(int cpu, unsigned int ctrl)
 {
        __asm__ __volatile__ ("sta %0, [%1] %2" : :
                              "r" (ctrl),
@@ -155,9 +155,9 @@ static inline void bw_set_ctrl(int cpu, unsigned ctrl)
                              "i" (ASI_M_CTL));
 }
 
-static inline unsigned cc_get_ipen(void)
+static inline unsigned int cc_get_ipen(void)
 {
-       unsigned pending;
+       unsigned int pending;
        
        __asm__ __volatile__ ("lduha [%1] %2, %0" :
                              "=r" (pending) :
@@ -166,7 +166,7 @@ static inline unsigned cc_get_ipen(void)
        return pending;
 }
 
-static inline void cc_set_iclr(unsigned clear)
+static inline void cc_set_iclr(unsigned int clear)
 {
        __asm__ __volatile__ ("stha %0, [%1] %2" : :
                              "r" (clear),
@@ -174,9 +174,9 @@ static inline void cc_set_iclr(unsigned clear)
                              "i" (ASI_M_MXCC));
 }
 
-static inline unsigned cc_get_imsk(void)
+static inline unsigned int cc_get_imsk(void)
 {
-       unsigned mask;
+       unsigned int mask;
        
        __asm__ __volatile__ ("lduha [%1] %2, %0" :
                              "=r" (mask) :
@@ -185,7 +185,7 @@ static inline unsigned cc_get_imsk(void)
        return mask;
 }
 
-static inline void cc_set_imsk(unsigned mask)
+static inline void cc_set_imsk(unsigned int mask)
 {
        __asm__ __volatile__ ("stha %0, [%1] %2" : :
                              "r" (mask),
@@ -193,9 +193,9 @@ static inline void cc_set_imsk(unsigned mask)
                              "i" (ASI_M_MXCC));
 }
 
-static inline unsigned cc_get_imsk_other(int cpuid)
+static inline unsigned int cc_get_imsk_other(int cpuid)
 {
-       unsigned mask;
+       unsigned int mask;
        
        __asm__ __volatile__ ("lduha [%1] %2, %0" :
                              "=r" (mask) :
@@ -204,7 +204,7 @@ static inline unsigned cc_get_imsk_other(int cpuid)
        return mask;
 }
 
-static inline void cc_set_imsk_other(int cpuid, unsigned mask)
+static inline void cc_set_imsk_other(int cpuid, unsigned int mask)
 {
        __asm__ __volatile__ ("stha %0, [%1] %2" : :
                              "r" (mask),
@@ -212,7 +212,7 @@ static inline void cc_set_imsk_other(int cpuid, unsigned mask)
                              "i" (ASI_M_CTL));
 }
 
-static inline void cc_set_igen(unsigned gen)
+static inline void cc_set_igen(unsigned int gen)
 {
        __asm__ __volatile__ ("sta %0, [%1] %2" : :
                              "r" (gen),
index 47eaafa..63374c4 100644 (file)
@@ -29,12 +29,12 @@ struct linux_dev_v0_funcs {
 /* V2 and later prom device operations. */
 struct linux_dev_v2_funcs {
        phandle (*v2_inst2pkg)(int d);  /* Convert ihandle to phandle */
-       char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
-       void (*v2_dumb_mem_free)(char *va, unsigned sz);
+       char * (*v2_dumb_mem_alloc)(char *va, unsigned int sz);
+       void (*v2_dumb_mem_free)(char *va, unsigned int sz);
 
        /* To map devices into virtual I/O space. */
-       char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
-       void (*v2_dumb_munmap)(char *virta, unsigned size);
+       char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned int paddr, unsigned int sz);
+       void (*v2_dumb_munmap)(char *virta, unsigned int size);
 
        int (*v2_dev_open)(char *devpath);
        void (*v2_dev_close)(int d);
@@ -50,7 +50,7 @@ struct linux_dev_v2_funcs {
 struct linux_mlist_v0 {
        struct linux_mlist_v0 *theres_more;
        unsigned int start_adr;
-       unsigned num_bytes;
+       unsigned int num_bytes;
 };
 
 struct linux_mem_v0 {
index 7a38d6a..f089cfa 100644 (file)
@@ -218,7 +218,7 @@ extern pgprot_t PAGE_KERNEL_LOCKED;
 extern pgprot_t PAGE_COPY;
 extern pgprot_t PAGE_SHARED;
 
-/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
+/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
 extern unsigned long _PAGE_IE;
 extern unsigned long _PAGE_E;
 extern unsigned long _PAGE_CACHE;
index 6924bde..ce2595c 100644 (file)
@@ -201,7 +201,7 @@ unsigned long get_wchan(struct task_struct *task);
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 /* Please see the commentary in asm/backoff.h for a description of
- * what these instructions are doing and how they have been choosen.
+ * what these instructions are doing and how they have been chosen.
  * To make a long story short, we are trying to yield the current cpu
  * strand during busy loops.
  */
index fc2df1e..f4eb630 100644 (file)
@@ -25,7 +25,7 @@ struct sigcontext32 {
        int sigc_oswins;       /* outstanding windows */
 
        /* stack ptrs for each regwin buf */
-       unsigned sigc_spbuf[__SUNOS_MAXWIN];
+       unsigned int sigc_spbuf[__SUNOS_MAXWIN];
 
        /* Windows to restore after signal */
        struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN];
index ecb49cf..c6a155c 100644 (file)
@@ -149,7 +149,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
         * page size in question.  So for PMD mappings (which fall on
         * bit 23, for 8MB per PMD) we must propagate bit 22 for a
         * 4MB huge page.  For huge PUDs (which fall on bit 33, for
-        * 8GB per PUD), we have to accomodate 256MB and 2GB huge
+        * 8GB per PUD), we have to accommodate 256MB and 2GB huge
         * pages.  So for those we propagate bits 32 to 28.
         */
 #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)       \
index a232e9e..2f0583a 100644 (file)
@@ -6,13 +6,13 @@
 #if defined(__sparc__) && defined(__arch64__)
 /* 64 bit sparc */
 struct stat {
-       unsigned   st_dev;
+       unsigned int st_dev;
        ino_t   st_ino;
        mode_t  st_mode;
        short   st_nlink;
        uid_t   st_uid;
        gid_t   st_gid;
-       unsigned   st_rdev;
+       unsigned int st_rdev;
        off_t   st_size;
        time_t  st_atime;
        time_t  st_mtime;
index 24361b4..2585c1e 100644 (file)
@@ -5,27 +5,27 @@
 
 #include "kernel.h"
 
-static unsigned dir_class[] = {
+static unsigned int dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
 ~0U
 };
 
-static unsigned read_class[] = {
+static unsigned int read_class[] = {
 #include <asm-generic/audit_read.h>
 ~0U
 };
 
-static unsigned write_class[] = {
+static unsigned int write_class[] = {
 #include <asm-generic/audit_write.h>
 ~0U
 };
 
-static unsigned chattr_class[] = {
+static unsigned int chattr_class[] = {
 #include <asm-generic/audit_change_attr.h>
 ~0U
 };
 
-static unsigned signal_class[] = {
+static unsigned int signal_class[] = {
 #include <asm-generic/audit_signal.h>
 ~0U
 };
@@ -39,7 +39,7 @@ int audit_classify_arch(int arch)
        return 0;
 }
 
-int audit_classify_syscall(int abi, unsigned syscall)
+int audit_classify_syscall(int abi, unsigned int syscall)
 {
 #ifdef CONFIG_COMPAT
        if (abi == AUDIT_ARCH_SPARC)
index 7062263..e5611cd 100644 (file)
@@ -2,32 +2,32 @@
 #include <asm/unistd.h>
 #include "kernel.h"
 
-unsigned sparc32_dir_class[] = {
+unsigned int sparc32_dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
 ~0U
 };
 
-unsigned sparc32_chattr_class[] = {
+unsigned int sparc32_chattr_class[] = {
 #include <asm-generic/audit_change_attr.h>
 ~0U
 };
 
-unsigned sparc32_write_class[] = {
+unsigned int sparc32_write_class[] = {
 #include <asm-generic/audit_write.h>
 ~0U
 };
 
-unsigned sparc32_read_class[] = {
+unsigned int sparc32_read_class[] = {
 #include <asm-generic/audit_read.h>
 ~0U
 };
 
-unsigned sparc32_signal_class[] = {
+unsigned int sparc32_signal_class[] = {
 #include <asm-generic/audit_signal.h>
 ~0U
 };
 
-int sparc32_classify_syscall(unsigned syscall)
+int sparc32_classify_syscall(unsigned int syscall)
 {
        switch(syscall) {
        case __NR_open:
index a83707c..51aa6e8 100644 (file)
@@ -1255,7 +1255,7 @@ flush_patch_exception:
 kuw_patch1_7win:       sll     %o3, 6, %o3
 
        /* No matter how much overhead this routine has in the worst
-        * case scenerio, it is several times better than taking the
+        * case scenario, it is several times better than taking the
         * traps with the old method of just doing flush_user_windows().
         */
 kill_user_windows:
index 28fed53..ffd5ff4 100644 (file)
@@ -131,7 +131,7 @@ void __iomem *ioremap(unsigned long offset, unsigned long size)
 EXPORT_SYMBOL(ioremap);
 
 /*
- * Comlimentary to ioremap().
+ * Complementary to ioremap().
  */
 void iounmap(volatile void __iomem *virtual)
 {
@@ -233,7 +233,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
 }
 
 /*
- * Comlimentary to _sparc_ioremap().
+ * Complementary to _sparc_ioremap().
  */
 static void _sparc_free_io(struct resource *res)
 {
@@ -532,7 +532,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
 }
 
 /* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
+ * mode for DMA.  This is the scatter-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
  * elements are each tagged with the appropriate dma address
  * and length.  They are obtained via sg_dma_{address,length}(SG).
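
For context, a sketch (not from the patch) of the consumer side that comment describes: after mapping, each scatterlist element is read back with the standard accessors. sgl, nents and program_device_descriptor() are placeholders here.

	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t bus = sg_dma_address(sg);	/* device-visible address */
		unsigned int len = sg_dma_len(sg);	/* length of this segment */

		program_device_descriptor(bus, len);	/* hypothetical device-specific step */
	}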
index e7f652b..5057ec2 100644 (file)
@@ -54,12 +54,12 @@ void do_signal32(struct pt_regs * regs);
 asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
 
 /* compat_audit.c */
-extern unsigned sparc32_dir_class[];
-extern unsigned sparc32_chattr_class[];
-extern unsigned sparc32_write_class[];
-extern unsigned sparc32_read_class[];
-extern unsigned sparc32_signal_class[];
-int sparc32_classify_syscall(unsigned syscall);
+extern unsigned int sparc32_dir_class[];
+extern unsigned int sparc32_chattr_class[];
+extern unsigned int sparc32_write_class[];
+extern unsigned int sparc32_read_class[];
+extern unsigned int sparc32_signal_class[];
+int sparc32_classify_syscall(unsigned int syscall);
 #endif
 
 #ifdef CONFIG_SPARC32
index 42efcf8..33cd171 100644 (file)
@@ -203,7 +203,7 @@ static struct irq_chip leon_irq = {
 
 /*
  * Build a LEON IRQ for the edge triggered LEON IRQ controller:
- *  Edge (normal) IRQ           - handle_simple_irq, ack=DONT-CARE, never ack
+ *  Edge (normal) IRQ           - handle_simple_irq, ack=DON'T-CARE, never ack
  *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
  *  Per-CPU Edge                - handle_percpu_irq, ack=0
  */
index 46a5964..c16ef1a 100644 (file)
@@ -103,7 +103,7 @@ static void show_regwindow32(struct pt_regs *regs)
        mm_segment_t old_fs;
        
        __asm__ __volatile__ ("flushw");
-       rw = compat_ptr((unsigned)regs->u_regs[14]);
+       rw = compat_ptr((unsigned int)regs->u_regs[14]);
        old_fs = get_fs();
        set_fs (USER_DS);
        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
index baef495..69d75ff 100644 (file)
@@ -109,7 +109,7 @@ unsigned long cmdline_memory_size __initdata = 0;
 unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
 
 static void
-prom_console_write(struct console *con, const char *s, unsigned n)
+prom_console_write(struct console *con, const char *s, unsigned int n)
 {
        prom_write(s, n);
 }
index f3185e2..26db95b 100644 (file)
@@ -77,7 +77,7 @@ struct screen_info screen_info = {
 };
 
 static void
-prom_console_write(struct console *con, const char *s, unsigned n)
+prom_console_write(struct console *con, const char *s, unsigned int n)
 {
        prom_write(s, n);
 }
index 4eed773..3c25241 100644 (file)
@@ -144,7 +144,7 @@ void do_sigreturn32(struct pt_regs *regs)
        compat_uptr_t fpu_save;
        compat_uptr_t rwin_save;
        unsigned int psr;
-       unsigned pc, npc;
+       unsigned int pc, npc;
        sigset_t set;
        compat_sigset_t seta;
        int err, i;
index b489e97..fe8b8ee 100644 (file)
@@ -337,10 +337,10 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
-                                            (unsigned)second, NULL);
+                                            (unsigned int)second, NULL);
                        goto out;
                case SEMTIMEDOP:
-                       err = sys_semtimedop(first, ptr, (unsigned)second,
+                       err = sys_semtimedop(first, ptr, (unsigned int)second,
                                (const struct timespec __user *)
                                             (unsigned long) fifth);
                        goto out;
index 7f41d40..fa8e21a 100644 (file)
@@ -1,4 +1,4 @@
-/* sysfs.c: Toplogy sysfs support code for sparc64.
+/* sysfs.c: Topology sysfs support code for sparc64.
  *
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
index d89e97b..9aacb91 100644 (file)
@@ -209,8 +209,8 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
        if (size == 16) {
                size = 8;
                zero = (((long)(reg_num ?
-                       (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
-                       (unsigned)fetch_reg(reg_num + 1, regs);
+                       (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
+                       (unsigned int)fetch_reg(reg_num + 1, regs);
        } else if (reg_num) {
                src_val_p = fetch_reg_addr(reg_num, regs);
        }
index c399e7b..b6c559c 100644 (file)
@@ -303,10 +303,10 @@ no_context:
                fixup = search_extables_range(regs->pc, &g2);
                /* Values below 10 are reserved for other things */
                if (fixup > 10) {
-                       extern const unsigned __memset_start[];
-                       extern const unsigned __memset_end[];
-                       extern const unsigned __csum_partial_copy_start[];
-                       extern const unsigned __csum_partial_copy_end[];
+                       extern const unsigned int __memset_start[];
+                       extern const unsigned int __memset_end[];
+                       extern const unsigned int __csum_partial_copy_start[];
+                       extern const unsigned int __csum_partial_copy_end[];
 
 #ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n",
index 3e6e05a..a6d9204 100644 (file)
@@ -351,7 +351,7 @@ do {        *prog++ = BR_OPC | WDISP22(OFF);                \
  *
  * Sometimes we need to emit a branch earlier in the code
  * sequence.  And in these situations we adjust "destination"
- * to accomodate this difference.  For example, if we needed
+ * to accommodate this difference.  For example, if we needed
  * to emit a branch (and it's delay slot) right before the
  * final instruction emitted for a BPF opcode, we'd use
  * "destination + 4" instead of just plain "destination" above.
index c97e416..ff7f50f 100644 (file)
@@ -211,7 +211,7 @@ _gxio_mpipe_link_mac_t;
  *  request shared data permission on the same link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
  */
 #define GXIO_MPIPE_LINK_DATA               0x00000001UL
@@ -219,7 +219,7 @@ _gxio_mpipe_link_mac_t;
 /** Do not request data permission on the specified link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
  */
 #define GXIO_MPIPE_LINK_NO_DATA            0x00000002UL
@@ -230,7 +230,7 @@ _gxio_mpipe_link_mac_t;
  *  data permission on it, this open will fail.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
  */
 #define GXIO_MPIPE_LINK_EXCL_DATA          0x00000004UL
@@ -241,7 +241,7 @@ _gxio_mpipe_link_mac_t;
  *  permission on the same link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
  */
 #define GXIO_MPIPE_LINK_STATS              0x00000008UL
@@ -249,7 +249,7 @@ _gxio_mpipe_link_mac_t;
 /** Do not request stats permission on the specified link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
  */
 #define GXIO_MPIPE_LINK_NO_STATS           0x00000010UL
@@ -267,7 +267,7 @@ _gxio_mpipe_link_mac_t;
  *  reset by other statistics programs.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
  */
 #define GXIO_MPIPE_LINK_EXCL_STATS         0x00000020UL
@@ -278,7 +278,7 @@ _gxio_mpipe_link_mac_t;
  *  permission on the same link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
  */
 #define GXIO_MPIPE_LINK_CTL                0x00000040UL
@@ -286,7 +286,7 @@ _gxio_mpipe_link_mac_t;
 /** Do not request control permission on the specified link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
  */
 #define GXIO_MPIPE_LINK_NO_CTL             0x00000080UL
@@ -301,7 +301,7 @@ _gxio_mpipe_link_mac_t;
  *  it prevents programs like mpipe-link from configuring the link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
  */
 #define GXIO_MPIPE_LINK_EXCL_CTL           0x00000100UL
@@ -311,7 +311,7 @@ _gxio_mpipe_link_mac_t;
  *  change the desired state of the link when it is closed or the process
  *  exits.  No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
- *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open()
+ *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_UP            0x00000200UL
@@ -322,7 +322,7 @@ _gxio_mpipe_link_mac_t;
  *  open, set the desired state of the link to down.  No more than one of
  *  ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
  *  ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
- *  specifed in a gxio_mpipe_link_open() call.  If none are specified,
+ *  specified in a gxio_mpipe_link_open() call.  If none are specified,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_UPDOWN        0x00000400UL
@@ -332,7 +332,7 @@ _gxio_mpipe_link_mac_t;
  *  process has the link open, set the desired state of the link to down.
  *  No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
- *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open()
+ *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_DOWN          0x00000800UL
@@ -342,7 +342,7 @@ _gxio_mpipe_link_mac_t;
  *  closed or the process exits.  No more than one of
  *  ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
  *  ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
- *  specifed in a gxio_mpipe_link_open() call.  If none are specified,
+ *  specified in a gxio_mpipe_link_open() call.  If none are specified,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_NONE          0x00001000UL
index a506c2c..9247d6b 100644 (file)
@@ -126,15 +126,15 @@ void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
        struct pt_regs *thread_regs;
+       const int NGPRS = TREG_LAST_GPR + 1;
 
        if (task == NULL)
                return;
 
-       /* Initialize to zero. */
-       memset(gdb_regs, 0, NUMREGBYTES);
-
        thread_regs = task_pt_regs(task);
-       memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
+       memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
+       memset(&gdb_regs[NGPRS], 0,
+              (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
        gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
        gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
 }
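
Note on the hunk above: the tile kgdb snapshot no longer zeroes the whole gdb_regs buffer and then copies over part of it; it copies the saved GPRs (now TREG_LAST_GPR + 1 of them) and zeroes only the gap up to the PC slot, so nothing is written twice. A minimal userspace sketch of the same copy-then-zero shape; the register counts below are made-up stand-ins, not the real tile definitions:

    /* Copy-then-zero register snapshot; sizes are illustrative assumptions. */
    #include <stdio.h>
    #include <string.h>

    #define NGPRS      56   /* assumed number of general-purpose registers */
    #define PC_REGNUM  64   /* assumed index of the PC slot */
    #define NREGS      72   /* assumed total gdb register slots */

    static void fill_gdb_regs(unsigned long *gdb_regs,
                              const unsigned long *thread_regs,
                              unsigned long pc)
    {
            /* Copy only the GPRs that have saved values ... */
            memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
            /* ... and zero just the gap up to the PC slot instead of the
             * whole buffer, so no slot is written twice. */
            memset(&gdb_regs[NGPRS], 0,
                   (PC_REGNUM - NGPRS) * sizeof(unsigned long));
            gdb_regs[PC_REGNUM] = pc;
    }

    int main(void)
    {
            unsigned long thread_regs[NGPRS] = { 1, 2, 3 };
            unsigned long gdb_regs[NREGS];

            fill_gdb_regs(gdb_regs, thread_regs, 0x1234);
            printf("r0=%lu pc=%#lx\n", gdb_regs[0], gdb_regs[PC_REGNUM]);
            return 0;
    }
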
@@ -433,9 +433,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 struct kgdb_arch arch_kgdb_ops;
 
 /*
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
  *
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
  * specific callbacks.
  */
 int kgdb_arch_init(void)
@@ -447,9 +447,9 @@ int kgdb_arch_init(void)
 }
 
 /*
- * kgdb_arch_exit - Perform any architecture specific uninitalization.
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
  *
- * This function will handle the uninitalization of any architecture
+ * This function will handle the uninitialization of any architecture
  * specific callbacks, for dynamic registration and unregistration.
  */
 void kgdb_arch_exit(void)
index 4c017d0..aa2b44c 100644 (file)
@@ -1326,7 +1326,7 @@ invalid_device:
 
 
 /*
- * See tile_cfg_read() for relevent comments.
+ * See tile_cfg_read() for relevant comments.
  * Note that "val" is the value to write, not a pointer to that value.
  */
 static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
index bf8b35d..fbc5e92 100644 (file)
@@ -47,6 +47,15 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
                BUG();
 }
 
+static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
+               size_t n)
+{
+       if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
+               return memcpy_mcsafe(dst, (void __force *) src, n);
+       memcpy(dst, (void __force *) src, n);
+       return 0;
+}
+
 /**
  * arch_wmb_pmem - synchronize writes to persistent memory
  *
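
The new arch_memcpy_from_pmem() above falls back to a plain memcpy() unless the CPU advertises machine-check recovery, in which case it uses memcpy_mcsafe() so a poisoned read can be reported to the caller instead of taking the machine down. A rough userspace analogue of that feature-gated copy; the probe and the checked copy below are stand-ins, not the kernel's static_cpu_has()/memcpy_mcsafe():

    /* Feature-gated copy: use the routine that can report failure when
     * the platform supports it, otherwise a plain memcpy. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool cpu_has_recovery(void)
    {
            return true;    /* assumption: pretend the feature is present */
    }

    static int checked_copy(void *dst, const void *src, size_t n)
    {
            memcpy(dst, src, n);
            return 0;       /* a real version could return an error on a poisoned read */
    }

    static int copy_from_pmem(void *dst, const void *src, size_t n)
    {
            if (cpu_has_recovery())
                    return checked_copy(dst, src, n);
            memcpy(dst, src, n);
            return 0;
    }

    int main(void)
    {
            char src[16] = "persistent data";
            char dst[16];

            if (copy_from_pmem(dst, src, sizeof(src)) == 0)
                    printf("copied: %s\n", dst);
            return 0;
    }
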
index c24b422..1fde8d5 100644 (file)
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
 
 #endif /* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {            \
-       inc_irq_stat(irq_tlb_count);    \
-       local_flush_tlb();              \
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end) \
        native_flush_tlb_others(mask, mm, start, end)
index 0b445c2..ac780ca 100644 (file)
@@ -384,6 +384,9 @@ static void intel_thermal_interrupt(void)
 {
        __u64 msr_val;
 
+       if (static_cpu_has(X86_FEATURE_HWP))
+               wrmsrl_safe(MSR_HWP_STATUS, 0);
+
        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
        /* Check for violation of core thermal thresholds*/
index 8f4cc3d..fe9b9f7 100644 (file)
@@ -104,10 +104,8 @@ static void flush_tlb_func(void *info)
 
        inc_irq_stat(irq_tlb_count);
 
-       if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+       if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;
-       if (!f->flush_end)
-               f->flush_end = f->flush_start + PAGE_SIZE;
 
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                                 unsigned long end)
 {
        struct flush_tlb_info info;
+
+       if (end == 0)
+               end = start + PAGE_SIZE;
        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;
 
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-       trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
+       if (end == TLB_FLUSH_ALL)
+               trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
+       else
+               trace_tlb_flush(TLB_REMOTE_SEND_IPI,
+                               (end - start) >> PAGE_SHIFT);
+
        if (is_uv_system()) {
                unsigned int cpu;
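
In the hunk above, flush_tlb_func() no longer patches up a zero flush_end; native_flush_tlb_others() normalizes end == 0 to a single page up front and reports the tracepoint argument in pages (or the TLB_FLUSH_ALL sentinel) rather than in raw bytes. A small sketch of that normalization, with assumed PAGE_SHIFT and sentinel values:

    /* Normalize a (start, end) flush request into a trace argument. */
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define TLB_FLUSH_ALL   (~0UL)

    static unsigned long flush_trace_arg(unsigned long start, unsigned long end)
    {
            if (end == 0)                   /* single-page flush request */
                    end = start + PAGE_SIZE;
            if (end == TLB_FLUSH_ALL)       /* full flush: report the sentinel */
                    return TLB_FLUSH_ALL;
            return (end - start) >> PAGE_SHIFT;     /* otherwise: page count */
    }

    int main(void)
    {
            printf("%lu\n", flush_trace_arg(0x1000, 0));            /* 1 */
            printf("%lu\n", flush_trace_arg(0x1000, 0x5000));       /* 4 */
            printf("%#lx\n", flush_trace_arg(0, TLB_FLUSH_ALL));
            return 0;
    }
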
 
index 3bbdcc7..7d7a39b 100644 (file)
@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
        int cached_ret = -ENOKEY;
        int ret;
 
+       *_trusted = false;
+
        for (p = pkcs7->certs; p; p = p->next)
                p->seen = false;
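
The one-line addition above initializes the *_trusted output before any certificate is examined, so early exits cannot leave the caller reading an uninitialized value. A trivial illustration of the pattern; the lookup logic is a placeholder, not the pkcs7 code:

    #include <stdbool.h>
    #include <stdio.h>

    static int validate_trust(int key, bool *trusted)
    {
            *trusted = false;               /* defined even on early return */

            if (key < 0)
                    return -1;              /* caller still sees "false" */
            if (key == 42)
                    *trusted = true;
            return 0;
    }

    int main(void)
    {
            bool t;

            validate_trust(42, &t);
            printf("trusted=%d\n", t);
            return 0;
    }
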
 
index b5e54f2..0d92d0f 100644 (file)
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
 }
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+                                                         u32 lvl,
+                                                         void *context,
+                                                         void **rv)
+{
+       u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+       u32 capbuf[2];
+       struct acpi_osc_context osc_context = {
+               .uuid_str = sb_uuid_str,
+               .rev = 1,
+               .cap.length = 8,
+               .cap.pointer = capbuf,
+       };
+
+       if (acpi_hwp_native_thermal_lvt_set)
+               return AE_CTRL_TERMINATE;
+
+       capbuf[0] = 0x0000;
+       capbuf[1] = 0x1000; /* set bit 12 */
+
+       if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+               if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+                       u32 *capbuf_ret = osc_context.ret.pointer;
+
+                       if (capbuf_ret[1] & 0x1000) {
+                               acpi_handle_info(handle,
+                                       "_OSC native thermal LVT Acked\n");
+                               acpi_hwp_native_thermal_lvt_set = true;
+                       }
+               }
+               kfree(osc_context.ret.pointer);
+       }
+
+       return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+       if (boot_cpu_has(X86_FEATURE_HWP)) {
+               acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+                                   ACPI_UINT32_MAX,
+                                   acpi_hwp_native_thermal_lvt_osc,
+                                   NULL, NULL, NULL);
+               acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+                                acpi_hwp_native_thermal_lvt_osc,
+                                NULL, NULL);
+       }
+}
+#endif
+
 /*
  * The following ACPI IDs are known to be suitable for representing as
  * processor devices.
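
The acpi_processor.c hunk above negotiates the "native thermal LVT" capability via _OSC: it offers a two-dword capability buffer with bit 12 set and treats the request as acknowledged only if firmware hands that bit back. A compressed sketch of the handshake; the helper names and the firmware stub are made up, only the bit-12 convention comes from the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OSC_NATIVE_THERMAL_LVT  (1u << 12)

    struct osc_result {
            uint32_t caps[2];       /* what firmware granted */
    };

    /* Stand-in for the real _OSC evaluation: pretend firmware grants it. */
    static int run_osc(const uint32_t request[2], struct osc_result *res)
    {
            res->caps[0] = request[0];
            res->caps[1] = request[1];
            return 0;
    }

    static bool negotiate_native_thermal_lvt(void)
    {
            uint32_t capbuf[2] = { 0, OSC_NATIVE_THERMAL_LVT };
            struct osc_result res;

            if (run_osc(capbuf, &res))
                    return false;
            return res.caps[1] & OSC_NATIVE_THERMAL_LVT;
    }

    int main(void)
    {
            printf("native thermal LVT %s\n",
                   negotiate_native_thermal_lvt() ? "acked" : "not acked");
            return 0;
    }
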
index 0e85678..c068c82 100644 (file)
@@ -1019,6 +1019,9 @@ static int __init acpi_bus_init(void)
                goto error1;
        }
 
+       /* Set capability bits for _OSC under processor scope */
+       acpi_early_processor_osc();
+
        /*
         * _OSC method may exist in module level code,
         * so it must be run after ACPI_FULL_INITIALIZATION
index a37508e..7c18847 100644 (file)
@@ -145,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
 static inline void acpi_early_processor_set_pdc(void) {}
 #endif
 
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
 /* --------------------------------------------------------------------------
                                   Embedded Controller
    -------------------------------------------------------------------------- */
index 9e9fe4b..309049d 100644 (file)
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
        return mtk_reset_deassert(rcdev, id);
 }
 
-static struct reset_control_ops mtk_reset_ops = {
+static const struct reset_control_ops mtk_reset_ops = {
        .assert = mtk_reset_assert,
        .deassert = mtk_reset_deassert,
        .reset = mtk_reset,
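
This hunk, and the series of clk/reset hunks that follow it, mark the reset_control_ops tables const; since the function-pointer tables are never modified at run time, they can live in read-only data and cannot be patched by accident. A generic example of the constified ops-table pattern, with illustrative types and callbacks:

    #include <stdio.h>

    struct demo_ops {
            int (*assert_rst)(int id);
            int (*deassert_rst)(int id);
    };

    static int demo_assert(int id)   { printf("assert %d\n", id);   return 0; }
    static int demo_deassert(int id) { printf("deassert %d\n", id); return 0; }

    /* const: any attempt to patch the table at runtime is a compile error. */
    static const struct demo_ops demo_reset_ops = {
            .assert_rst   = demo_assert,
            .deassert_rst = demo_deassert,
    };

    int main(void)
    {
            demo_reset_ops.assert_rst(3);
            demo_reset_ops.deassert_rst(3);
            return 0;
    }
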
index b54da1f..b4e4d6a 100644 (file)
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
        return 0;
 }
 
-static struct reset_control_ops mmp_clk_reset_ops = {
+static const struct reset_control_ops mmp_clk_reset_ops = {
        .assert         = mmp_clk_reset_assert,
        .deassert       = mmp_clk_reset_deassert,
 };
index 5428efb..3cd1af0 100644 (file)
@@ -129,20 +129,10 @@ static const char * const gcc_xo_ddr_500_200[] = {
 };
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define P_XO 0
-#define FE_PLL_200 1
-#define FE_PLL_500 2
-#define DDRC_PLL_666  3
-
-#define DDRC_PLL_666_SDCC  1
-#define FE_PLL_125_DLY 1
-
-#define FE_PLL_WCSS2G 1
-#define FE_PLL_WCSS5G 1
 
 static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
        F(48000000, P_XO, 1, 0, 0),
-       F(200000000, FE_PLL_200, 1, 0, 0),
+       F(200000000, P_FEPLL200, 1, 0, 0),
        { }
 };
 
@@ -334,15 +324,15 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
-       F(1843200, FE_PLL_200, 1, 144, 15625),
-       F(3686400, FE_PLL_200, 1, 288, 15625),
-       F(7372800, FE_PLL_200, 1, 576, 15625),
-       F(14745600, FE_PLL_200, 1, 1152, 15625),
-       F(16000000, FE_PLL_200, 1, 2, 25),
+       F(1843200, P_FEPLL200, 1, 144, 15625),
+       F(3686400, P_FEPLL200, 1, 288, 15625),
+       F(7372800, P_FEPLL200, 1, 576, 15625),
+       F(14745600, P_FEPLL200, 1, 1152, 15625),
+       F(16000000, P_FEPLL200, 1, 2, 25),
        F(24000000, P_XO, 1, 1, 2),
-       F(32000000, FE_PLL_200, 1, 4, 25),
-       F(40000000, FE_PLL_200, 1, 1, 5),
-       F(46400000, FE_PLL_200, 1, 29, 125),
+       F(32000000, P_FEPLL200, 1, 4, 25),
+       F(40000000, P_FEPLL200, 1, 1, 5),
+       F(46400000, P_FEPLL200, 1, 29, 125),
        F(48000000, P_XO, 1, 0, 0),
        { }
 };
@@ -410,9 +400,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_gp_clk[] = {
-       F(1250000,  FE_PLL_200, 1, 16, 0),
-       F(2500000,  FE_PLL_200, 1,  8, 0),
-       F(5000000,  FE_PLL_200, 1,  4, 0),
+       F(1250000,  P_FEPLL200, 1, 16, 0),
+       F(2500000,  P_FEPLL200, 1,  8, 0),
+       F(5000000,  P_FEPLL200, 1,  4, 0),
        { }
 };
 
@@ -512,11 +502,11 @@ static struct clk_branch gcc_gp3_clk = {
 static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
        F(144000,    P_XO,                      1,  3, 240),
        F(400000,    P_XO,                      1,  1, 0),
-       F(20000000,  FE_PLL_500,                1,  1, 25),
-       F(25000000,  FE_PLL_500,                1,  1, 20),
-       F(50000000,  FE_PLL_500,                1,  1, 10),
-       F(100000000, FE_PLL_500,                1,  1, 5),
-       F(193000000, DDRC_PLL_666_SDCC,         1,  0, 0),
+       F(20000000,  P_FEPLL500,                1,  1, 25),
+       F(25000000,  P_FEPLL500,                1,  1, 20),
+       F(50000000,  P_FEPLL500,                1,  1, 10),
+       F(100000000, P_FEPLL500,                1,  1, 5),
+       F(193000000, P_DDRPLL,          1,  0, 0),
        { }
 };
 
@@ -536,9 +526,9 @@ static struct clk_rcg2  sdcc1_apps_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_apps_clk[] = {
        F(48000000, P_XO,          1, 0, 0),
-       F(200000000, FE_PLL_200,   1, 0, 0),
-       F(500000000, FE_PLL_500,   1, 0, 0),
-       F(626000000, DDRC_PLL_666, 1, 0, 0),
+       F(200000000, P_FEPLL200,   1, 0, 0),
+       F(500000000, P_FEPLL500,   1, 0, 0),
+       F(626000000, P_DDRPLLAPSS, 1, 0, 0),
        { }
 };
 
@@ -557,7 +547,7 @@ static struct clk_rcg2 apps_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
        F(48000000, P_XO,          1, 0, 0),
-       F(100000000, FE_PLL_200,   2, 0, 0),
+       F(100000000, P_FEPLL200,   2, 0, 0),
        { }
 };
 
@@ -940,7 +930,7 @@ static struct clk_branch gcc_usb2_mock_utmi_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
-       F(2000000, FE_PLL_200, 10, 0, 0),
+       F(2000000, P_FEPLL200, 10, 0, 0),
        { }
 };
 
@@ -1007,7 +997,7 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
-       F(125000000, FE_PLL_125_DLY, 1, 0, 0),
+       F(125000000, P_FEPLL125DLY, 1, 0, 0),
        { }
 };
 
@@ -1027,7 +1017,7 @@ static struct clk_rcg2 fephy_125m_dly_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
        F(48000000, P_XO, 1, 0, 0),
-       F(250000000, FE_PLL_WCSS2G, 1, 0, 0),
+       F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
        { }
 };
 
@@ -1097,7 +1087,7 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
 
 static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
        F(48000000, P_XO, 1, 0, 0),
-       F(250000000, FE_PLL_WCSS5G, 1, 0, 0),
+       F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
        { }
 };
 
@@ -1325,6 +1315,16 @@ MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
 
 static int gcc_ipq4019_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
+
+       clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
+       clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
+       clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
+       clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
+       clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
+       clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
+       clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
+
        return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
 }
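
The gcc-ipq4019 changes above drop the driver-local parent macros in favour of the shared parent indices (P_FEPLL200, P_FEPLL500, and so on) and register the FEPLL/DDR PLL parents as fixed-rate clocks at probe time via clk_register_fixed_rate(). A toy version of a frequency table keyed by a shared parent enum; names and rates below are illustrative:

    #include <stdio.h>

    enum parent { P_XO, P_FEPLL200, P_FEPLL500, P_DDRPLLAPSS };

    struct freq_tbl {
            unsigned long freq;
            enum parent src;
    };

    static const struct freq_tbl apps_clk_tbl[] = {
            {  48000000, P_XO },
            { 200000000, P_FEPLL200 },
            { 500000000, P_FEPLL500 },
            { 626000000, P_DDRPLLAPSS },
            { 0, P_XO }                     /* sentinel */
    };

    int main(void)
    {
            const struct freq_tbl *f;

            for (f = apps_clk_tbl; f->freq; f++)
                    printf("%lu Hz from parent %d\n", f->freq, f->src);
            return 0;
    }
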
 
index 6c977d3..0324d8d 100644 (file)
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
        return regmap_update_bits(rst->regmap, map->reg, mask, 0);
 }
 
-struct reset_control_ops qcom_reset_ops = {
+const struct reset_control_ops qcom_reset_ops = {
        .reset = qcom_reset,
        .assert = qcom_reset_assert,
        .deassert = qcom_reset_deassert,
index 0e11e21..cda8779 100644 (file)
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
 #define to_qcom_reset_controller(r) \
        container_of(r, struct qcom_reset_controller, rcdev);
 
-extern struct reset_control_ops qcom_reset_ops;
+extern const struct reset_control_ops qcom_reset_ops;
 
 #endif
index 552f7bb..2121898 100644 (file)
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
        return 0;
 }
 
-static struct reset_control_ops rockchip_softrst_ops = {
+static const struct reset_control_ops rockchip_softrst_ops = {
        .assert         = rockchip_softrst_assert,
        .deassert       = rockchip_softrst_deassert,
 };
index 957aae6..d0c6c9a 100644 (file)
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
        return 0;
 }
 
-static struct reset_control_ops atlas7_rst_ops = {
+static const struct reset_control_ops atlas7_rst_ops = {
        .reset = atlas7_reset_module,
 };
 
index 044c171..d9ea22e 100644 (file)
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
        return 0;
 }
 
-static struct reset_control_ops sunxi_ve_reset_ops = {
+static const struct reset_control_ops sunxi_ve_reset_ops = {
        .assert         = sunxi_ve_reset_assert,
        .deassert       = sunxi_ve_reset_deassert,
 };
index a9b1761..028dd83 100644 (file)
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
        return 0;
 }
 
-static struct reset_control_ops sun9i_mmc_reset_ops = {
+static const struct reset_control_ops sun9i_mmc_reset_ops = {
        .assert         = sun9i_mmc_reset_assert,
        .deassert       = sun9i_mmc_reset_deassert,
 };
index 5432b1c..fe0c3d1 100644 (file)
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
        return 0;
 }
 
-static struct reset_control_ops sunxi_usb_reset_ops = {
+static const struct reset_control_ops sunxi_usb_reset_ops = {
        .assert         = sunxi_usb_reset_assert,
        .deassert       = sunxi_usb_reset_deassert,
 };
index 2a3a4fe..f60fe2e 100644 (file)
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
        }
 }
 
-static struct reset_control_ops rst_ops = {
+static const struct reset_control_ops rst_ops = {
        .assert = tegra_clk_rst_assert,
        .deassert = tegra_clk_rst_deassert,
 };
index a68e199..c5c9599 100644 (file)
@@ -37,7 +37,6 @@ struct men_z127_gpio {
        void __iomem *reg_base;
        struct mcb_device *mdev;
        struct resource *mem;
-       spinlock_t lock;
 };
 
 static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
@@ -69,7 +68,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
                debounce /= 50;
        }
 
-       spin_lock(&priv->lock);
+       spin_lock(&gc->bgpio_lock);
 
        db_en = readl(priv->reg_base + MEN_Z127_DBER);
 
@@ -84,7 +83,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
        writel(db_en, priv->reg_base + MEN_Z127_DBER);
        writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
 
-       spin_unlock(&priv->lock);
+       spin_unlock(&gc->bgpio_lock);
 
        return 0;
 }
@@ -97,7 +96,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
        if (gpio_pin >= gc->ngpio)
                return -EINVAL;
 
-       spin_lock(&priv->lock);
+       spin_lock(&gc->bgpio_lock);
        od_en = readl(priv->reg_base + MEN_Z127_ODER);
 
        if (gpiochip_line_is_open_drain(gc, gpio_pin))
@@ -106,7 +105,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
                od_en &= ~BIT(gpio_pin);
 
        writel(od_en, priv->reg_base + MEN_Z127_ODER);
-       spin_unlock(&priv->lock);
+       spin_unlock(&gc->bgpio_lock);
 
        return 0;
 }
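
gpio-menz127 above stops allocating its own spinlock and serializes its register read-modify-write sequences on the gpio_chip's bgpio_lock, the same lock the generic-GPIO accessors take. A userspace sketch of one register update under a single shared lock, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t chip_lock = PTHREAD_MUTEX_INITIALIZER;   /* one lock... */
    static unsigned int oder_reg;                                   /* ...one register */

    static void set_open_drain(unsigned int pin, int enable)
    {
            pthread_mutex_lock(&chip_lock);
            if (enable)
                    oder_reg |= 1u << pin;
            else
                    oder_reg &= ~(1u << pin);
            pthread_mutex_unlock(&chip_lock);
    }

    int main(void)
    {
            set_open_drain(5, 1);
            printf("ODER=%#x\n", oder_reg);
            return 0;
    }
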
index c0aa387..0dc9161 100644 (file)
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               err = -EINVAL;
+               goto err;
+       }
+
        gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
                                                        resource_size(res));
        if (!gpio->base) {
index 0f734ee..ca77ec1 100644 (file)
@@ -1,10 +1,14 @@
-menu "ACP Configuration"
+menu "ACP (Audio CoProcessor) Configuration"
 
 config DRM_AMD_ACP
-       bool "Enable ACP IP support"
+       bool "Enable AMD Audio CoProcessor IP support"
        select MFD_CORE
        select PM_GENERIC_DOMAINS if PM
        help
        Choose this option to enable ACP IP support for AMD SOCs.
+       This adds the ACP (Audio CoProcessor) IP driver and wires
+       it up into the amdgpu driver.  The ACP block provides the DMA
+       engine for the i2s-based ALSA driver. It is required for audio
+       on APUs which utilize an i2s codec.
 
 endmenu
index 151a2d4..56d1458 100644 (file)
@@ -608,6 +608,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;
 
+       /* Can't move a pinned BO to visible VRAM */
+       if (abo->pin_count > 0)
+               return -EINVAL;
+
        /* hurrah the memory is not visible ! */
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
        lpfn =  adev->mc.visible_vram_size >> PAGE_SHIFT;
index ab34190..f1a55d1 100644 (file)
@@ -384,9 +384,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *new_mem)
 {
        struct amdgpu_device *adev;
+       struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;
 
+       /* Can't move a pinned BO */
+       abo = container_of(bo, struct amdgpu_bo, tbo);
+       if (WARN_ON_ONCE(abo->pin_count > 0))
+               return -EINVAL;
+
        adev = amdgpu_get_adev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                amdgpu_move_null(bo, new_mem);
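
Both amdgpu hunks above (and the matching radeon hunks further down) refuse to migrate a buffer object whose pin_count is non-zero, since a pinned BO is expected to stay at a fixed address. A stripped-down sketch of the guard; the structures are simplified stand-ins:

    #include <errno.h>
    #include <stdio.h>

    struct buffer_object {
            int pin_count;
            int placement;          /* 0 = system, 1 = VRAM (illustrative) */
    };

    static int bo_move(struct buffer_object *bo, int new_placement)
    {
            if (bo->pin_count > 0)          /* pinned: address must not change */
                    return -EINVAL;
            bo->placement = new_placement;
            return 0;
    }

    int main(void)
    {
            struct buffer_object bo = { .pin_count = 1, .placement = 0 };

            printf("move -> %d\n", bo_move(&bo, 1));        /* -EINVAL */
            bo.pin_count = 0;
            printf("move -> %d\n", bo_move(&bo, 1));        /* 0 */
            return 0;
    }
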
index 7d58f59..df64ed1 100644 (file)
@@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 {
        struct drm_dp_aux_msg msg;
        unsigned int retry;
-       int err;
+       int err = 0;
 
        memset(&msg, 0, sizeof(msg));
        msg.address = offset;
@@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
        msg.buffer = buffer;
        msg.size = size;
 
+       mutex_lock(&aux->hw_mutex);
+
        /*
         * The specification doesn't give any recommendation on how often to
         * retry native transactions. We used to retry 7 times like for
@@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
         */
        for (retry = 0; retry < 32; retry++) {
 
-               mutex_lock(&aux->hw_mutex);
                err = aux->transfer(aux, &msg);
-               mutex_unlock(&aux->hw_mutex);
                if (err < 0) {
                        if (err == -EBUSY)
                                continue;
 
-                       return err;
+                       goto unlock;
                }
 
 
                switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
                case DP_AUX_NATIVE_REPLY_ACK:
                        if (err < size)
-                               return -EPROTO;
-                       return err;
+                               err = -EPROTO;
+                       goto unlock;
 
                case DP_AUX_NATIVE_REPLY_NACK:
-                       return -EIO;
+                       err = -EIO;
+                       goto unlock;
 
                case DP_AUX_NATIVE_REPLY_DEFER:
                        usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
        }
 
        DRM_DEBUG_KMS("too many retries, giving up\n");
-       return -EIO;
+       err = -EIO;
+
+unlock:
+       mutex_unlock(&aux->hw_mutex);
+       return err;
 }
 
 /**
@@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
 
        for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
-               mutex_lock(&aux->hw_mutex);
                ret = aux->transfer(aux, msg);
-               mutex_unlock(&aux->hw_mutex);
                if (ret < 0) {
                        if (ret == -EBUSY)
                                continue;
@@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 
        memset(&msg, 0, sizeof(msg));
 
+       mutex_lock(&aux->hw_mutex);
+
        for (i = 0; i < num; i++) {
                msg.address = msgs[i].addr;
                drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
        msg.size = 0;
        (void)drm_dp_i2c_do_msg(aux, &msg);
 
+       mutex_unlock(&aux->hw_mutex);
+
        return err;
 }
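
The drm_dp_helper changes above move hw_mutex out of the individual transfer attempts so the whole retry loop (and, for I2C, the whole multi-message transfer) runs under one lock, and every exit funnels through a single unlock label. A minimal sketch of that locking shape, with a placeholder transfer function and simplified reply handling:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_transfer(int attempt)
    {
            return attempt < 2 ? -EBUSY : 4;        /* succeed on the third try */
    }

    static int dpcd_access(void)
    {
            int err = 0;
            int retry;

            pthread_mutex_lock(&hw_lock);
            for (retry = 0; retry < 32; retry++) {
                    err = do_transfer(retry);
                    if (err == -EBUSY)
                            continue;
                    goto unlock;            /* success or hard failure */
            }
            err = -EIO;                     /* too many retries */
    unlock:
            pthread_mutex_unlock(&hw_lock);
            return err;
    }

    int main(void)
    {
            printf("dpcd_access() = %d\n", dpcd_access());
            return 0;
    }
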
 
index b04a646..65428cf 100644 (file)
@@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
 int msm_hdmi_pll_8960_init(struct platform_device *pdev);
 int msm_hdmi_pll_8996_init(struct platform_device *pdev);
 #else
-static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
 {
        return -ENODEV;
 }
index d52910e..c03b967 100644 (file)
@@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_kms *kms = priv->kms;
 
-       if (kms)
-               kms->funcs->preclose(kms, file);
-
        mutex_lock(&dev->struct_mutex);
        if (ctx == priv->lastctx)
                priv->lastctx = NULL;
index 9bcabaa..e32222c 100644 (file)
@@ -55,7 +55,6 @@ struct msm_kms_funcs {
                        struct drm_encoder *slave_encoder,
                        bool is_cmd_mode);
        /* cleanup: */
-       void (*preclose)(struct msm_kms *kms, struct drm_file *file);
        void (*destroy)(struct msm_kms *kms);
 };
 
index dd46c38..2d901bf 100644 (file)
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;
 
+       /* Can't move a pinned BO to visible VRAM */
+       if (rbo->pin_count > 0)
+               return -EINVAL;
+
        /* hurrah the memory is not visible ! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        lpfn =  rdev->mc.visible_vram_size >> PAGE_SHIFT;
index 6d8c323..c008312 100644 (file)
@@ -397,9 +397,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
+       struct radeon_bo *rbo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;
 
+       /* Can't move a pinned BO */
+       rbo = container_of(bo, struct radeon_bo, tbo);
+       if (WARN_ON_ONCE(rbo->pin_count > 0))
+               return -EINVAL;
+
        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
index cb75ab7..af4df81 100644 (file)
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
        { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                }
                ++p;
        }
+       /* limit mclk on all R7 370 parts for stability */
+       if (rdev->pdev->device == 0x6811 &&
+           rdev->pdev->revision == 0x81)
+               max_mclk = 120000;
 
        if (rps->vce_active) {
                rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
index 3d3cf2f..d5cfef7 100644 (file)
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
        if (!iores)
                return -ENXIO;
 
-       platform_set_drvdata(pdev, hdmi);
-
        encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
        /*
         * If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
        drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
                         DRM_MODE_ENCODER_TMDS, NULL);
 
-       return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+       ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+       /*
+        * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+        * which would have called the encoder cleanup.  Do it manually.
+        */
+       if (ret)
+               drm_encoder_cleanup(encoder);
+
+       return ret;
 }
 
 static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
index 896da09..f556a8f 100644 (file)
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
        return 0;
 }
 
+static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+                                                   struct drm_file *file_priv)
+{
+       struct rockchip_drm_private *priv = crtc->dev->dev_private;
+       int pipe = drm_crtc_index(crtc);
+
+       if (pipe < ROCKCHIP_MAX_CRTC &&
+           priv->crtc_funcs[pipe] &&
+           priv->crtc_funcs[pipe]->cancel_pending_vblank)
+               priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
+}
+
+static void rockchip_drm_preclose(struct drm_device *dev,
+                                 struct drm_file *file_priv)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+}
+
 void rockchip_drm_lastclose(struct drm_device *dev)
 {
        struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = {
                                  DRIVER_PRIME | DRIVER_ATOMIC,
        .load                   = rockchip_drm_load,
        .unload                 = rockchip_drm_unload,
+       .preclose               = rockchip_drm_preclose,
        .lastclose              = rockchip_drm_lastclose,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = rockchip_drm_crtc_enable_vblank,
index 3529f69..00d17d7 100644 (file)
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs {
        int (*enable_vblank)(struct drm_crtc *crtc);
        void (*disable_vblank)(struct drm_crtc *crtc);
        void (*wait_for_update)(struct drm_crtc *crtc);
+       void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
 };
 
 struct rockchip_atomic_commit {
index fd37054..a619f12 100644 (file)
@@ -499,10 +499,25 @@ err_disable_hclk:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
+       int i;
 
        if (!vop->is_enabled)
                return;
 
+       /*
+        * We need to make sure that all windows are disabled before we
+        * disable that crtc. Otherwise we might try to scan from a destroyed
+        * buffer later.
+        */
+       for (i = 0; i < vop->data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win = vop_win->data;
+
+               spin_lock(&vop->reg_lock);
+               VOP_WIN_SET(vop, win, enable, 0);
+               spin_unlock(&vop->reg_lock);
+       }
+
        drm_crtc_vblank_off(crtc);
 
        /*
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
                           struct drm_plane_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc_state *crtc_state;
        struct drm_framebuffer *fb = state->fb;
        struct vop_win *vop_win = to_vop_win(plane);
        struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
        int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
                                        DRM_PLANE_HELPER_NO_SCALING;
 
-       crtc = crtc ? crtc : plane->state->crtc;
-       /*
-        * Both crtc or plane->state->crtc can be null.
-        */
        if (!crtc || !fb)
                goto out_disable;
+
+       crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+       if (WARN_ON(!crtc_state))
+               return -EINVAL;
+
        src->x1 = state->src_x;
        src->y1 = state->src_y;
        src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 
        clip.x1 = 0;
        clip.y1 = 0;
-       clip.x2 = crtc->mode.hdisplay;
-       clip.y2 = crtc->mode.vdisplay;
+       clip.x2 = crtc_state->adjusted_mode.hdisplay;
+       clip.y2 = crtc_state->adjusted_mode.vdisplay;
 
        ret = drm_plane_helper_check_update(plane, crtc, state->fb,
                                            src, dest, &clip,
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
        WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
 }
 
+static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+                                          struct drm_file *file_priv)
+{
+       struct drm_device *drm = crtc->dev;
+       struct vop *vop = to_vop(crtc);
+       struct drm_pending_vblank_event *e;
+       unsigned long flags;
+
+       spin_lock_irqsave(&drm->event_lock, flags);
+       e = vop->event;
+       if (e && e->base.file_priv == file_priv) {
+               vop->event = NULL;
+
+               e->base.destroy(&e->base);
+               file_priv->event_space += sizeof(e->event);
+       }
+       spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
 static const struct rockchip_crtc_funcs private_crtc_funcs = {
        .enable_vblank = vop_crtc_enable_vblank,
        .disable_vblank = vop_crtc_disable_vblank,
        .wait_for_update = vop_crtc_wait_for_update,
+       .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
 };
 
 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 {
        struct vop *vop = to_vop(crtc);
 
-       if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
-               return false;
-
        adjusted_mode->clock =
                clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
 
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop)
        const struct vop_data *vop_data = vop->data;
        struct device *dev = vop->dev;
        struct drm_device *drm_dev = vop->drm_dev;
-       struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+       struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
        struct drm_crtc *crtc = &vop->crtc;
        struct device_node *port;
        int ret;
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop)
        ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
                                        &vop_crtc_funcs, NULL);
        if (ret)
-               return ret;
+               goto err_cleanup_planes;
 
        drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
 
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop)
        if (!port) {
                DRM_ERROR("no port node found in %s\n",
                          dev->of_node->full_name);
+               ret = -ENOENT;
                goto err_cleanup_crtc;
        }
 
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop)
 err_cleanup_crtc:
        drm_crtc_cleanup(crtc);
 err_cleanup_planes:
-       list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+       list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+                                head)
                drm_plane_cleanup(plane);
        return ret;
 }
@@ -1202,9 +1238,28 @@ err_cleanup_planes:
 static void vop_destroy_crtc(struct vop *vop)
 {
        struct drm_crtc *crtc = &vop->crtc;
+       struct drm_device *drm_dev = vop->drm_dev;
+       struct drm_plane *plane, *tmp;
 
        rockchip_unregister_crtc_funcs(crtc);
        of_node_put(crtc->port);
+
+       /*
+        * We need to cleanup the planes now.  Why?
+        *
+        * The planes are "&vop->win[i].base".  That means the memory is
+        * all part of the big "struct vop" chunk of memory.  That memory
+        * was devm allocated and associated with this component.  We need to
+        * free it ourselves before vop_unbind() finishes.
+        */
+       list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+                                head)
+               vop_plane_destroy(plane);
+
+       /*
+        * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
+        * references the CRTC.
+        */
        drm_crtc_cleanup(crtc);
 }
 
index 33239a2..fd1eb9d 100644 (file)
@@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_gfree:
-       drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+       drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
 out:
        return ret;
 }
index 2a0a784..d7528e0 100644 (file)
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
                return ret;
        }
 
-       drm_gem_object_unreference(&obj->base);
+       drm_gem_object_unreference_unlocked(&obj->base);
        *handle_p = handle;
        return 0;
 }
index 36544c4..303d0c9 100644 (file)
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
 
 int max1111_read_channel(int channel)
 {
+       if (!the_max1111 || !the_max1111->spi)
+               return -ENODEV;
+
        return max1111_read(&the_max1111->spi->dev, channel);
 }
 EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
 {
        struct max1111_data *data = spi_get_drvdata(spi);
 
+#ifdef CONFIG_SHARPSL_PM
+       the_max1111 = NULL;
+#endif
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
        sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
index 9f0a48e..80e933b 100644 (file)
@@ -451,7 +451,7 @@ err_free:
        return ret;
 }
 
-static const struct ide_port_info icside_v6_port_info __initconst = {
+static const struct ide_port_info icside_v6_port_info = {
        .init_dma               = icside_dma_off_init,
        .port_ops               = &icside_v6_no_dma_port_ops,
        .host_flags             = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
index 8012e43..46427ea 100644 (file)
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
 
        clk_enable(clk);
        rate = clk_get_rate(clk);
+       if (!rate)
+               return -EINVAL;
 
        /* NOTE:  round *down* to meet minimum timings; we count in clocks */
        ideclk_period = 1000000000UL / rate;
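
palm_bk3710 above validates the clock rate before using it as a divisor, so an unconfigured clock no longer leads to a divide-by-zero. A tiny guard sketch of the same idea; the rate source is a placeholder:

    #include <errno.h>
    #include <stdio.h>

    static unsigned long get_rate(void)
    {
            return 0;       /* simulate an unconfigured clock */
    }

    static int compute_period_ns(unsigned long *period)
    {
            unsigned long rate = get_rate();

            if (!rate)
                    return -EINVAL;         /* refuse to divide by zero */
            *period = 1000000000UL / rate;
            return 0;
    }

    int main(void)
    {
            unsigned long period;

            printf("compute_period_ns() = %d\n", compute_period_ns(&period));
            return 0;
    }
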
index 60b30d3..411e446 100644 (file)
@@ -63,7 +63,6 @@ isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
 static void isert_release_work(struct work_struct *work);
-static void isert_wait4flush(struct isert_conn *isert_conn);
 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -141,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
        attr.qp_context = isert_conn;
        attr.send_cq = comp->cq;
        attr.recv_cq = comp->cq;
-       attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+       attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
        attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
        isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -887,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
                break;
        case ISER_CONN_UP:
                isert_conn_terminate(isert_conn);
-               isert_wait4flush(isert_conn);
+               ib_drain_qp(isert_conn->qp);
                isert_handle_unbound_conn(isert_conn);
                break;
        case ISER_CONN_BOUND:
@@ -3213,36 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
        }
 }
 
-static void
-isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
-{
-       struct isert_conn *isert_conn = wc->qp->qp_context;
-
-       isert_print_wc(wc, "beacon");
-
-       isert_info("conn %p completing wait_comp_err\n", isert_conn);
-       complete(&isert_conn->wait_comp_err);
-}
-
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
-       struct ib_recv_wr *bad_wr;
-       static struct ib_cqe cqe = { .done = isert_beacon_done };
-
-       isert_info("conn %p\n", isert_conn);
-
-       init_completion(&isert_conn->wait_comp_err);
-       isert_conn->beacon.wr_cqe = &cqe;
-       /* post an indication that all flush errors were consumed */
-       if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
-               isert_err("conn %p failed to post beacon", isert_conn);
-               return;
-       }
-
-       wait_for_completion(&isert_conn->wait_comp_err);
-}
-
 /**
  * isert_put_unsol_pending_cmds() - Drop commands waiting for
  *     unsolicitate dataout
@@ -3288,7 +3257,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->mutex);
 
-       isert_wait4flush(isert_conn);
+       ib_drain_qp(isert_conn->qp);
        isert_put_unsol_pending_cmds(conn);
        isert_wait4cmds(conn);
        isert_wait4logout(isert_conn);
@@ -3300,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
-       isert_wait4flush(isert_conn);
+       ib_drain_qp(isert_conn->qp);
        isert_put_conn(isert_conn);
 }
 
index 192788a..147900c 100644 (file)
@@ -209,14 +209,12 @@ struct isert_conn {
        struct ib_qp            *qp;
        struct isert_device     *device;
        struct mutex            mutex;
-       struct completion       wait_comp_err;
        struct kref             kref;
        struct list_head        fr_pool;
        int                     fr_pool_size;
        /* lock to protect fastreg pool */
        spinlock_t              pool_lock;
        struct work_struct      release_work;
-       struct ib_recv_wr       beacon;
        bool                    logout_posted;
        bool                    snd_w_inv;
 };
index 7fdf78f..df7e05c 100644 (file)
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
                        if (count == 0)
                                count = 32;
                        isac_empty_fifo(cs, count);
-                       if ((count = cs->rcvidx) > 0) {
+                       count = cs->rcvidx;
+                       if (count > 0) {
                                cs->rcvidx = 0;
-                               if (!(skb = alloc_skb(count, GFP_ATOMIC)))
+                               skb = alloc_skb(count, GFP_ATOMIC);
+                               if (!skb)
                                        printk(KERN_WARNING "HiSax: D receive out of memory\n");
                                else {
                                        memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
                                cs->tx_skb = NULL;
                        }
                }
-               if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
+               cs->tx_skb = skb_dequeue(&cs->sq);
+               if (cs->tx_skb) {
                        cs->tx_cnt = 0;
                        isac_fill_fifo(cs);
                } else
@@ -313,7 +316,8 @@ afterXPR:
 #if ARCOFI_USE
                        if (v1 & 0x08) {
                                if (!cs->dc.isac.mon_rx) {
-                                       if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+                                       cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+                                       if (!cs->dc.isac.mon_rx) {
                                                if (cs->debug & L1_DEB_WARN)
                                                        debugl1(cs, "ISAC MON RX out of memory!");
                                                cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@ afterXPR:
                afterMONR0:
                        if (v1 & 0x80) {
                                if (!cs->dc.isac.mon_rx) {
-                                       if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+                                       cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+                                       if (!cs->dc.isac.mon_rx) {
                                                if (cs->debug & L1_DEB_WARN)
                                                        debugl1(cs, "ISAC MON RX out of memory!");
                                                cs->dc.isac.mocr &= 0x0f;
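
The HiSax hunk above is a readability cleanup: the assignments buried inside if-conditions are hoisted into separate statements before the NULL and length checks. A compressed example of the same refactor, using malloc in place of the GFP_ATOMIC kmalloc calls:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_MON_FRAME 32        /* illustrative size */

    static unsigned char *mon_rx;

    static int ensure_mon_buffer(void)
    {
            if (mon_rx)
                    return 0;

            /* was: if (!(mon_rx = kmalloc(...))) { ... } */
            mon_rx = malloc(MAX_MON_FRAME);
            if (!mon_rx) {
                    fprintf(stderr, "MON RX out of memory\n");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("ensure_mon_buffer() = %d\n", ensure_mon_buffer());
            free(mon_rx);
            return 0;
    }
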
index fa086e0..50454be 100644 (file)
@@ -2264,6 +2264,57 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
        mutex_unlock(&ps->smi_mutex);
 }
 
+static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+                                    int reg, int val)
+{
+       int ret;
+
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       if (ret < 0)
+               goto restore_page_0;
+
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+restore_page_0:
+       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+       return ret;
+}
+
+static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
+                                   int reg)
+{
+       int ret;
+
+       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+       if (ret < 0)
+               goto restore_page_0;
+
+       ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+restore_page_0:
+       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+       return ret;
+}
+
+static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+{
+       int ret;
+
+       ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+                                      MII_BMCR);
+       if (ret < 0)
+               return ret;
+
+       if (ret & BMCR_PDOWN) {
+               ret &= ~BMCR_PDOWN;
+               ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+                                               PAGE_FIBER_SERDES, MII_BMCR,
+                                               ret);
+       }
+
+       return ret;
+}
+
 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -2367,6 +2418,23 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                        goto abort;
        }
 
+       /* If this port is connected to a SerDes, make sure the SerDes is not
+        * powered down.
+        */
+       if (mv88e6xxx_6352_family(ds)) {
+               ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+               if (ret < 0)
+                       goto abort;
+               ret &= PORT_STATUS_CMODE_MASK;
+               if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
+                   (ret == PORT_STATUS_CMODE_1000BASE_X) ||
+                   (ret == PORT_STATUS_CMODE_SGMII)) {
+                       ret = mv88e6xxx_power_on_serdes(ds);
+                       if (ret < 0)
+                               goto abort;
+               }
+       }
+
        /* Port Control 2: don't force a good FCS, set the maximum frame size to
         * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
         * untagged frames on this port, do a destination address lookup on all
@@ -2714,13 +2782,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-       if (ret < 0)
-               goto error;
-       ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-error:
-       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
        mutex_unlock(&ps->smi_mutex);
+
        return ret;
 }
 
@@ -2731,14 +2795,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-       if (ret < 0)
-               goto error;
-
-       ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
-error:
-       _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+       ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
        mutex_unlock(&ps->smi_mutex);
+
        return ret;
 }
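
The mv88e6xxx changes above factor the paged PHY accesses into helpers that select the page, perform the access, and always restore page 0 through one restore_page_0 label, on the error path as well as on success. A toy version of that select/operate/restore shape, with an array standing in for the SMI bus:

    #include <errno.h>
    #include <stdio.h>

    static int current_page;
    static int regs[4][32];         /* [page][register], illustrative */

    static int select_page(int page)
    {
            if (page < 0 || page > 3)
                    return -EINVAL;
            current_page = page;
            return 0;
    }

    static int paged_read(int page, int reg)
    {
            int ret;

            ret = select_page(page);
            if (ret < 0)
                    goto restore_page_0;

            ret = regs[current_page][reg];
    restore_page_0:
            select_page(0);         /* always leave the device on page 0 */
            return ret;
    }

    int main(void)
    {
            regs[1][0] = 0x1140;
            printf("reg = %#x\n", (unsigned int)paged_read(1, 0));
            return 0;
    }
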
 
index 9a038ab..26a424a 100644 (file)
 #define SMI_CMD_OP_45_READ_DATA_INC    ((3 << 10) | SMI_CMD_BUSY)
 #define SMI_DATA               0x01
 
+/* Fiber/SERDES Registers are located at SMI address F, page 1 */
+#define REG_FIBER_SERDES       0x0f
+#define PAGE_FIBER_SERDES      0x01
+
 #define REG_PORT(p)            (0x10 + (p))
 #define PORT_STATUS            0x00
 #define PORT_STATUS_PAUSE_EN   BIT(15)
 #define PORT_STATUS_MGMII      BIT(6) /* 6185 */
 #define PORT_STATUS_TX_PAUSED  BIT(5)
 #define PORT_STATUS_FLOW_CTRL  BIT(4)
+#define PORT_STATUS_CMODE_MASK 0x0f
+#define PORT_STATUS_CMODE_100BASE_X    0x8
+#define PORT_STATUS_CMODE_1000BASE_X   0x9
+#define PORT_STATUS_CMODE_SGMII                0xa
 #define PORT_PCS_CTRL          0x01
 #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK        BIT(15)
 #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK        BIT(14)
index aabbd51..12a009d 100644 (file)
@@ -2653,7 +2653,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        /* Write request msg to hwrm channel */
        __iowrite32_copy(bp->bar0, data, msg_len / 4);
 
-       for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
+       for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
                writel(0, bp->bar0 + i);
 
        /* currently supports only one outstanding message */
@@ -3391,11 +3391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
 
+               cpr->cp_doorbell = bp->bar1 + i * 0x80;
                rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
                                              INVALID_STATS_CTX_ID);
                if (rc)
                        goto err_out;
-               cpr->cp_doorbell = bp->bar1 + i * 0x80;
                BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
                bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
        }
@@ -3830,6 +3830,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
        struct hwrm_ver_get_input req = {0};
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
 
+       bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3855,6 +3856,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
        if (!bp->hwrm_cmd_timeout)
                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 
+       if (resp->hwrm_intf_maj >= 1)
+               bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
 hwrm_ver_get_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
@@ -4555,7 +4559,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
                        req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
-                       req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+                       req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
                req->enables |=
                        cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
        } else {
index ec04c47..709b95b 100644 (file)
@@ -477,6 +477,7 @@ struct rx_tpa_end_cmp_ext {
 #define RING_CMP(idx)          ((idx) & bp->cp_ring_mask)
 #define NEXT_CMP(idx)          RING_CMP(ADV_RAW_CMP(idx, 1))
 
+#define BNXT_HWRM_MAX_REQ_LEN          (bp->hwrm_max_req_len)
 #define DFLT_HWRM_CMD_TIMEOUT          500
 #define HWRM_CMD_TIMEOUT               (bp->hwrm_cmd_timeout)
 #define HWRM_RESET_TIMEOUT             ((HWRM_CMD_TIMEOUT) * 4)
@@ -953,6 +954,7 @@ struct bnxt {
        dma_addr_t              hw_tx_port_stats_map;
        int                     hw_port_stats_size;
 
+       u16                     hwrm_max_req_len;
        int                     hwrm_cmd_timeout;
        struct mutex            hwrm_cmd_lock;  /* serialize hwrm messages */
        struct hwrm_ver_get_output      ver_resp;
index 9ada166..2e472f6 100644 (file)
@@ -855,10 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
        if (BNXT_VF(bp))
                return;
        epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
-       epause->rx_pause =
-               ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0);
-       epause->tx_pause =
-               ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
+       epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
+       epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
 }
 
 static int bnxt_set_pauseparam(struct net_device *dev,
index 6746fd0..cf6445d 100644 (file)
@@ -1171,6 +1171,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
+       unsigned int bytes_compl = 0;
        unsigned int c_index;
        unsigned int txbds_ready;
        unsigned int txbds_processed = 0;
@@ -1193,16 +1194,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
                        pkts_compl++;
-                       dev->stats.tx_packets++;
-                       dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+                       bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
                                         dma_unmap_len(tx_cb_ptr, dma_len),
                                         DMA_TO_DEVICE);
                        bcmgenet_free_cb(tx_cb_ptr);
                } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-                       dev->stats.tx_bytes +=
-                               dma_unmap_len(tx_cb_ptr, dma_len);
                        dma_unmap_page(&dev->dev,
                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
                                       dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1218,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        ring->free_bds += txbds_processed;
        ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
 
+       dev->stats.tx_packets += pkts_compl;
+       dev->stats.tx_bytes += bytes_compl;
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
                txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1297,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
 
        tx_cb_ptr->skb = skb;
 
-       skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+       skb_len = skb_headlen(skb);
 
        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
@@ -1464,6 +1465,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
+       /* Retain how many bytes will be sent on the wire, without TSB inserted
+        * by transmit checksum offload
+        */
+       GENET_CB(skb)->bytes_sent = skb->len;
+
        /* set the SKB transmit checksum */
        if (priv->desc_64b_en) {
                skb = bcmgenet_put_tx_csum(dev, skb);
index 9673675..1e2dc34 100644
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
        u32             flags;
 };
 
+struct bcmgenet_skb_cb {
+       unsigned int bytes_sent;        /* bytes on the wire (no TSB) */
+};
+
+#define GENET_CB(skb)  ((struct bcmgenet_skb_cb *)((skb)->cb))
+
 struct bcmgenet_tx_ring {
        spinlock_t      lock;           /* ring lock */
        struct napi_struct napi;        /* NAPI per tx queue */
index 6619178..48a7d7d 100644
@@ -917,7 +917,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                unsigned int frag_len = bp->rx_buffer_size;
 
                if (offset + frag_len > len) {
-                       BUG_ON(frag != last_frag);
+                       if (unlikely(frag != last_frag)) {
+                               dev_kfree_skb_any(skb);
+                               return -1;
+                       }
                        frag_len = len - offset;
                }
                skb_copy_to_linear_data_offset(skb, offset,
@@ -945,8 +948,23 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
        return 0;
 }
 
+static inline void macb_init_rx_ring(struct macb *bp)
+{
+       dma_addr_t addr;
+       int i;
+
+       addr = bp->rx_buffers_dma;
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               bp->rx_ring[i].addr = addr;
+               bp->rx_ring[i].ctrl = 0;
+               addr += bp->rx_buffer_size;
+       }
+       bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+}
+
 static int macb_rx(struct macb *bp, int budget)
 {
+       bool reset_rx_queue = false;
        int received = 0;
        unsigned int tail;
        int first_frag = -1;
@@ -972,10 +990,18 @@ static int macb_rx(struct macb *bp, int budget)
 
                if (ctrl & MACB_BIT(RX_EOF)) {
                        int dropped;
-                       BUG_ON(first_frag == -1);
+
+                       if (unlikely(first_frag == -1)) {
+                               reset_rx_queue = true;
+                               continue;
+                       }
 
                        dropped = macb_rx_frame(bp, first_frag, tail);
                        first_frag = -1;
+                       if (unlikely(dropped < 0)) {
+                               reset_rx_queue = true;
+                               continue;
+                       }
                        if (!dropped) {
                                received++;
                                budget--;
@@ -983,6 +1009,26 @@ static int macb_rx(struct macb *bp, int budget)
                }
        }
 
+       if (unlikely(reset_rx_queue)) {
+               unsigned long flags;
+               u32 ctrl;
+
+               netdev_err(bp->dev, "RX queue corruption: reset it\n");
+
+               spin_lock_irqsave(&bp->lock, flags);
+
+               ctrl = macb_readl(bp, NCR);
+               macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+
+               macb_init_rx_ring(bp);
+               macb_writel(bp, RBQP, bp->rx_ring_dma);
+
+               macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+               spin_unlock_irqrestore(&bp->lock, flags);
+               return received;
+       }
+
        if (first_frag != -1)
                bp->rx_tail = first_frag;
        else
@@ -1100,7 +1146,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                        macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-                               macb_writel(bp, ISR, MACB_BIT(RXUBR));
+                               queue_writel(queue, ISR, MACB_BIT(RXUBR));
                }
 
                if (status & MACB_BIT(ISR_ROVR)) {
@@ -1523,15 +1569,8 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
        int i;
-       dma_addr_t addr;
 
-       addr = bp->rx_buffers_dma;
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               bp->rx_ring[i].addr = addr;
-               bp->rx_ring[i].ctrl = 0;
-               addr += bp->rx_buffer_size;
-       }
-       bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+       macb_init_rx_ring(bp);
 
        for (i = 0; i < TX_RING_SIZE; i++) {
                bp->queues[0].tx_ring[i].addr = 0;
@@ -2957,9 +2996,10 @@ static int macb_probe(struct platform_device *pdev)
        phy_node =  of_get_next_available_child(np, NULL);
        if (phy_node) {
                int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-               if (gpio_is_valid(gpio))
+               if (gpio_is_valid(gpio)) {
                        bp->reset_gpio = gpio_to_desc(gpio);
-               gpiod_direction_output(bp->reset_gpio, 1);
+                       gpiod_direction_output(bp->reset_gpio, 1);
+               }
        }
        of_node_put(phy_node);
 
@@ -3029,7 +3069,8 @@ static int macb_remove(struct platform_device *pdev)
                mdiobus_free(bp->mii_bus);
 
                /* Shutdown the PHY if there is a GPIO reset */
-               gpiod_set_value(bp->reset_gpio, 0);
+               if (bp->reset_gpio)
+                       gpiod_set_value(bp->reset_gpio, 0);
 
                unregister_netdev(dev);
                clk_disable_unprepare(bp->tx_clk);
index 37c0815..08243c2 100644
@@ -943,8 +943,8 @@ fec_restart(struct net_device *ndev)
                else
                        val &= ~FEC_RACC_OPTIONS;
                writel(val, fep->hwp + FEC_RACC);
+               writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
        }
-       writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
 #endif
 
        /*
index 37d0cce..e8d36aa 100644
@@ -469,7 +469,7 @@ struct hnae_ae_ops {
                                   u32 *tx_usecs, u32 *rx_usecs);
        void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
                                            u32 *tx_frames, u32 *rx_frames);
-       void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+       int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
        int (*set_coalesce_frames)(struct hnae_handle *handle,
                                   u32 coalesce_frames);
        void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
index 285c893..a1cb461 100644
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
                ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
 
                ring_pair_cb->used_by_vf = 1;
-               if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
-                       ring_pair_cb->port_id_in_dsa = port_idx;
-               else
-                       ring_pair_cb->port_id_in_dsa = 0;
-
                ring_pair_cb++;
        }
 
@@ -453,59 +448,46 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
 static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
                                      u32 *tx_usecs, u32 *rx_usecs)
 {
-       int port;
-
-       port = hns_ae_map_eport_to_dport(handle->eport_id);
+       struct ring_pair_cb *ring_pair =
+               container_of(handle->qs[0], struct ring_pair_cb, q);
 
-       *tx_usecs = hns_rcb_get_coalesce_usecs(
-               hns_ae_get_dsaf_dev(handle->dev),
-               hns_dsaf_get_comm_idx_by_port(port));
-       *rx_usecs = hns_rcb_get_coalesce_usecs(
-               hns_ae_get_dsaf_dev(handle->dev),
-               hns_dsaf_get_comm_idx_by_port(port));
+       *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+                                              ring_pair->port_id_in_comm);
+       *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+                                              ring_pair->port_id_in_comm);
 }
 
 static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
                                               u32 *tx_frames, u32 *rx_frames)
 {
-       int port;
+       struct ring_pair_cb *ring_pair =
+               container_of(handle->qs[0], struct ring_pair_cb, q);
 
-       assert(handle);
-
-       port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-       *tx_frames = hns_rcb_get_coalesced_frames(
-               hns_ae_get_dsaf_dev(handle->dev), port);
-       *rx_frames = hns_rcb_get_coalesced_frames(
-               hns_ae_get_dsaf_dev(handle->dev), port);
+       *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+                                                 ring_pair->port_id_in_comm);
+       *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+                                                 ring_pair->port_id_in_comm);
 }
 
-static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
-                                     u32 timeout)
+static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+                                    u32 timeout)
 {
-       int port;
+       struct ring_pair_cb *ring_pair =
+               container_of(handle->qs[0], struct ring_pair_cb, q);
 
-       assert(handle);
-
-       port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-       hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
-                                  port, timeout);
+       return hns_rcb_set_coalesce_usecs(
+               ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
 }
 
 static int  hns_ae_set_coalesce_frames(struct hnae_handle *handle,
                                       u32 coalesce_frames)
 {
-       int port;
-       int ret;
+       struct ring_pair_cb *ring_pair =
+               container_of(handle->qs[0], struct ring_pair_cb, q);
 
-       assert(handle);
-
-       port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-       ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
-                                          port, coalesce_frames);
-       return ret;
+       return hns_rcb_set_coalesced_frames(
+               ring_pair->rcb_common,
+               ring_pair->port_id_in_comm, coalesce_frames);
 }
 
 void hns_ae_update_stats(struct hnae_handle *handle,
index 6e2b76e..44abb08 100644
@@ -664,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
                return;
 
        for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
-               snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc);
+               snprintf(buff, ETH_GSTRING_LEN, "%s",
+                        g_gmac_stats_string[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }
 }
index 5c1ac9b..5978a5c 100644
@@ -2219,17 +2219,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
        /* dsaf onode registers */
        for (i = 0; i < DSAF_XOD_NUM; i++) {
                p[311 + i] = dsaf_read_dev(ddev,
-                               DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90);
+                               DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
                p[319 + i] = dsaf_read_dev(ddev,
-                               DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90);
+                               DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
                p[327 + i] = dsaf_read_dev(ddev,
-                               DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90);
+                               DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
                p[335 + i] = dsaf_read_dev(ddev,
-                               DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90);
+                               DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
                p[343 + i] = dsaf_read_dev(ddev,
-                               DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90);
+                               DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
                p[351 + i] = dsaf_read_dev(ddev,
-                               DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90);
+                               DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
        }
 
        p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
index 607c3be..e69b022 100644
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
  */
 phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
 {
-       u32 hilink3_mode;
-       u32 hilink4_mode;
+       u32 mode;
+       u32 reg;
+       u32 shift;
+       bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
        void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
-       int dev_id = mac_cb->mac_id;
+       int mac_id = mac_cb->mac_id;
        phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
 
-       hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
-       hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
-       if (dev_id >= 0 && dev_id <= 3) {
-               if (hilink4_mode == 0)
-                       phy_if = PHY_INTERFACE_MODE_SGMII;
-               else
+       if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
+               phy_if = PHY_INTERFACE_MODE_SGMII;
+       } else if (mac_id >= 0 && mac_id <= 3) {
+               reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
+               mode = dsaf_read_reg(sys_ctl_vaddr, reg);
+               /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
+               shift = is_ver1 ? 0 : mac_id;
+               if (dsaf_get_bit(mode, shift))
                        phy_if = PHY_INTERFACE_MODE_XGMII;
-       } else if (dev_id >= 4 && dev_id <= 5) {
-               if (hilink3_mode == 0)
-                       phy_if = PHY_INTERFACE_MODE_SGMII;
                else
+                       phy_if = PHY_INTERFACE_MODE_SGMII;
+       } else if (mac_id >= 4 && mac_id <= 7) {
+               reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
+               mode = dsaf_read_reg(sys_ctl_vaddr, reg);
+               /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
+               shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
+               if (dsaf_get_bit(mode, shift))
                        phy_if = PHY_INTERFACE_MODE_XGMII;
-       } else {
-               phy_if = PHY_INTERFACE_MODE_SGMII;
+               else
+                       phy_if = PHY_INTERFACE_MODE_SGMII;
        }
-
-       dev_dbg(mac_cb->dev,
-               "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
-               hilink3_mode, hilink4_mode, dev_id, phy_if);
        return phy_if;
 }
 
index 1218880..28ee26e 100644
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
                dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
                               bd_size_type);
                dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
-                              ring_pair->port_id_in_dsa);
+                              ring_pair->port_id_in_comm);
                dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
-                              ring_pair->port_id_in_dsa);
+                              ring_pair->port_id_in_comm);
        } else {
                dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
                               (u32)dma);
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
                dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
                               bd_size_type);
                dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
-                              ring_pair->port_id_in_dsa);
+                              ring_pair->port_id_in_comm);
                dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
-                              ring_pair->port_id_in_dsa);
+                              ring_pair->port_id_in_comm);
        }
 }
 
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
                       desc_cnt);
 }
 
-/**
- *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- *@coalesced_frames:BD num for coalesced frames
- */
-static int  hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
-                                             u32 port_idx,
-                                             u32 coalesced_frames)
-{
-       if (coalesced_frames >= rcb_common->desc_num ||
-           coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
-               return -EINVAL;
-
-       dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
-                      coalesced_frames);
-       return 0;
-}
-
-/**
- *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- * return coaleseced frames value
- */
-static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
-                                            u32 port_idx)
+static void hns_rcb_set_port_timeout(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
 {
-       if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
-               port_idx = 0;
-
-       return dsaf_read_dev(rcb_common,
-                            RCB_CFG_PKTLINE_REG + port_idx * 4);
-}
-
-/**
- *hns_rcb_set_timeout - set rcb port coalesced time_out
- *@rcb_common: rcb_common device
- *@time_out:time for coalesced time_out
- */
-static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
-                               u32 timeout)
-{
-       dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+       if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+               dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
+                              timeout * HNS_RCB_CLK_FREQ_MHZ);
+       else
+               dsaf_write_dev(rcb_common,
+                              RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+                              timeout);
 }
 
 static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
 
        for (i = 0; i < port_num; i++) {
                hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
-               (void)hns_rcb_set_port_coalesced_frames(
-                       rcb_common, i, rcb_common->coalesced_frames);
+               (void)hns_rcb_set_coalesced_frames(
+                       rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+               hns_rcb_set_port_timeout(
+                       rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
        }
-       hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
 
        dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
                       HNS_RCB_COMMON_ENDIAN);
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
        hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
 }
 
-static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+static int hns_rcb_get_port_in_comm(
+       struct rcb_common_cb *rcb_common, int ring_idx)
 {
        int comm_index = rcb_common->comm_index;
        int port;
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
                q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
                port = ring_idx / q_num;
        } else {
-               port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+               port = 0; /* config debug-ports port_id_in_comm to 0*/
        }
 
        return port;
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
                ring_pair_cb->index = i;
                ring_pair_cb->q.io_base =
                        RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
-               ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+               ring_pair_cb->port_id_in_comm =
+                       hns_rcb_get_port_in_comm(rcb_common, i);
                ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
                is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
                          platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
 /**
  *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
  *@rcb_common: rcb_common device
- *@comm_index:port index
- *return coalesced_frames
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
  */
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+u32 hns_rcb_get_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx)
 {
-       int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
-       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
-       return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+       return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
 }
 
 /**
  *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
  *@rcb_common: rcb_common device
- *@comm_index:port index
- *return time_out
+ *@port_idx:port id in comm
+ *
+ *Returns: time_out
  */
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+u32 hns_rcb_get_coalesce_usecs(
+       struct rcb_common_cb *rcb_common, u32 port_idx)
 {
-       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
-       return rcb_comm->timeout;
+       if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+               return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
+                      HNS_RCB_CLK_FREQ_MHZ;
+       else
+               return dsaf_read_dev(rcb_common,
+                                    RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
 }
 
 /**
  *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
  *@rcb_common: rcb_common device
- *@comm_index: comm :index
- *@etx_usecs:tx time for coalesced time_out
- *@rx_usecs:rx time for coalesced time_out
+ *@port_idx:port id in comm
+ *@timeout:tx/rx time for coalesced time_out
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
  */
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
-                               int port, u32 timeout)
+int hns_rcb_set_coalesce_usecs(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
 {
-       int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
-       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+       u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
 
-       if (rcb_comm->timeout == timeout)
-               return;
+       if (timeout == old_timeout)
+               return 0;
 
-       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-               dev_err(dsaf_dev->dev,
-                       "error: not support coalesce_usecs setting!\n");
-               return;
+       if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+               if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+                       dev_err(rcb_common->dsaf_dev->dev,
+                               "error: not support coalesce_usecs setting!\n");
+                       return -EINVAL;
+               }
        }
-       rcb_comm->timeout = timeout;
-       hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+       if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+               dev_err(rcb_common->dsaf_dev->dev,
+                       "error: not support coalesce %dus!\n", timeout);
+               return -EINVAL;
+       }
+       hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+       return 0;
 }
 
 /**
  *hns_rcb_set_coalesced_frames - set rcb coalesced frames
  *@rcb_common: rcb_common device
- *@tx_frames:tx BD num for coalesced frames
- *@rx_frames:rx BD num for coalesced frames
- *Return 0 on success, negative on failure
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx/rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
  */
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
-                                int port, u32 coalesced_frames)
+int hns_rcb_set_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
 {
-       int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
-       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-       u32 coalesced_reg_val;
-       int ret;
+       u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
 
-       coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
-
-       if (coalesced_reg_val == coalesced_frames)
+       if (coalesced_frames == old_waterline)
                return 0;
 
-       if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
-               ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
-                                                       coalesced_frames);
-               return ret;
-       } else {
+       if (coalesced_frames >= rcb_common->desc_num ||
+           coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
+           coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
+               dev_err(rcb_common->dsaf_dev->dev,
+                       "error: not support coalesce_frames setting!\n");
                return -EINVAL;
        }
+
+       dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+                      coalesced_frames);
+       return 0;
 }
 
 /**
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
        rcb_common->dsaf_dev = dsaf_dev;
 
        rcb_common->desc_num = dsaf_dev->desc_num;
-       rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
-       rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
 
        hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
        rcb_common->max_vfn = max_vfn;
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index)
 void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
 {
        u32 *regs = data;
+       bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
+       bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+       u32 reg_tmp;
+       u32 reg_num_tmp;
        u32 i = 0;
 
        /*rcb common registers */
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
                        = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
        }
 
-       regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
-       regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
-       regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+       reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
+       reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
+       for (i = 0; i < reg_num_tmp; i++)
+               regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
+
+       regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+       regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
 
        /* mark end of rcb common regs */
-       for (i = 73; i < 80; i++)
+       for (i = 78; i < 80; i++)
                regs[i] = 0xcccccccc;
 }
 
index 81fe9f8..eb61014 100644
@@ -38,7 +38,9 @@ struct rcb_common_cb;
 #define HNS_RCB_MAX_COALESCED_FRAMES           1023
 #define HNS_RCB_MIN_COALESCED_FRAMES           1
 #define HNS_RCB_DEF_COALESCED_FRAMES           50
-#define HNS_RCB_MAX_TIME_OUT                   0x500
+#define HNS_RCB_CLK_FREQ_MHZ                   350
+#define HNS_RCB_MAX_COALESCED_USECS            0x3ff
+#define HNS_RCB_DEF_COALESCED_USECS            3
 
 #define HNS_RCB_COMMON_ENDIAN                  1
 
@@ -82,7 +84,7 @@ struct ring_pair_cb {
 
        int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
 
-       u8 port_id_in_dsa;
+       u8 port_id_in_comm;
        u8 used_by_vf;
 
        struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@ struct rcb_common_cb {
 
        u8 comm_index;
        u32 ring_num;
-       u32 coalesced_frames; /* frames  threshold of  rx interrupt   */
-       u32 timeout; /* time threshold of  rx interrupt  */
        u32 desc_num; /*  desc num per queue*/
 
        struct ring_pair_cb ring_pair_cb[0];
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
-                               int comm_index, u32 timeout);
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
-                                int comm_index, u32 coalesce_frames);
+u32 hns_rcb_get_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_coalesce_usecs(
+       struct rcb_common_cb *rcb_common, u32 port_idx);
+int hns_rcb_set_coalesce_usecs(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
+int hns_rcb_set_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
 void hns_rcb_update_stats(struct hnae_queue *queue);
 
 void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
index bf62687..7d7204f 100644
 /*serdes offset**/
 #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
 #define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
+#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
 #define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
 #define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
 #define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
 #define RCB_CFG_OVERTIME_REG                   0x9300
 #define RCB_CFG_PKTLINE_INT_NUM_REG            0x9304
 #define RCB_CFG_OVERTIME_INT_NUM_REG           0x9308
+#define RCB_PORT_CFG_OVERTIME_REG              0x9430
 
 #define RCB_RING_RX_RING_BASEADDR_L_REG                0x00000
 #define RCB_RING_RX_RING_BASEADDR_H_REG                0x00004
index 71aa37b..687204b 100644
@@ -913,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
 static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 {
        struct hnae_ring *ring = ring_data->ring;
-       int head = ring->next_to_clean;
-
-       /* for hardware bug fixed */
-       head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+       int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 
        if (head != ring->next_to_clean) {
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -959,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 0);
-
-               ring_data->fini_process(ring_data);
+               if (ring_data->fini_process)
+                       ring_data->fini_process(ring_data);
                return 0;
        }
 
@@ -1723,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 {
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
+       bool is_ver1 = AE_IS_VER1(priv->enet_ver);
        int i;
 
        if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1740,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                rd->queue_index = i;
                rd->ring = &h->qs[i]->tx_ring;
                rd->poll_one = hns_nic_tx_poll_one;
-               rd->fini_process = hns_nic_tx_fini_pro;
+               rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
 
                netif_napi_add(priv->netdev, &rd->napi,
                               hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1752,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                rd->ring = &h->qs[i - h->q_num]->rx_ring;
                rd->poll_one = hns_nic_rx_poll_one;
                rd->ex_process = hns_nic_rx_up_pro;
-               rd->fini_process = hns_nic_rx_fini_pro;
+               rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
 
                netif_napi_add(priv->netdev, &rd->napi,
                               hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1816,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
        h = hnae_get_handle(&priv->netdev->dev,
                            priv->ae_node, priv->port_id, NULL);
        if (IS_ERR_OR_NULL(h)) {
-               ret = PTR_ERR(h);
+               ret = -ENODEV;
                dev_dbg(priv->dev, "has not handle, register notifier!\n");
                goto out;
        }
index 9c3ba65..3d746c8 100644
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev,
            (!ops->set_coalesce_frames))
                return -ESRCH;
 
-       ops->set_coalesce_usecs(priv->ae_handle,
-                                       ec->rx_coalesce_usecs);
+       ret = ops->set_coalesce_usecs(priv->ae_handle,
+                                     ec->rx_coalesce_usecs);
+       if (ret)
+               return ret;
 
        ret = ops->set_coalesce_frames(
                priv->ae_handle,
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
        struct phy_device *phy_dev = priv->phy;
 
        retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
-       retval = phy_write(phy_dev, HNS_LED_FC_REG, value);
-       retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+       retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
+       retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
        if (retval) {
                netdev_err(netdev, "mdiobus_write fail !\n");
                return retval;
index 84fa28c..e4949af 100644
@@ -661,9 +661,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 9)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 10)
 #define IXGBE_FLAG2_PHY_INTERRUPT              (u32)(1 << 11)
-#ifdef CONFIG_IXGBE_VXLAN
 #define IXGBE_FLAG2_VXLAN_REREG_NEEDED         BIT(12)
-#endif
 #define IXGBE_FLAG2_VLAN_PROMISC               BIT(13)
 
        /* Tx fast path data */
@@ -675,6 +673,9 @@ struct ixgbe_adapter {
        int num_rx_queues;
        u16 rx_itr_setting;
 
+       /* Port number used to identify VXLAN traffic */
+       __be16 vxlan_port;
+
        /* TX */
        struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 
@@ -782,9 +783,6 @@ struct ixgbe_adapter {
        u32 timer_event_accumulator;
        u32 vferr_refcount;
        struct ixgbe_mac_addr *mac_table;
-#ifdef CONFIG_IXGBE_VXLAN
-       u16 vxlan_port;
-#endif
        struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff *ixgbe_hwmon_buff;
@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[];
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
 
+int ixgbe_open(struct net_device *netdev);
+int ixgbe_close(struct net_device *netdev);
 void ixgbe_up(struct ixgbe_adapter *adapter);
 void ixgbe_down(struct ixgbe_adapter *adapter);
 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
index 726e0ee..b3530e1 100644
@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 
                if (if_running)
                        /* indicate we're in test mode */
-                       dev_close(netdev);
+                       ixgbe_close(netdev);
                else
                        ixgbe_reset(adapter);
 
@@ -2091,7 +2091,7 @@ skip_loopback:
                /* clear testing bit and return adapter to previous state */
                clear_bit(__IXGBE_TESTING, &adapter->state);
                if (if_running)
-                       dev_open(netdev);
+                       ixgbe_open(netdev);
                else if (hw->mac.ops.disable_tx_laser)
                        hw->mac.ops.disable_tx_laser(hw);
        } else {
index 569cb07..7df3fe2 100644
@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
-#ifdef CONFIG_IXGBE_VXLAN
                adapter->vxlan_port = 0;
-#endif
                break;
        default:
                break;
@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int ixgbe_open(struct net_device *netdev)
+int ixgbe_open(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int ixgbe_close(struct net_device *netdev)
+int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                struct ipv6hdr *ipv6;
        } hdr;
        struct tcphdr *th;
+       unsigned int hlen;
        struct sk_buff *skb;
-#ifdef CONFIG_IXGBE_VXLAN
-       u8 encap = false;
-#endif /* CONFIG_IXGBE_VXLAN */
        __be16 vlan_id;
+       int l4_proto;
 
        /* if ring doesn't have a interrupt vector, cannot perform ATR */
        if (!q_vector)
@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
        ring->atr_count++;
 
+       /* currently only IPv4/IPv6 with TCP is supported */
+       if ((first->protocol != htons(ETH_P_IP)) &&
+           (first->protocol != htons(ETH_P_IPV6)))
+               return;
+
        /* snag network header to get L4 type and address */
        skb = first->skb;
        hdr.network = skb_network_header(skb);
-       if (!skb->encapsulation) {
-               th = tcp_hdr(skb);
-       } else {
 #ifdef CONFIG_IXGBE_VXLAN
+       if (skb->encapsulation &&
+           first->protocol == htons(ETH_P_IP) &&
+           hdr.ipv4->protocol != IPPROTO_UDP) {
                struct ixgbe_adapter *adapter = q_vector->adapter;
 
-               if (!adapter->vxlan_port)
-                       return;
-               if (first->protocol != htons(ETH_P_IP) ||
-                   hdr.ipv4->version != IPVERSION ||
-                   hdr.ipv4->protocol != IPPROTO_UDP) {
-                       return;
-               }
-               if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
-                       return;
-               encap = true;
-               hdr.network = skb_inner_network_header(skb);
-               th = inner_tcp_hdr(skb);
-#else
-               return;
-#endif /* CONFIG_IXGBE_VXLAN */
+               /* verify the port is recognized as VXLAN */
+               if (adapter->vxlan_port &&
+                   udp_hdr(skb)->dest == adapter->vxlan_port)
+                       hdr.network = skb_inner_network_header(skb);
        }
+#endif /* CONFIG_IXGBE_VXLAN */
 
        /* Currently only IPv4/IPv6 with TCP is supported */
        switch (hdr.ipv4->version) {
        case IPVERSION:
-               if (hdr.ipv4->protocol != IPPROTO_TCP)
-                       return;
+               /* access ihl as u8 to avoid unaligned access on ia64 */
+               hlen = (hdr.network[0] & 0x0F) << 2;
+               l4_proto = hdr.ipv4->protocol;
                break;
        case 6:
-               if (likely((unsigned char *)th - hdr.network ==
-                          sizeof(struct ipv6hdr))) {
-                       if (hdr.ipv6->nexthdr != IPPROTO_TCP)
-                               return;
-               } else {
-                       __be16 frag_off;
-                       u8 l4_hdr;
-
-                       ipv6_skip_exthdr(skb, hdr.network - skb->data +
-                                             sizeof(struct ipv6hdr),
-                                        &l4_hdr, &frag_off);
-                       if (unlikely(frag_off))
-                               return;
-                       if (l4_hdr != IPPROTO_TCP)
-                               return;
-               }
+               hlen = hdr.network - skb->data;
+               l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+               hlen -= hdr.network - skb->data;
                break;
        default:
                return;
        }
 
-       /* skip this packet since it is invalid or the socket is closing */
-       if (!th || th->fin)
+       if (l4_proto != IPPROTO_TCP)
+               return;
+
+       th = (struct tcphdr *)(hdr.network + hlen);
+
+       /* skip this packet since the socket is closing */
+       if (th->fin)
                return;
 
        /* sample on all syn packets or once every atr sample count */
@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                break;
        }
 
-#ifdef CONFIG_IXGBE_VXLAN
-       if (encap)
+       if (hdr.network != skb_network_header(skb))
                input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
-#endif /* CONFIG_IXGBE_VXLAN */
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
                               struct tc_cls_u32_offload *cls)
 {
+       u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
+       u32 loc;
        int err;
 
+       if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
+               return -EINVAL;
+
+       loc = cls->knode.handle & 0xfffff;
+
        spin_lock(&adapter->fdir_perfect_lock);
-       err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle);
+       err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
        spin_unlock(&adapter->fdir_perfect_lock);
        return err;
 }
@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
                                            __be16 protocol,
                                            struct tc_cls_u32_offload *cls)
 {
+       u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+               return -EINVAL;
+
        /* This ixgbe devices do not support hash tables at the moment
         * so abort when given hash tables.
         */
        if (cls->hnode.divisor > 0)
                return -EINVAL;
 
-       set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
+       set_bit(uhtid - 1, &adapter->tables);
        return 0;
 }
 
 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
                                            struct tc_cls_u32_offload *cls)
 {
-       clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
+       u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+               return -EINVAL;
+
+       clear_bit(uhtid - 1, &adapter->tables);
        return 0;
 }
 
@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
 #endif
        int i, err = 0;
        u8 queue;
-       u32 handle;
+       u32 uhtid, link_uhtid;
 
        memset(&mask, 0, sizeof(union ixgbe_atr_input));
-       handle = cls->knode.handle;
+       uhtid = TC_U32_USERHTID(cls->knode.handle);
+       link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
 
-       /* At the moment cls_u32 jumps to transport layer and skips past
+       /* At the moment cls_u32 jumps to network layer and skips past
         * L2 headers. The canonical method to match L2 frames is to use
         * negative values. However this is error prone at best but really
         * just broken because there is no way to "know" what sort of hdr
-        * is in front of the transport layer. Fix cls_u32 to support L2
+        * is in front of the network layer. Fix cls_u32 to support L2
         * headers when needed.
         */
        if (protocol != htons(ETH_P_IP))
                return -EINVAL;
 
-       if (cls->knode.link_handle ||
-           cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
+       if (link_uhtid) {
                struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-               u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
 
-               if (!test_bit(uhtid, &adapter->tables))
+               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return -EINVAL;
+
+               if (!test_bit(link_uhtid - 1, &adapter->tables))
                        return -EINVAL;
 
                for (i = 0; nexthdr[i].jump; i++) {
@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                            nexthdr->mask != cls->knode.sel->keys[0].mask)
                                return -EINVAL;
 
-                       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
-                               return -EINVAL;
-
-                       adapter->jump_tables[uhtid] = nexthdr->jump;
+                       adapter->jump_tables[link_uhtid] = nexthdr->jump;
                }
                return 0;
        }
@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
         * To add support for new nodes update ixgbe_model.h parse structures
         * this function _should_ be generic try not to hardcode values here.
         */
-       if (TC_U32_USERHTID(handle) == 0x800) {
+       if (uhtid == 0x800) {
                field_ptr = adapter->jump_tables[0];
        } else {
-               if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables))
+               if (uhtid >= IXGBE_MAX_LINK_HANDLE)
                        return -EINVAL;
 
-               field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)];
+               field_ptr = adapter->jump_tables[uhtid];
        }
 
        if (!field_ptr)
@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                int j;
 
                for (j = 0; field_ptr[j].val; j++) {
-                       if (field_ptr[j].off == off &&
-                           field_ptr[j].mask == m) {
+                       if (field_ptr[j].off == off) {
                                field_ptr[j].val(input, &mask, val, m);
                                input->filter.formatted.flow_type |=
                                        field_ptr[j].type;
@@ -8393,8 +8391,8 @@ err_out:
        return -EINVAL;
 }
 
-int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                    struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+                           struct tc_to_netdev *tc)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
 
@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u16 new_port = ntohs(port);
 
        if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
        if (sa_family == AF_INET6)
                return;
 
-       if (adapter->vxlan_port == new_port)
+       if (adapter->vxlan_port == port)
                return;
 
        if (adapter->vxlan_port) {
                netdev_info(dev,
                            "Hit Max num of VXLAN ports, not adding port %d\n",
-                           new_port);
+                           ntohs(port));
                return;
        }
 
-       adapter->vxlan_port = new_port;
-       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+       adapter->vxlan_port = port;
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
 }
 
 /**
@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
                                 __be16 port)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       u16 new_port = ntohs(port);
 
        if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
        if (sa_family == AF_INET6)
                return;
 
-       if (adapter->vxlan_port != new_port) {
+       if (adapter->vxlan_port != port) {
                netdev_info(dev, "Port %d was not found, not deleting\n",
-                           new_port);
+                           ntohs(port));
                return;
        }
 
@@ -9265,17 +9261,6 @@ skip_sriov:
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
 
-#ifdef CONFIG_IXGBE_VXLAN
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
-               netdev->hw_enc_features |= NETIF_F_RXCSUM;
-               break;
-       default:
-               break;
-       }
-#endif /* CONFIG_IXGBE_VXLAN */
-
 #ifdef CONFIG_IXGBE_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
@@ -9329,6 +9314,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
+       /* Set hw->mac.addr to permanent MAC address */
+       ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
        ixgbe_mac_set_default_filter(adapter);
 
        setup_timer(&adapter->service_timer, &ixgbe_service_timer,
index ce48872..74c53ad 100644
@@ -32,7 +32,6 @@
 
 struct ixgbe_mat_field {
        unsigned int off;
-       unsigned int mask;
        int (*val)(struct ixgbe_fdir_filter *input,
                   union ixgbe_atr_input *mask,
                   u32 val, u32 m);
@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
 }
 
 static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
-       { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip,
+       { .off = 12, .val = ixgbe_mat_prgm_sip,
          .type = IXGBE_ATR_FLOW_TYPE_IPV4},
-       { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip,
+       { .off = 16, .val = ixgbe_mat_prgm_dip,
          .type = IXGBE_ATR_FLOW_TYPE_IPV4},
        { .val = NULL } /* terminal node */
 };
 
-static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input,
+static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
                                       union ixgbe_atr_input *mask,
                                       u32 val, u32 m)
 {
        input->filter.formatted.src_port = val & 0xffff;
        mask->formatted.src_port = m & 0xffff;
-       return 0;
-};
+       input->filter.formatted.dst_port = val >> 16;
+       mask->formatted.dst_port = m >> 16;
 
-static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
-                                      union ixgbe_atr_input *mask,
-                                      u32 val, u32 m)
-{
-       input->filter.formatted.dst_port = val & 0xffff;
-       mask->formatted.dst_port = m & 0xffff;
        return 0;
 };
 
 static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
-       {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport,
-        .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
-       {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
+       {.off = 0, .val = ixgbe_mat_prgm_ports,
         .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
        { .val = NULL } /* terminal node */
 };
index 87aca3f..68a9c64 100644
@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
                command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
                if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
                        break;
-               usleep_range(10, 20);
+               udelay(10);
        }
        if (ctrl)
                *ctrl = command;
index c48aef6..d7aa4b2 100644
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
 
                if (if_running)
                        /* indicate we're in test mode */
-                       dev_close(netdev);
+                       ixgbevf_close(netdev);
                else
                        ixgbevf_reset(adapter);
 
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
 
                clear_bit(__IXGBEVF_TESTING, &adapter->state);
                if (if_running)
-                       dev_open(netdev);
+                       ixgbevf_open(netdev);
        } else {
                hw_dbg(&adapter->hw, "online testing starting\n");
                /* Online tests */
index 68ec7da..991eeae 100644
@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
+int ixgbevf_open(struct net_device *netdev);
+int ixgbevf_close(struct net_device *netdev);
 void ixgbevf_up(struct ixgbevf_adapter *adapter);
 void ixgbevf_down(struct ixgbevf_adapter *adapter);
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
index 0ea14c0..b0edae9 100644
@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int ixgbevf_open(struct net_device *netdev)
+int ixgbevf_open(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3205,7 @@ err_setup_reset:
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int ixgbevf_close(struct net_device *netdev)
+int ixgbevf_close(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
+       int err;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
-       ether_addr_copy(hw->mac.addr, addr->sa_data);
-
        spin_lock_bh(&adapter->mbx_lock);
 
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+       err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
 
        spin_unlock_bh(&adapter->mbx_lock);
 
+       if (err)
+               return -EPERM;
+
+       ether_addr_copy(hw->mac.addr, addr->sa_data);
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
        return 0;
 }
 
index 61a98f4..4d613a4 100644 (file)
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 
        /* if nacked the address was rejected, use "perm_addr" */
        if (!ret_val &&
-           (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+           (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
                ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+               return IXGBE_ERR_MBX;
+       }
 
        return ret_val;
 }
index 577f7ca..7fc4902 100644 (file)
 
 #define MVNETA_VLAN_TAG_LEN             4
 
-#define MVNETA_CPU_D_CACHE_LINE_SIZE    32
 #define MVNETA_TX_CSUM_DEF_SIZE                1600
 #define MVNETA_TX_CSUM_MAX_SIZE                9800
 #define MVNETA_ACC_MODE_EXT1           1
 #define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN,                        \
-             MVNETA_CPU_D_CACHE_LINE_SIZE)
+             cache_line_size())
 
 #define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
@@ -2764,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
        if (rxq->descs == NULL)
                return -ENOMEM;
 
-       BUG_ON(rxq->descs !=
-              PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
        rxq->last_desc = rxq->size - 1;
 
        /* Set Rx descriptors queue starting address */
@@ -2837,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
        if (txq->descs == NULL)
                return -ENOMEM;
 
-       /* Make sure descriptor address is cache line size aligned  */
-       BUG_ON(txq->descs !=
-              PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
        txq->last_desc = txq->size - 1;
 
        /* Set maximum bandwidth for enabled TXQs */
@@ -3050,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
        return mtu;
 }
 
+static void mvneta_percpu_enable(void *arg)
+{
+       struct mvneta_port *pp = arg;
+
+       enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+       struct mvneta_port *pp = arg;
+
+       disable_percpu_irq(pp->dev->irq);
+}
+
 /* Change the device mtu */
 static int mvneta_change_mtu(struct net_device *dev, int mtu)
 {
@@ -3074,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
         * reallocation of the queues
         */
        mvneta_stop_dev(pp);
+       on_each_cpu(mvneta_percpu_disable, pp, true);
 
        mvneta_cleanup_txqs(pp);
        mvneta_cleanup_rxqs(pp);
@@ -3097,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
                return ret;
        }
 
+       on_each_cpu(mvneta_percpu_enable, pp, true);
        mvneta_start_dev(pp);
        mvneta_port_up(pp);
 
@@ -3250,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
        pp->phy_dev = NULL;
 }
 
-static void mvneta_percpu_enable(void *arg)
-{
-       struct mvneta_port *pp = arg;
-
-       enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
-}
-
-static void mvneta_percpu_disable(void *arg)
-{
-       struct mvneta_port *pp = arg;
-
-       disable_percpu_irq(pp->dev->irq);
-}
-
 /* Electing a CPU must be done in an atomic way: it should be done
  * after or before the removal/insertion of a CPU and this function is
  * not reentrant.
index c797971..868a957 100644 (file)
 /* Lbtd 802.3 type */
 #define MVPP2_IP_LBDT_TYPE             0xfffa
 
-#define MVPP2_CPU_D_CACHE_LINE_SIZE    32
 #define MVPP2_TX_CSUM_MAX_SIZE         9800
 
 /* Timeout constants */
 
 #define MVPP2_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
-             ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+             ETH_HLEN + ETH_FCS_LEN, cache_line_size())
 
 #define MVPP2_RX_BUF_SIZE(pkt_size)    ((pkt_size) + NET_SKB_PAD)
 #define MVPP2_RX_TOTAL_SIZE(buf_size)  ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
        if (!aggr_txq->descs)
                return -ENOMEM;
 
-       /* Make sure descriptor address is cache line size aligned  */
-       BUG_ON(aggr_txq->descs !=
-              PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
        aggr_txq->last_desc = aggr_txq->size - 1;
 
        /* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        if (!rxq->descs)
                return -ENOMEM;
 
-       BUG_ON(rxq->descs !=
-              PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
        rxq->last_desc = rxq->size - 1;
 
        /* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        if (!txq->descs)
                return -ENOMEM;
 
-       /* Make sure descriptor address is cache line size aligned  */
-       BUG_ON(txq->descs !=
-              PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
        txq->last_desc = txq->size - 1;
 
        /* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
 
                /* Map physical Rx queue to port's logical Rx queue */
                rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
-               if (!rxq)
+               if (!rxq) {
+                       err = -ENOMEM;
                        goto err_free_percpu;
+               }
                /* Map this Rx queue to a physical queue */
                rxq->id = port->first_rxq + queue;
                rxq->port = port->id;
index ffd0acc..2017b01 100644 (file)
@@ -2750,7 +2750,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                       enum qed_int_mode int_mode)
 {
-       int rc;
+       int rc = 0;
 
        /* Configure AEU signal change to produce attentions */
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
index ef33270..6d31f92 100644 (file)
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "1.00.00.34"
+#define DRV_VERSION    "1.00.00.35"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
index 4e1a7db..087e14a 100644 (file)
@@ -1377,11 +1377,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
                /* TAG and timestamp required flag */
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-               skb_tx_timestamp(skb);
                desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
                desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
        }
 
+       skb_tx_timestamp(skb);
        /* Descriptor type must be set after all the above writes */
        dma_wmb();
        desc->die_dt = DT_FEND;
index b02eed1..73427e2 100644 (file)
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
        return 0;
 
 err_rx_irq_unmap:
-       while (--i)
+       while (i--)
                irq_dispose_mapping(priv->rxq[i]->irq_no);
        i = SXGBE_TX_QUEUES;
 err_tx_irq_unmap:
-       while (--i)
+       while (i--)
                irq_dispose_mapping(priv->txq[i]->irq_no);
        irq_dispose_mapping(priv->irq);
 err_drv_remove:
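
The two loop changes above fix the error-unwind direction: with "while (--i)" the mapping for queue 0 is never disposed (and the loop misbehaves entirely if i is already 0), whereas "while (i--)" walks i-1 down to 0. A small stand-alone sketch, assuming i holds the index whose setup failed:

#include <stdio.h>

static void undo_old(int i)     /* pre-decrement: skips index 0 */
{
        while (--i)
                printf("dispose %d\n", i);
}

static void undo_new(int i)     /* post-decrement: covers i-1 down to 0 */
{
        while (i--)
                printf("dispose %d\n", i);
}

int main(void)
{
        undo_old(3);            /* prints 2, 1       */
        undo_new(3);            /* prints 2, 1, 0    */
        return 0;
}
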
index e13228f..011386f 100644 (file)
@@ -199,11 +199,6 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 {
        unsigned int tdes1 = p->des1;
 
-       if (mode == STMMAC_CHAIN_MODE)
-               norm_set_tx_desc_len_on_chain(p, len);
-       else
-               norm_set_tx_desc_len_on_ring(p, len);
-
        if (is_fs)
                tdes1 |= TDES1_FIRST_SEGMENT;
        else
@@ -217,10 +212,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
        if (ls)
                tdes1 |= TDES1_LAST_SEGMENT;
 
-       if (tx_own)
-               tdes1 |= TDES0_OWN;
-
        p->des1 = tdes1;
+
+       if (mode == STMMAC_CHAIN_MODE)
+               norm_set_tx_desc_len_on_chain(p, len);
+       else
+               norm_set_tx_desc_len_on_ring(p, len);
+
+       if (tx_own)
+               p->des0 |= TDES0_OWN;
 }
 
 static void ndesc_set_tx_ic(struct dma_desc *p)
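
The reordering above completes every descriptor field before ownership is handed over, and sets the OWN bit in des0 (which is what TDES0_OWN refers to) rather than folding it into the des1 word. A hedged sketch of the general "fill, then publish" shape; field names are made up, and in the driver the dma_wmb() that orders these writes lives in the caller:

/* Illustrative only: generic order for handing a descriptor that is
 * shared with hardware over to the DMA engine. */
struct fake_desc {
        unsigned int des0;              /* ownership bit lives here */
        unsigned int des1;              /* length / segment flags   */
};
#define FAKE_DESC_OWN   0x80000000U

static void publish_desc(struct fake_desc *p, unsigned int flags,
                         unsigned int len)
{
        p->des1 = flags | len;          /* 1. finish the descriptor        */
        /* 2. memory barrier here in real code (e.g. dma_wmb())            */
        p->des0 |= FAKE_DESC_OWN;       /* 3. only now let the DMA use it  */
}

int main(void)
{
        struct fake_desc d = { 0, 0 };

        publish_desc(&d, 0x40000000U, 64);
        return 0;
}
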
index 4c5ce98..78464fa 100644 (file)
@@ -278,7 +278,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
-       char *phy_bus_name = priv->plat->phy_bus_name;
        unsigned long flags;
        bool ret = false;
 
@@ -290,7 +289,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                goto out;
 
        /* Never init EEE in case of a switch is attached */
-       if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
+       if (priv->phydev->is_pseudo_fixed_link)
                goto out;
 
        /* MAC core supports the EEE feature. */
@@ -827,12 +826,8 @@ static int stmmac_init_phy(struct net_device *dev)
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
-               if (priv->plat->phy_bus_name)
-                       snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
-                                priv->plat->phy_bus_name, priv->plat->bus_id);
-               else
-                       snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-                                priv->plat->bus_id);
+               snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+                        priv->plat->bus_id);
 
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
@@ -871,9 +866,8 @@ static int stmmac_init_phy(struct net_device *dev)
        }
 
        /* If attached to a switch, there is no reason to poll phy handler */
-       if (priv->plat->phy_bus_name)
-               if (!strcmp(priv->plat->phy_bus_name, "fixed"))
-                       phydev->irq = PHY_IGNORE_INTERRUPT;
+       if (phydev->is_pseudo_fixed_link)
+               phydev->irq = PHY_IGNORE_INTERRUPT;
 
        pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
                 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
index ea76129..06704ca 100644 (file)
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev)
        struct mii_bus *new_bus;
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
-       int addr, found;
        struct device_node *mdio_node = priv->plat->mdio_node;
+       int addr, found;
 
        if (!mdio_bus_data)
                return 0;
 
-       if (IS_ENABLED(CONFIG_OF)) {
-               if (mdio_node) {
-                       netdev_dbg(ndev, "FOUND MDIO subnode\n");
-               } else {
-                       netdev_warn(ndev, "No MDIO subnode found\n");
-               }
-       }
-
        new_bus = mdiobus_alloc();
        if (new_bus == NULL)
                return -ENOMEM;
index dcbd2a1..cf37ea5 100644 (file)
@@ -131,6 +131,69 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
        return axi;
 }
 
+/**
+ * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+ * @plat: driver data platform structure
+ * @np: device tree node
+ * @dev: device pointer
+ * Description:
+ * The mdio bus will be allocated in case of a phy transceiver is on board;
+ * it will be NULL if the fixed-link is configured.
+ * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
+ * in any case (for DSA, mdio must be registered even if fixed-link).
+ * The table below sums the supported configurations:
+ *     -------------------------------
+ *     snps,phy-addr   |     Y
+ *     -------------------------------
+ *     phy-handle      |     Y
+ *     -------------------------------
+ *     fixed-link      |     N
+ *     -------------------------------
+ *     snps,dwmac-mdio |
+ *       even if       |     Y
+ *     fixed-link      |
+ *     -------------------------------
+ *
+ * It returns 0 in case of success otherwise -ENODEV.
+ */
+static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+                        struct device_node *np, struct device *dev)
+{
+       bool mdio = true;
+
+       /* If phy-handle property is passed from DT, use it as the PHY */
+       plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+       if (plat->phy_node)
+               dev_dbg(dev, "Found phy-handle subnode\n");
+
+       /* If phy-handle is not specified, check if we have a fixed-phy */
+       if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+               if ((of_phy_register_fixed_link(np) < 0))
+                       return -ENODEV;
+
+               dev_dbg(dev, "Found fixed-link subnode\n");
+               plat->phy_node = of_node_get(np);
+               mdio = false;
+       }
+
+       /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
+       for_each_child_of_node(np, plat->mdio_node) {
+               if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
+                       break;
+       }
+
+       if (plat->mdio_node) {
+               dev_dbg(dev, "Found MDIO subnode\n");
+               mdio = true;
+       }
+
+       if (mdio)
+               plat->mdio_bus_data =
+                       devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
+                                    GFP_KERNEL);
+       return 0;
+}
+
 /**
  * stmmac_probe_config_dt - parse device-tree driver parameters
  * @pdev: platform_device structure
@@ -146,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
        struct device_node *np = pdev->dev.of_node;
        struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
-       struct device_node *child_node = NULL;
 
        plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
        if (!plat)
@@ -166,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
        /* Default to phy auto-detection */
        plat->phy_addr = -1;
 
-       /* If we find a phy-handle property, use it as the PHY */
-       plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
-
-       /* If phy-handle is not specified, check if we have a fixed-phy */
-       if (!plat->phy_node && of_phy_is_fixed_link(np)) {
-               if ((of_phy_register_fixed_link(np) < 0))
-                       return ERR_PTR(-ENODEV);
-
-               plat->phy_node = of_node_get(np);
-       }
-
-       for_each_child_of_node(np, child_node)
-               if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
-                       plat->mdio_node = child_node;
-                       break;
-               }
-
        /* "snps,phy-addr" is not a standard property. Mark it as deprecated
         * and warn of its use. Remove this when phy node support is added.
         */
        if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
                dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
-       if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
-               plat->mdio_bus_data = NULL;
-       else
-               plat->mdio_bus_data =
-                       devm_kzalloc(&pdev->dev,
-                                    sizeof(struct stmmac_mdio_bus_data),
-                                    GFP_KERNEL);
+       /* To Configure PHY by using all device-tree supported properties */
+       if (stmmac_dt_phy(plat, np, &pdev->dev))
+               return ERR_PTR(-ENODEV);
 
        of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
 
index b881a7b..9636da0 100644 (file)
@@ -339,6 +339,8 @@ static struct phy_driver bcm7xxx_driver[] = {
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+       BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
+       BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
        BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
        BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
        BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
@@ -348,6 +350,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
        { PHY_ID_BCM7250, 0xfffffff0, },
        { PHY_ID_BCM7364, 0xfffffff0, },
        { PHY_ID_BCM7366, 0xfffffff0, },
+       { PHY_ID_BCM7346, 0xfffffff0, },
+       { PHY_ID_BCM7362, 0xfffffff0, },
        { PHY_ID_BCM7425, 0xfffffff0, },
        { PHY_ID_BCM7429, 0xfffffff0, },
        { PHY_ID_BCM7439, 0xfffffff0, },
index 26c64d2..a0f64cb 100644 (file)
@@ -1198,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_dev_open;
        }
 
+       dev_uc_sync_multiple(port_dev, dev);
+       dev_mc_sync_multiple(port_dev, dev);
+
        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1261,6 +1264,8 @@ err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);
 
 err_vids_add:
+       dev_uc_unsync(port_dev, dev);
+       dev_mc_unsync(port_dev, dev);
        dev_close(port_dev);
 
 err_dev_open:
index afdf950..510e90a 100644 (file)
@@ -622,7 +622,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
 
        /* Re-attach the filter to persist device */
        if (!skip_filter && (tun->filter_attached == true)) {
-               err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+               err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+                                        lockdep_rtnl_is_held());
                if (!err)
                        goto out;
        }
@@ -1822,7 +1823,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
 
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
-               sk_detach_filter(tfile->socket.sk);
+               __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
        }
 
        tun->filter_attached = false;
@@ -1835,7 +1836,8 @@ static int tun_attach_filter(struct tun_struct *tun)
 
        for (i = 0; i < tun->numqueues; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
-               ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+               ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+                                        lockdep_rtnl_is_held());
                if (ret) {
                        tun_detach_filter(tun, i);
                        return ret;
index 86ba30b..2fb31ed 100644 (file)
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = {
          .driver_info = (unsigned long) &wwan_info,
        },
 
+       /* Telit LE910 V2 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
+               USB_CLASS_COMM,
+               USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&wwan_noarp_info,
+       },
+
        /* DW5812 LTE Verizon Mobile Broadband Card
         * Unlike DW5550 this device requires FLAG_NOARP
         */
index 1bfe0fc..22e1a9a 100644 (file)
@@ -38,7 +38,7 @@
  * HEADS UP:  this handshaking isn't all that robust.  This driver
  * gets confused easily if you unplug one end of the cable then
  * try to connect it again; you'll need to restart both ends. The
- * "naplink" software (used by some PlayStation/2 deveopers) does
+ * "naplink" software (used by some PlayStation/2 developers) does
  * the handshaking much better!   Also, sometimes this hardware
  * seems to get wedged under load.  Prolific docs are weak, and
  * don't identify differences between PL2301 and PL2302, much less
index 7d717c6..9d1fce8 100644 (file)
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1426, 2)},    /* ZTE MF91 */
        {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
+       {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
index ca5721c..cc31c6f 100644 (file)
@@ -99,7 +99,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                if (unlikely(bad_pmem))
                        rc = -EIO;
                else {
-                       memcpy_from_pmem(mem + off, pmem_addr, len);
+                       rc = memcpy_from_pmem(mem + off, pmem_addr, len);
                        flush_dcache_page(page);
                }
        } else {
@@ -295,7 +295,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
 
                if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
                        return -EIO;
-               memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
+               return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
        } else {
                memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
                wmb_pmem();
index 9973ceb..07462d7 100644 (file)
@@ -309,8 +309,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
                 * much memory to the process.
                 */
                down_read(&current->mm->mmap_sem);
-               ret = get_user_pages(current, current->mm, address, 1,
-                                    !is_write, 0, &page, NULL);
+               ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
                up_read(&current->mm->mmap_sem);
                if (ret < 0)
                        break;
index 9607bc8..5d4d918 100644 (file)
@@ -886,7 +886,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
                }
 
                down_read(&current->mm->mmap_sem);
-               pinned = get_user_pages(current, current->mm,
+               pinned = get_user_pages(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
                                nr_pages, dir == DMA_FROM_DEVICE, 0,
                                page_list, NULL);
index 6bb04d4..6f056ca 100644 (file)
@@ -189,9 +189,9 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
        }
 
        ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
-       if (!ddata->boot_base) {
+       if (IS_ERR(ddata->boot_base)) {
                dev_err(dev, "Boot base not found\n");
-               return -EINVAL;
+               return PTR_ERR(ddata->boot_base);
        }
 
        err = of_property_read_u32_index(np, "st,syscfg", 1,
index 17ad574..1e56018 100644 (file)
@@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
        struct alias_pav_group *group;
        struct dasd_uid uid;
 
+       spin_lock(get_ccwdev_lock(device->cdev));
        private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
        private->uid.base_unit_addr =
                lcu->uac->unit[private->uid.real_unit_addr].base_ua;
        uid = private->uid;
-
+       spin_unlock(get_ccwdev_lock(device->cdev));
        /* if we have no PAV anyway, we don't need to bother with PAV groups */
        if (lcu->pav == NO_PAV) {
                list_move(&device->alias_list, &lcu->active_devices);
                return 0;
        }
-
        group = _find_group(lcu, &uid);
        if (!group) {
                group = kzalloc(sizeof(*group), GFP_ATOMIC);
@@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
        return 0;
 }
 
-/*
- * This function tries to lock all devices on an lcu via trylock
- * return NULL on success otherwise return first failed device
- */
-static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
-                                                     struct dasd_device *pos)
-
-{
-       struct alias_pav_group *pavgroup;
-       struct dasd_device *device;
-
-       list_for_each_entry(device, &lcu->active_devices, alias_list) {
-               if (device == pos)
-                       continue;
-               if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-                       return device;
-       }
-       list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-               if (device == pos)
-                       continue;
-               if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-                       return device;
-       }
-       list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-               list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-                       if (device == pos)
-                               continue;
-                       if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-                               return device;
-               }
-               list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-                       if (device == pos)
-                               continue;
-                       if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-                               return device;
-               }
-       }
-       return NULL;
-}
-
-/*
- * unlock all devices except the one that is specified as pos
- * stop if enddev is specified and reached
- */
-static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
-                                      struct dasd_device *pos,
-                                      struct dasd_device *enddev)
-
-{
-       struct alias_pav_group *pavgroup;
-       struct dasd_device *device;
-
-       list_for_each_entry(device, &lcu->active_devices, alias_list) {
-               if (device == pos)
-                       continue;
-               if (device == enddev)
-                       return;
-               spin_unlock(get_ccwdev_lock(device->cdev));
-       }
-       list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-               if (device == pos)
-                       continue;
-               if (device == enddev)
-                       return;
-               spin_unlock(get_ccwdev_lock(device->cdev));
-       }
-       list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-               list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-                       if (device == pos)
-                               continue;
-                       if (device == enddev)
-                               return;
-                       spin_unlock(get_ccwdev_lock(device->cdev));
-               }
-               list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-                       if (device == pos)
-                               continue;
-                       if (device == enddev)
-                               return;
-                       spin_unlock(get_ccwdev_lock(device->cdev));
-               }
-       }
-}
-
-/*
- *  this function is needed because the locking order
- *  device lock -> lcu lock
- *  needs to be assured when iterating over devices in an LCU
- *
- *  if a device is specified in pos then the device lock is already hold
- */
-static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
-                                         struct dasd_device *pos,
-                                         unsigned long *flags)
-{
-       struct dasd_device *failed;
-
-       do {
-               spin_lock_irqsave(&lcu->lock, *flags);
-               failed = _trylock_all_devices_on_lcu(lcu, pos);
-               if (failed) {
-                       _unlock_all_devices_on_lcu(lcu, pos, failed);
-                       spin_unlock_irqrestore(&lcu->lock, *flags);
-                       cpu_relax();
-               }
-       } while (failed);
-}
-
-static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
-                                 struct dasd_device *pos)
-{
-       struct dasd_device *failed;
-
-       do {
-               spin_lock(&lcu->lock);
-               failed = _trylock_all_devices_on_lcu(lcu, pos);
-               if (failed) {
-                       _unlock_all_devices_on_lcu(lcu, pos, failed);
-                       spin_unlock(&lcu->lock);
-                       cpu_relax();
-               }
-       } while (failed);
-}
-
 static int read_unit_address_configuration(struct dasd_device *device,
                                           struct alias_lcu *lcu)
 {
@@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
        if (rc)
                return rc;
 
-       _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+       spin_lock_irqsave(&lcu->lock, flags);
        lcu->pav = NO_PAV;
        for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
                switch (lcu->uac->unit[i].ua_type) {
@@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
                                 alias_list) {
                _add_device_to_lcu(lcu, device, refdev);
        }
-       _unlock_all_devices_on_lcu(lcu, NULL, NULL);
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
 }
@@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device)
 
        lcu = private->lcu;
        rc = 0;
-       spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-       spin_lock(&lcu->lock);
+       spin_lock_irqsave(&lcu->lock, flags);
        if (!(lcu->flags & UPDATE_PENDING)) {
                rc = _add_device_to_lcu(lcu, device, device);
                if (rc)
@@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device)
                list_move(&device->alias_list, &lcu->active_devices);
                _schedule_lcu_update(lcu, device);
        }
-       spin_unlock(&lcu->lock);
-       spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+       spin_unlock_irqrestore(&lcu->lock, flags);
        return rc;
 }
 
@@ -933,15 +806,27 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
 
-       list_for_each_entry(device, &lcu->active_devices, alias_list)
+       list_for_each_entry(device, &lcu->active_devices, alias_list) {
+               spin_lock(get_ccwdev_lock(device->cdev));
                dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
-       list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+               spin_unlock(get_ccwdev_lock(device->cdev));
+       }
+       list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+               spin_lock(get_ccwdev_lock(device->cdev));
                dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+               spin_unlock(get_ccwdev_lock(device->cdev));
+       }
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-               list_for_each_entry(device, &pavgroup->baselist, alias_list)
+               list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+                       spin_lock(get_ccwdev_lock(device->cdev));
                        dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
-               list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+                       spin_unlock(get_ccwdev_lock(device->cdev));
+               }
+               list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+                       spin_lock(get_ccwdev_lock(device->cdev));
                        dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+                       spin_unlock(get_ccwdev_lock(device->cdev));
+               }
        }
 }
 
@@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
 
-       list_for_each_entry(device, &lcu->active_devices, alias_list)
+       list_for_each_entry(device, &lcu->active_devices, alias_list) {
+               spin_lock(get_ccwdev_lock(device->cdev));
                dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-       list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+               spin_unlock(get_ccwdev_lock(device->cdev));
+       }
+       list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+               spin_lock(get_ccwdev_lock(device->cdev));
                dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+               spin_unlock(get_ccwdev_lock(device->cdev));
+       }
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-               list_for_each_entry(device, &pavgroup->baselist, alias_list)
+               list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+                       spin_lock(get_ccwdev_lock(device->cdev));
                        dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-               list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+                       spin_unlock(get_ccwdev_lock(device->cdev));
+               }
+               list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+                       spin_lock(get_ccwdev_lock(device->cdev));
                        dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+                       spin_unlock(get_ccwdev_lock(device->cdev));
+               }
        }
 }
 
@@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work)
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        reset_summary_unit_check(lcu, device, suc_data->reason);
 
-       _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+       spin_lock_irqsave(&lcu->lock, flags);
        _unstop_all_devices_on_lcu(lcu);
        _restart_all_base_devices_on_lcu(lcu);
        /* 3. read new alias configuration */
        _schedule_lcu_update(lcu, device);
        lcu->suc_data.device = NULL;
        dasd_put_device(device);
-       _unlock_all_devices_on_lcu(lcu, NULL, NULL);
        spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
-/*
- * note: this will be called from int handler context (cdev locked)
- */
-void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
-                                         struct irb *irb)
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
 {
+       struct dasd_device *device = container_of(work, struct dasd_device,
+                                                 suc_work);
        struct dasd_eckd_private *private = device->private;
        struct alias_lcu *lcu;
-       char reason;
-       char *sense;
-
-       sense = dasd_get_sense(irb);
-       if (sense) {
-               reason = sense[8];
-               DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
-                           "eckd handle summary unit check: reason", reason);
-       } else {
-               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-                           "eckd handle summary unit check:"
-                           " no reason code available");
-               return;
-       }
+       unsigned long flags;
 
        lcu = private->lcu;
        if (!lcu) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "device not ready to handle summary"
                            " unit check (no lcu structure)");
-               return;
+               goto out;
        }
-       _trylock_and_lock_lcu(lcu, device);
+       spin_lock_irqsave(&lcu->lock, flags);
        /* If this device is about to be removed just return and wait for
         * the next interrupt on a different device
         */
@@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "device is in offline processing,"
                            " don't do summary unit check handling");
-               _unlock_all_devices_on_lcu(lcu, device, NULL);
-               spin_unlock(&lcu->lock);
-               return;
+               goto out_unlock;
        }
        if (lcu->suc_data.device) {
                /* already scheduled or running */
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "previous instance of summary unit check worker"
                            " still pending");
-               _unlock_all_devices_on_lcu(lcu, device, NULL);
-               spin_unlock(&lcu->lock);
-               return ;
+               goto out_unlock;
        }
        _stop_all_devices_on_lcu(lcu);
        /* prepare for lcu_update */
-       private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
-       lcu->suc_data.reason = reason;
+       lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+       lcu->suc_data.reason = private->suc_reason;
        lcu->suc_data.device = device;
        dasd_get_device(device);
-       _unlock_all_devices_on_lcu(lcu, device, NULL);
-       spin_unlock(&lcu->lock);
        if (!schedule_work(&lcu->suc_data.worker))
                dasd_put_device(device);
+out_unlock:
+       spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+       clear_bit(DASD_FLAG_SUC, &device->flags);
+       dasd_put_device(device);
 };
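
Taken together, the dasd_alias.c hunks above drop the trylock-everything helpers: the lcu lock now only guards the lists, each device's ccwdev lock is taken briefly around that device's own update, and the summary unit check handling is deferred to a per-device work item. A rough userspace sketch of the per-item locking shape (types and names are invented):

#include <pthread.h>

struct fake_dev {
        pthread_mutex_t lock;
        int stopped;
};

/* The list lock is held by the caller of the walk; each device lock is
 * taken only around that device's state change, so no trylock/backoff
 * dance over the whole set is needed. */
static void stop_all(struct fake_dev *devs, int n,
                     pthread_mutex_t *list_lock)
{
        pthread_mutex_lock(list_lock);
        for (int i = 0; i < n; i++) {
                pthread_mutex_lock(&devs[i].lock);
                devs[i].stopped = 1;
                pthread_mutex_unlock(&devs[i].lock);
        }
        pthread_mutex_unlock(list_lock);
}

int main(void)
{
        struct fake_dev devs[2] = {
                { PTHREAD_MUTEX_INITIALIZER, 0 },
                { PTHREAD_MUTEX_INITIALIZER, 0 },
        };
        pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

        stop_all(devs, 2, &list_lock);
        return 0;
}
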
index 75c032d..c1b4ae5 100644 (file)
@@ -1682,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 
        /* setup work queue for validate server*/
        INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+       /* setup work queue for summary unit check */
+       INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
 
        if (!ccw_device_is_pathgroup(device->cdev)) {
                dev_warn(&device->cdev->dev,
@@ -2549,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
                    device->state == DASD_STATE_ONLINE &&
                    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
                    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
-                       /*
-                        * the state change could be caused by an alias
-                        * reassignment remove device from alias handling
-                        * to prevent new requests from being scheduled on
-                        * the wrong alias device
-                        */
-                       dasd_alias_remove_device(device);
-
                        /* schedule worker to reload device */
                        dasd_reload_device(device);
                }
@@ -2571,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
        /* summary unit check */
        if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
            (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
-               dasd_alias_handle_summary_unit_check(device, irb);
+               if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
+                       DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                                     "eckd suc: device already notified");
+                       return;
+               }
+               sense = dasd_get_sense(irb);
+               if (!sense) {
+                       DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                                     "eckd suc: no reason code available");
+                       clear_bit(DASD_FLAG_SUC, &device->flags);
+                       return;
+
+               }
+               private->suc_reason = sense[8];
+               DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+                             "eckd handle summary unit check: reason",
+                             private->suc_reason);
+               dasd_get_device(device);
+               if (!schedule_work(&device->suc_work))
+                       dasd_put_device(device);
+
                return;
        }
 
@@ -4495,6 +4509,12 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
        struct dasd_uid uid;
        unsigned long flags;
 
+       /*
+        * remove device from alias handling to prevent new requests
+        * from being scheduled on the wrong alias device
+        */
+       dasd_alias_remove_device(device);
+
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        old_base = private->uid.base_unit_addr;
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
index f8f91ee..6d9a6d3 100644 (file)
@@ -525,6 +525,7 @@ struct dasd_eckd_private {
        int count;
 
        u32 fcx_max_data;
+       char suc_reason;
 };
 
 
@@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
 int dasd_alias_add_device(struct dasd_device *);
 int dasd_alias_remove_device(struct dasd_device *);
 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
-void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
+void dasd_alias_handle_summary_unit_check(struct work_struct *);
 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
 void dasd_alias_lcu_setup_complete(struct dasd_device *);
 void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
index 8de29be..0f0add9 100644 (file)
@@ -470,6 +470,7 @@ struct dasd_device {
        struct work_struct restore_device;
        struct work_struct reload_device;
        struct work_struct kick_validate;
+       struct work_struct suc_work;
        struct timer_list timer;
 
        debug_info_t *debug_area;
@@ -542,6 +543,7 @@ struct dasd_attention_data {
 #define DASD_FLAG_SAFE_OFFLINE_RUNNING 11      /* safe offline running */
 #define DASD_FLAG_ABORTALL     12      /* Abort all noretry requests */
 #define DASD_FLAG_PATH_VERIFY  13      /* Path verification worker running */
+#define DASD_FLAG_SUC          14      /* unhandled summary unit check */
 
 #define DASD_SLEEPON_START_TAG ((void *) 1)
 #define DASD_SLEEPON_END_TAG   ((void *) 2)
index a24443b..97e5b69 100644 (file)
@@ -779,14 +779,6 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
        return 0;
 }
 
-static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
-{
-       struct iscsi_node_acl *acl = container_of(se_nacl,
-                       struct iscsi_node_acl, se_node_acl);
-
-       configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group);
-}
-
 /* End items for lio_target_acl_cit */
 
 /* Start items for lio_target_tpg_attrib_cit */
@@ -1247,6 +1239,16 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
        if (IS_ERR(tiqn))
                return ERR_CAST(tiqn);
 
+       pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+       pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+                       " %s\n", name);
+       return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_add_wwn_groups(struct se_wwn *wwn)
+{
+       struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+
        config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
                        "iscsi_instance", &iscsi_stat_instance_cit);
        configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
@@ -1271,12 +1273,6 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
                        "iscsi_logout_stats", &iscsi_stat_logout_cit);
        configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
                        &tiqn->tiqn_wwn.fabric_stat_group);
-
-
-       pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
-       pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
-                       " %s\n", name);
-       return &tiqn->tiqn_wwn;
 }
 
 static void lio_target_call_coredeltiqn(
@@ -1284,8 +1280,6 @@ static void lio_target_call_coredeltiqn(
 {
        struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
 
-       configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group);
-
        pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
                        tiqn->tiqn);
        iscsit_del_tiqn(tiqn);
@@ -1660,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = {
        .aborted_task                   = lio_aborted_task,
        .fabric_make_wwn                = lio_target_call_coreaddtiqn,
        .fabric_drop_wwn                = lio_target_call_coredeltiqn,
+       .add_wwn_groups                 = lio_target_add_wwn_groups,
        .fabric_make_tpg                = lio_target_tiqn_addtpg,
        .fabric_drop_tpg                = lio_target_tiqn_deltpg,
        .fabric_make_np                 = lio_target_call_addnptotpg,
        .fabric_drop_np                 = lio_target_call_delnpfromtpg,
        .fabric_init_nodeacl            = lio_target_init_nodeacl,
-       .fabric_cleanup_nodeacl         = lio_target_cleanup_nodeacl,
 
        .tfc_discovery_attrs            = lio_target_discovery_auth_attrs,
        .tfc_wwn_attrs                  = lio_target_wwn_attrs,
index 1bd5c72..31a096a 100644 (file)
@@ -338,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item)
 {
        struct se_node_acl *se_nacl = container_of(to_config_group(item),
                        struct se_node_acl, acl_group);
-       struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
 
-       if (tf->tf_ops->fabric_cleanup_nodeacl)
-               tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+       configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
        core_tpg_del_initiator_node_acl(se_nacl);
 }
 
@@ -383,14 +381,6 @@ static struct config_group *target_fabric_make_nodeacl(
        if (IS_ERR(se_nacl))
                return ERR_CAST(se_nacl);
 
-       if (tf->tf_ops->fabric_init_nodeacl) {
-               int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
-               if (ret) {
-                       core_tpg_del_initiator_node_acl(se_nacl);
-                       return ERR_PTR(ret);
-               }
-       }
-
        config_group_init_type_name(&se_nacl->acl_group, name,
                        &tf->tf_tpg_nacl_base_cit);
 
@@ -414,6 +404,15 @@ static struct config_group *target_fabric_make_nodeacl(
        configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
                        &se_nacl->acl_group);
 
+       if (tf->tf_ops->fabric_init_nodeacl) {
+               int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+               if (ret) {
+                       configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+                       core_tpg_del_initiator_node_acl(se_nacl);
+                       return ERR_PTR(ret);
+               }
+       }
+
        return &se_nacl->acl_group;
 }
 
@@ -892,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item)
                                struct se_wwn, wwn_group);
        struct target_fabric_configfs *tf = wwn->wwn_tf;
 
+       configfs_remove_default_groups(&wwn->fabric_stat_group);
        tf->tf_ops->fabric_drop_wwn(wwn);
 }
 
@@ -945,6 +945,8 @@ static struct config_group *target_fabric_make_wwn(
                        &tf->tf_wwn_fabric_stats_cit);
        configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
 
+       if (tf->tf_ops->add_wwn_groups)
+               tf->tf_ops->add_wwn_groups(wwn);
        return &wwn->wwn_group;
 }
 
index 4b02591..d01f89d 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/buffer_head.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/slab.h>
 #include <linux/migrate.h>
 #include <linux/ratelimit.h>
@@ -303,7 +302,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
-                       return 1;
+                       return err;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
@@ -313,7 +312,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size, GFP_NOFS);
                if (!result)
-                       return 1;
+                       return -ENOMEM;
        } else {
                result = (char *)&inline_result;
        }
@@ -334,7 +333,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
                                val, found, btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
-                       return 1;
+                       return -EUCLEAN;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
@@ -513,11 +512,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;
+
        found_start = btrfs_header_bytenr(eb);
-       if (WARN_ON(found_start != start || !PageUptodate(page)))
-               return 0;
-       csum_tree_block(fs_info, eb, 0);
-       return 0;
+       /*
+        * Please do not consolidate these warnings into a single if.
+        * It is useful to know what went wrong.
+        */
+       if (WARN_ON(found_start != start))
+               return -EUCLEAN;
+       if (WARN_ON(!PageUptodate(page)))
+               return -EUCLEAN;
+
+       ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
+                       btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
+
+       return csum_tree_block(fs_info, eb, 0);
 }
 
 static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
@@ -661,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                       eb, found_level);
 
        ret = csum_tree_block(fs_info, eb, 1);
-       if (ret) {
-               ret = -EIO;
+       if (ret)
                goto err;
-       }
 
        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
@@ -1831,7 +1838,7 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(root->fs_info);
 sleep:
-               if (!try_to_freeze() && !again) {
+               if (!again) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule();
@@ -1921,14 +1928,12 @@ sleep:
                if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
                                      &root->fs_info->fs_state)))
                        btrfs_cleanup_transaction(root);
-               if (!try_to_freeze()) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (!kthread_should_stop() &&
-                           (!btrfs_transaction_blocked(root->fs_info) ||
-                            cannot_commit))
-                               schedule_timeout(delay);
-                       __set_current_state(TASK_RUNNING);
-               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (!kthread_should_stop() &&
+                               (!btrfs_transaction_blocked(root->fs_info) ||
+                                cannot_commit))
+                       schedule_timeout(delay);
+               __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());
        return 0;
 }
index 5191121..1669f62 100644 (file)
@@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g,
        struct dlm_cluster *cl = NULL;
        struct dlm_spaces *sps = NULL;
        struct dlm_comms *cms = NULL;
-       void *gps = NULL;
 
        cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
        sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
        cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
 
-       if (!cl || !gps || !sps || !cms)
+       if (!cl || !sps || !cms)
                goto fail;
 
        config_group_init_type_name(&cl->group, name, &cluster_type);
index 794f81d..1d9ca2d 100644 (file)
@@ -1740,15 +1740,17 @@ static int walk_component(struct nameidata *nd, int flags)
                                          nd->flags);
                if (IS_ERR(path.dentry))
                        return PTR_ERR(path.dentry);
-               if (unlikely(d_is_negative(path.dentry))) {
-                       dput(path.dentry);
-                       return -ENOENT;
-               }
+
                path.mnt = nd->path.mnt;
                err = follow_managed(&path, nd);
                if (unlikely(err < 0))
                        return err;
 
+               if (unlikely(d_is_negative(path.dentry))) {
+                       path_to_nameidata(&path, nd);
+                       return -ENOENT;
+               }
+
                seq = 0;        /* we are already out of RCU mode */
                inode = d_backing_inode(path.dentry);
        }
index f30b6ec..ba7dec4 100644 (file)
@@ -235,7 +235,7 @@ get_new_buffer_index:
        if (ret == -EIO && op_state_purged(new_op)) {
                gossip_err("%s: Client is down. Aborting readdir call.\n",
                        __func__);
-               goto out_slot;
+               goto out_free_op;
        }
 
        if (ret < 0 || new_op->downcall.status != 0) {
@@ -244,14 +244,14 @@ get_new_buffer_index:
                             new_op->downcall.status);
                if (ret >= 0)
                        ret = new_op->downcall.status;
-               goto out_slot;
+               goto out_free_op;
        }
 
        dents_buf = new_op->downcall.trailer_buf;
        if (dents_buf == NULL) {
                gossip_err("Invalid NULL buffer in readdir response\n");
                ret = -ENOMEM;
-               goto out_slot;
+               goto out_free_op;
        }
 
        bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size,
@@ -363,8 +363,6 @@ out_destroy_handle:
 out_vfree:
        gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf);
        vfree(dents_buf);
-out_slot:
-       orangefs_readdir_index_put(buffer_index);
 out_free_op:
        op_release(new_op);
        gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
index 45ce4ff..50578a2 100644 (file)
@@ -407,7 +407,7 @@ enum {
  * space. Zero signifies the upstream version of the kernel module.
  */
 #define ORANGEFS_KERNEL_PROTO_VERSION 0
-#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904
+#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20903
 
 /*
  * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl.
index f0ba9c2..e3354b7 100644 (file)
@@ -24,6 +24,8 @@
 #define PHY_ID_BCM7250                 0xae025280
 #define PHY_ID_BCM7364                 0xae025260
 #define PHY_ID_BCM7366                 0x600d8490
+#define PHY_ID_BCM7346                 0x600d8650
+#define PHY_ID_BCM7362                 0x600d84b0
 #define PHY_ID_BCM7425                 0x600d86b0
 #define PHY_ID_BCM7429                 0x600d8730
 #define PHY_ID_BCM7435                 0x600d8750
index 485fe55..d9d6a9d 100644 (file)
@@ -188,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
 }
 
 #define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz)       \
-static struct configfs_attribute _pfx##attr_##_name = {                \
+static struct configfs_bin_attribute _pfx##attr_##_name = {    \
        .cb_attr = {                                            \
                .ca_name        = __stringify(_name),           \
                .ca_mode        = S_IRUGO,                      \
@@ -200,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = {             \
 }
 
 #define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz)       \
-static struct configfs_attribute _pfx##attr_##_name = {                \
+static struct configfs_bin_attribute _pfx##attr_##_name = {    \
        .cb_attr = {                                            \
                .ca_name        = __stringify(_name),           \
                .ca_mode        = S_IWUSR,                      \
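For illustration only, a minimal sketch of how the corrected macro is meant to be used; the names widget_ / blob / widget_blob_read are hypothetical and assume the usual _pfx##_name##_read wiring of the CONFIGFS_BIN_ATTR_* helpers:

static ssize_t widget_blob_read(struct config_item *item, void *buf,
                                size_t count)
{
        /* copy up to count bytes of binary data into buf */
        return 0;
}

/* With the fix this expands to a struct configfs_bin_attribute named
 * widget_attr_blob, so .cb_private, .cb_max_size and .read are valid members.
 */
CONFIGFS_BIN_ATTR_RO(widget_, blob, NULL, 4096);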
index 43aa1f8..a51a536 100644 (file)
@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+                      bool locked);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
+int __sk_detach_filter(struct sock *sk, bool locked);
+
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);
 
index 79b0ef6..7008623 100644 (file)
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
-               return false;
+               return NULL;
 }
 static inline int hpage_nr_pages(struct page *page)
 {
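For context, a hypothetical caller (not taken from the patch) showing why the !THP stub must return NULL rather than false: the function returns a spinlock_t * and callers test the pointer before unlocking:

static int touch_huge_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

        if (!ptl)
                return 0;       /* not a trans-huge/devmap PMD, nothing locked */
        /* ... work on the huge PMD while the lock is held ... */
        spin_unlock(ptl);
        return 1;
}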
index 0e1f433..f48b8a6 100644 (file)
@@ -234,6 +234,10 @@ struct ip_set {
        spinlock_t lock;
        /* References to the set */
        u32 ref;
+       /* References to the set for netlink events like dump,
+        * ref can be swapped out by ip_set_swap
+        */
+       u32 ref_netlink;
        /* The core set type */
        struct ip_set_type *type;
        /* The type variant doing the real job */
index 3ec5309..ac6d872 100644 (file)
@@ -42,6 +42,13 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
        BUG();
 }
 
+static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
+               size_t n)
+{
+       BUG();
+       return -EFAULT;
+}
+
 static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
                struct iov_iter *i)
 {
@@ -66,14 +73,17 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 #endif
 
 /*
- * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
- * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem()
- * and arch_has_wmb_pmem().
+ * memcpy_from_pmem - read from persistent memory with error handling
+ * @dst: destination buffer
+ * @src: source buffer
+ * @size: transfer length
+ *
+ * Returns 0 on success, negative error code on failure.
  */
-static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
+static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
+               size_t size)
 {
-       memcpy(dst, (void __force const *) src, size);
+       return arch_memcpy_from_pmem(dst, src, size);
 }
 
 static inline bool arch_has_pmem_api(void)
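A minimal caller sketch, not part of the patch (the helper name read_pmem_block is hypothetical), showing the point of the signature change: memcpy_from_pmem() now returns an error code that callers can propagate instead of silently assuming the copy succeeded:

static int read_pmem_block(void *dst, void __pmem const *src, size_t len)
{
        int rc = memcpy_from_pmem(dst, src, len);

        if (rc)
                return rc;      /* e.g. -EFAULT from the stub above */
        return 0;
}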
index 4bcf5a6..e6bc30a 100644 (file)
@@ -108,7 +108,6 @@ struct stmmac_axi {
 };
 
 struct plat_stmmacenet_data {
-       char *phy_bus_name;
        int bus_id;
        int phy_addr;
        int interface;
index 685a51a..8ff6d40 100644 (file)
@@ -76,6 +76,7 @@ struct target_core_fabric_ops {
        struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
                                struct config_group *, const char *);
        void (*fabric_drop_wwn)(struct se_wwn *);
+       void (*add_wwn_groups)(struct se_wwn *);
        struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
                                struct config_group *, const char *);
        void (*fabric_drop_tpg)(struct se_portal_group *);
@@ -87,7 +88,6 @@ struct target_core_fabric_ops {
                                struct config_group *, const char *);
        void (*fabric_drop_np)(struct se_tpg_np *);
        int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
-       void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
 
        struct configfs_attribute **tfc_discovery_attrs;
        struct configfs_attribute **tfc_wwn_attrs;
index 6fb6440..8738a78 100644 (file)
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,
 
        TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
                __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
-               __entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
+               __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
 );
 
 #endif /* _TRACE_PAGE_ISOLATION_H */
index 924f537..23917bb 100644 (file)
@@ -375,6 +375,7 @@ struct bpf_tunnel_key {
        };
        __u8 tunnel_tos;
        __u8 tunnel_ttl;
+       __u16 tunnel_ext;
        __u32 tunnel_label;
 };
 
index e0d2616..0dfd09d 100644 (file)
@@ -272,8 +272,9 @@ config CROSS_MEMORY_ATTACH
          See the man page for more details.
 
 config FHANDLE
-       bool "open by fhandle syscalls"
+       bool "open by fhandle syscalls" if EXPERT
        select EXPORTFS
+       default y
        help
          If you say Y here, a user level program will be able to map
          file names to handle and then later use the handle for
index 2a2efe1..adc5e4b 100644 (file)
@@ -137,11 +137,13 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
                   "map_type:\t%u\n"
                   "key_size:\t%u\n"
                   "value_size:\t%u\n"
-                  "max_entries:\t%u\n",
+                  "max_entries:\t%u\n"
+                  "map_flags:\t%#x\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
-                  map->max_entries);
+                  map->max_entries,
+                  map->map_flags);
 }
 #endif
 
index acb3b6c..38f1dd7 100644 (file)
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                alloc_info->state = KASAN_STATE_FREE;
-               set_track(&free_info->track);
+               set_track(&free_info->track, GFP_NOWAIT);
        }
 #endif
 
index b34d279..8634958 100644 (file)
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused)
 
 static void wake_oom_reaper(struct task_struct *tsk)
 {
-       if (!oom_reaper_th || tsk->oom_reaper_list)
+       if (!oom_reaper_th)
+               return;
+
+       /* tsk is already queued? */
+       if (tsk == oom_reaper_list || tsk->oom_reaper_list)
                return;
 
        get_task_struct(tsk);
index 92c4c36..c4f5682 100644 (file)
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range are isolated.
+ * Returns the last tested pfn.
  */
 static unsigned long
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
         * now as a simple work-around, we use the next node for destination.
         */
        if (PageHuge(page)) {
-               nodemask_t src = nodemask_of_node(page_to_nid(page));
-               nodemask_t dst;
-               nodes_complement(dst, src);
+               int node = next_online_node(page_to_nid(page));
+               if (node == MAX_NUMNODES)
+                       node = first_online_node;
                return alloc_huge_page_node(page_hstate(compound_head(page)),
-                                           next_node(page_to_nid(page), dst));
+                                           node);
        }
 
        if (PageHighMem(page))
index c399a0d..395e314 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-       /*
-        * All TLB entries are flushed on the assumption that it is
-        * cheaper to flush all TLBs and let them be refilled than
-        * flushing individual PFNs. Note that we do not track mm's
-        * to flush as that might simply be multiple full TLB flushes
-        * for no gain.
-        */
-       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-       flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
        cpu = get_cpu();
 
-       trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-       if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-               percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-       if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-               smp_call_function_many(&tlb_ubc->cpumask,
-                       percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+       if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+               local_flush_tlb();
+               trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        }
+
+       if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+               flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
        cpumask_clear(&tlb_ubc->cpumask);
        tlb_ubc->flush_required = false;
        tlb_ubc->writable = false;
index e234490..9cb7044 100644 (file)
@@ -582,7 +582,7 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
        int err;
 
        err = switchdev_port_attr_set(br->dev, &attr);
-       if (err)
+       if (err && err != -EOPNOTSUPP)
                return err;
 
        br->ageing_time = t;
index 67b2e27..8570bc7 100644 (file)
@@ -1521,6 +1521,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        if (copy_from_user(&tmp, user, sizeof(tmp)))
                return -EFAULT;
 
+       tmp.name[sizeof(tmp.name) - 1] = '\0';
+
        t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
        if (!t)
                return ret;
@@ -2332,6 +2334,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
        if (copy_from_user(&tmp, user, sizeof(tmp)))
                return -EFAULT;
 
+       tmp.name[sizeof(tmp.name) - 1] = '\0';
+
        t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
        if (!t)
                return ret;
index adc8d72..77f7e7a 100644 (file)
@@ -40,7 +40,8 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
 /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
  * or the bridge port (NF_BRIDGE PREROUTING).
  */
-static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
+static void nft_reject_br_send_v4_tcp_reset(struct net *net,
+                                           struct sk_buff *oldskb,
                                            const struct net_device *dev,
                                            int hook)
 {
@@ -48,7 +49,6 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
        struct iphdr *niph;
        const struct tcphdr *oth;
        struct tcphdr _oth;
-       struct net *net = sock_net(oldskb->sk);
 
        if (!nft_bridge_iphdr_validate(oldskb))
                return;
@@ -75,7 +75,8 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
        br_deliver(br_port_get_rcu(dev), nskb);
 }
 
-static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
+static void nft_reject_br_send_v4_unreach(struct net *net,
+                                         struct sk_buff *oldskb,
                                          const struct net_device *dev,
                                          int hook, u8 code)
 {
@@ -86,7 +87,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
        void *payload;
        __wsum csum;
        u8 proto;
-       struct net *net = sock_net(oldskb->sk);
 
        if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
                return;
@@ -273,17 +273,17 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
        case htons(ETH_P_IP):
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
-                       nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
-                                                     pkt->hook,
+                       nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
+                                                     pkt->in, pkt->hook,
                                                      priv->icmp_code);
                        break;
                case NFT_REJECT_TCP_RST:
-                       nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
-                                                       pkt->hook);
+                       nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb,
+                                                       pkt->in, pkt->hook);
                        break;
                case NFT_REJECT_ICMPX_UNREACH:
-                       nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
-                                                     pkt->hook,
+                       nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
+                                                     pkt->in, pkt->hook,
                                                      nft_reject_icmp_code(priv->icmp_code));
                        break;
                }
index b7177d0..ca7f832 100644 (file)
@@ -1149,7 +1149,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
+                           bool locked)
 {
        struct sk_filter *fp, *old_fp;
 
@@ -1165,10 +1166,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
                return -ENOMEM;
        }
 
-       old_fp = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
+       old_fp = rcu_dereference_protected(sk->sk_filter, locked);
        rcu_assign_pointer(sk->sk_filter, fp);
-
        if (old_fp)
                sk_filter_uncharge(sk, old_fp);
 
@@ -1247,7 +1246,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
  * occurs or there is insufficient memory for the filter a negative
  * errno code is returned. On success the return is zero.
  */
-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+                      bool locked)
 {
        struct bpf_prog *prog = __get_filter(fprog, sk);
        int err;
@@ -1255,7 +1255,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       err = __sk_attach_prog(prog, sk);
+       err = __sk_attach_prog(prog, sk, locked);
        if (err < 0) {
                __bpf_prog_release(prog);
                return err;
@@ -1263,7 +1263,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(sk_attach_filter);
+EXPORT_SYMBOL_GPL(__sk_attach_filter);
+
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+{
+       return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
+}
 
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
@@ -1309,7 +1314,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       err = __sk_attach_prog(prog, sk);
+       err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
        if (err < 0) {
                bpf_prog_put(prog);
                return err;
@@ -1764,6 +1769,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                switch (size) {
                case offsetof(struct bpf_tunnel_key, tunnel_label):
+               case offsetof(struct bpf_tunnel_key, tunnel_ext):
                        goto set_compat;
                case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
                        /* Fixup deprecated structure layouts here, so we have
@@ -1849,6 +1855,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                switch (size) {
                case offsetof(struct bpf_tunnel_key, tunnel_label):
+               case offsetof(struct bpf_tunnel_key, tunnel_ext):
                case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
                        /* Fixup deprecated structure layouts here, so we have
                         * a common path later on.
@@ -1861,7 +1868,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                        return -EINVAL;
                }
        }
-       if (unlikely(!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label))
+       if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
+                    from->tunnel_ext))
                return -EINVAL;
 
        skb_dst_drop(skb);
@@ -2247,7 +2255,7 @@ static int __init register_sk_filter_ops(void)
 }
 late_initcall(register_sk_filter_ops);
 
-int sk_detach_filter(struct sock *sk)
+int __sk_detach_filter(struct sock *sk, bool locked)
 {
        int ret = -ENOENT;
        struct sk_filter *filter;
@@ -2255,8 +2263,7 @@ int sk_detach_filter(struct sock *sk)
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;
 
-       filter = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
+       filter = rcu_dereference_protected(sk->sk_filter, locked);
        if (filter) {
                RCU_INIT_POINTER(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
@@ -2265,7 +2272,12 @@ int sk_detach_filter(struct sock *sk)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(sk_detach_filter);
+EXPORT_SYMBOL_GPL(__sk_detach_filter);
+
+int sk_detach_filter(struct sock *sk)
+{
+       return __sk_detach_filter(sk, sock_owned_by_user(sk));
+}
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                  unsigned int len)
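A hedged usage sketch (the function below is hypothetical, not from this series): a caller that serializes sk->sk_filter updates under RTNL instead of the socket lock can pass its own lockdep condition through the new 'locked' argument, while the existing sk_attach_filter()/sk_detach_filter() wrappers keep the sock_owned_by_user() behaviour:

static int attach_filter_under_rtnl(struct sock *sk, struct sock_fprog *fprog)
{
        ASSERT_RTNL();
        /* RTNL, not lock_sock(), is what protects sk->sk_filter here */
        return __sk_attach_filter(fprog, sk, lockdep_rtnl_is_held());
}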
index 94acfc8..a57bd17 100644 (file)
@@ -603,7 +603,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
        const struct net_device_ops *ops;
        int err;
 
-       np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
        INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
@@ -670,6 +669,7 @@ int netpoll_setup(struct netpoll *np)
                goto unlock;
        }
        dev_hold(ndev);
+       np->dev = ndev;
 
        if (netdev_master_upper_dev_get(ndev)) {
                np_err(np, "%s is a slave device, aborting\n", np->dev_name);
@@ -770,6 +770,7 @@ int netpoll_setup(struct netpoll *np)
        return 0;
 
 put:
+       np->dev = NULL;
        dev_put(ndev);
 unlock:
        rtnl_unlock();
index f206677..a75f7e9 100644 (file)
@@ -909,6 +909,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+              + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
               + nla_total_size(1); /* IFLA_PROTO_DOWN */
 
 }
index a0586b4..5a94aea 100644 (file)
@@ -195,6 +195,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
        u8 proto = NAPI_GRO_CB(skb)->proto;
        const struct net_offload **offloads;
 
+       /* We can clear the encap_mark for FOU as we are essentially doing
+        * one of two possible things.  We are either adding an L4 tunnel
+        * header to the outer L3 tunnel header, or we are simply
+        * treating the GRE tunnel header as though it is a UDP protocol
+        * specific header such as VXLAN or GENEVE.
+        */
+       NAPI_GRO_CB(skb)->encap_mark = 0;
+
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
@@ -352,6 +360,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                }
        }
 
+       /* We can clear the encap_mark for GUE as we are essentially doing
+        * one of two possible things.  We are either adding an L4 tunnel
+        * header to the outer L3 tunnel header, or we are simply
+        * treating the GRE tunnel header as though it is a UDP protocol
+        * specific header such as VXLAN or GENEVE.
+        */
+       NAPI_GRO_CB(skb)->encap_mark = 0;
+
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
index 02dd990..6165f30 100644 (file)
@@ -372,8 +372,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
        if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
            nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
            nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
-           nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
-           nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+           nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
+           nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
            nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
                return -ENOMEM;
 
index bf08192..4133b0f 100644 (file)
@@ -359,11 +359,12 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 }
 
 /* All zeroes == unconditional rule. */
-static inline bool unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_entry *e)
 {
        static const struct arpt_arp uncond;
 
-       return memcmp(arp, &uncond, sizeof(uncond)) == 0;
+       return e->target_offset == sizeof(struct arpt_entry) &&
+              memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -402,11 +403,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
 
                        /* Unconditional return/END. */
-                       if ((e->target_offset == sizeof(struct arpt_entry) &&
+                       if ((unconditional(e) &&
                             (strcmp(t->target.u.user.name,
                                     XT_STANDARD_TARGET) == 0) &&
-                            t->verdict < 0 && unconditional(&e->arp)) ||
-                           visited) {
+                            t->verdict < 0) || visited) {
                                unsigned int oldpos, size;
 
                                if ((strcmp(t->target.u.user.name,
@@ -474,14 +474,12 @@ next:
        return 1;
 }
 
-static inline int check_entry(const struct arpt_entry *e, const char *name)
+static inline int check_entry(const struct arpt_entry *e)
 {
        const struct xt_entry_target *t;
 
-       if (!arp_checkentry(&e->arp)) {
-               duprintf("arp_tables: arp check failed %p %s.\n", e, name);
+       if (!arp_checkentry(&e->arp))
                return -EINVAL;
-       }
 
        if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
                return -EINVAL;
@@ -522,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
        struct xt_target *target;
        int ret;
 
-       ret = check_entry(e, name);
-       if (ret)
-               return ret;
-
        e->counters.pcnt = xt_percpu_counter_alloc();
        if (IS_ERR_VALUE(e->counters.pcnt))
                return -ENOMEM;
@@ -557,7 +551,7 @@ static bool check_underflow(const struct arpt_entry *e)
        const struct xt_entry_target *t;
        unsigned int verdict;
 
-       if (!unconditional(&e->arp))
+       if (!unconditional(e))
                return false;
        t = arpt_get_target_c(e);
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -576,9 +570,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
                                             unsigned int valid_hooks)
 {
        unsigned int h;
+       int err;
 
        if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
-           (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
+           (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
+           (unsigned char *)e + e->next_offset > limit) {
                duprintf("Bad offset %p\n", e);
                return -EINVAL;
        }
@@ -590,6 +586,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
                return -EINVAL;
        }
 
+       err = check_entry(e);
+       if (err)
+               return err;
+
        /* Check hooks & underflows */
        for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
                if (!(valid_hooks & (1 << h)))
@@ -598,9 +598,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h]) {
                        if (!check_underflow(e)) {
-                               pr_err("Underflows must be unconditional and "
-                                      "use the STANDARD target with "
-                                      "ACCEPT/DROP\n");
+                               pr_debug("Underflows must be unconditional and "
+                                        "use the STANDARD target with "
+                                        "ACCEPT/DROP\n");
                                return -EINVAL;
                        }
                        newinfo->underflow[h] = underflows[h];
@@ -969,6 +969,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
                         sizeof(struct arpt_get_entries) + get.size);
                return -EINVAL;
        }
+       get.name[sizeof(get.name) - 1] = '\0';
 
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR_OR_NULL(t)) {
@@ -1233,7 +1234,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
 
        duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
-           (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
+           (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
+           (unsigned char *)e + e->next_offset > limit) {
                duprintf("Bad offset %p, limit = %p\n", e, limit);
                return -EINVAL;
        }
@@ -1246,7 +1248,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
        }
 
        /* For purposes of check_entry casting the compat entry is fine */
-       ret = check_entry((struct arpt_entry *)e, name);
+       ret = check_entry((struct arpt_entry *)e);
        if (ret)
                return ret;
 
@@ -1662,6 +1664,7 @@ static int compat_get_entries(struct net *net,
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }
+       get.name[sizeof(get.name) - 1] = '\0';
 
        xt_compat_lock(NFPROTO_ARP);
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
index e53f8d6..631c100 100644 (file)
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
 
 /* All zeroes == unconditional rule. */
 /* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_entry *e)
 {
        static const struct ipt_ip uncond;
 
-       return memcmp(ip, &uncond, sizeof(uncond)) == 0;
+       return e->target_offset == sizeof(struct ipt_entry) &&
+              memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
 #undef FWINV
 }
 
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
        } else if (s == e) {
                (*rulenum)++;
 
-               if (s->target_offset == sizeof(struct ipt_entry) &&
+               if (unconditional(s) &&
                    strcmp(t->target.u.kernel.target->name,
                           XT_STANDARD_TARGET) == 0 &&
-                  t->verdict < 0 &&
-                  unconditional(&s->ip)) {
+                  t->verdict < 0) {
                        /* Tail of chains: STANDARD target (return/policy) */
                        *comment = *chainname == hookname
                                ? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -476,11 +476,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
                        e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
                        /* Unconditional return/END. */
-                       if ((e->target_offset == sizeof(struct ipt_entry) &&
+                       if ((unconditional(e) &&
                             (strcmp(t->target.u.user.name,
                                     XT_STANDARD_TARGET) == 0) &&
-                            t->verdict < 0 && unconditional(&e->ip)) ||
-                           visited) {
+                            t->verdict < 0) || visited) {
                                unsigned int oldpos, size;
 
                                if ((strcmp(t->target.u.user.name,
@@ -569,14 +568,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
 }
 
 static int
-check_entry(const struct ipt_entry *e, const char *name)
+check_entry(const struct ipt_entry *e)
 {
        const struct xt_entry_target *t;
 
-       if (!ip_checkentry(&e->ip)) {
-               duprintf("ip check failed %p %s.\n", e, name);
+       if (!ip_checkentry(&e->ip))
                return -EINVAL;
-       }
 
        if (e->target_offset + sizeof(struct xt_entry_target) >
            e->next_offset)
@@ -666,10 +663,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
        struct xt_mtchk_param mtpar;
        struct xt_entry_match *ematch;
 
-       ret = check_entry(e, name);
-       if (ret)
-               return ret;
-
        e->counters.pcnt = xt_percpu_counter_alloc();
        if (IS_ERR_VALUE(e->counters.pcnt))
                return -ENOMEM;
@@ -721,7 +714,7 @@ static bool check_underflow(const struct ipt_entry *e)
        const struct xt_entry_target *t;
        unsigned int verdict;
 
-       if (!unconditional(&e->ip))
+       if (!unconditional(e))
                return false;
        t = ipt_get_target_c(e);
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -741,9 +734,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
                           unsigned int valid_hooks)
 {
        unsigned int h;
+       int err;
 
        if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
-           (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
+           (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
+           (unsigned char *)e + e->next_offset > limit) {
                duprintf("Bad offset %p\n", e);
                return -EINVAL;
        }
@@ -755,6 +750,10 @@ check_entry_size_and_hooks(struct ipt_entry *e,
                return -EINVAL;
        }
 
+       err = check_entry(e);
+       if (err)
+               return err;
+
        /* Check hooks & underflows */
        for (h = 0; h < NF_INET_NUMHOOKS; h++) {
                if (!(valid_hooks & (1 << h)))
@@ -763,9 +762,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h]) {
                        if (!check_underflow(e)) {
-                               pr_err("Underflows must be unconditional and "
-                                      "use the STANDARD target with "
-                                      "ACCEPT/DROP\n");
+                               pr_debug("Underflows must be unconditional and "
+                                        "use the STANDARD target with "
+                                        "ACCEPT/DROP\n");
                                return -EINVAL;
                        }
                        newinfo->underflow[h] = underflows[h];
@@ -1157,6 +1156,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }
+       get.name[sizeof(get.name) - 1] = '\0';
 
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR_OR_NULL(t)) {
@@ -1493,7 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
 
        duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
-           (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
+           (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
+           (unsigned char *)e + e->next_offset > limit) {
                duprintf("Bad offset %p, limit = %p\n", e, limit);
                return -EINVAL;
        }
@@ -1506,7 +1507,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        }
 
        /* For purposes of check_entry casting the compat entry is fine */
-       ret = check_entry((struct ipt_entry *)e, name);
+       ret = check_entry((struct ipt_entry *)e);
        if (ret)
                return ret;
 
@@ -1935,6 +1936,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }
+       get.name[sizeof(get.name) - 1] = '\0';
 
        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(net, AF_INET, get.name);
index 7b8fbb3..db5b875 100644 (file)
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct iphdr *
-synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
+                 __be32 daddr)
 {
        struct iphdr *iph;
-       struct net *net = sock_net(skb->sk);
 
        skb_reset_network_header(skb);
        iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
@@ -40,14 +40,12 @@ synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 }
 
 static void
-synproxy_send_tcp(const struct synproxy_net *snet,
+synproxy_send_tcp(struct net *net,
                  const struct sk_buff *skb, struct sk_buff *nskb,
                  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
                  struct iphdr *niph, struct tcphdr *nth,
                  unsigned int tcp_hdr_size)
 {
-       struct net *net = nf_ct_net(snet->tmpl);
-
        nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
        nskb->ip_summed   = CHECKSUM_PARTIAL;
        nskb->csum_start  = (unsigned char *)nth - nskb->head;
@@ -72,7 +70,7 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct synproxy_net *snet,
+synproxy_send_client_synack(struct net *net,
                            const struct sk_buff *skb, const struct tcphdr *th,
                            const struct synproxy_options *opts)
 {
@@ -91,7 +89,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+       niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -109,15 +107,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_syn(const struct synproxy_net *snet,
+synproxy_send_server_syn(struct net *net,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         const struct synproxy_options *opts, u32 recv_seq)
 {
+       struct synproxy_net *snet = synproxy_pernet(net);
        struct sk_buff *nskb;
        struct iphdr *iph, *niph;
        struct tcphdr *nth;
@@ -132,7 +131,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+       niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -153,12 +152,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+       synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
                          niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_ack(const struct synproxy_net *snet,
+synproxy_send_server_ack(struct net *net,
                         const struct ip_ct_tcp *state,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         const struct synproxy_options *opts)
@@ -177,7 +176,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+       niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -193,11 +192,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_client_ack(const struct synproxy_net *snet,
+synproxy_send_client_ack(struct net *net,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         const struct synproxy_options *opts)
 {
@@ -215,7 +214,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
                return;
        skb_reserve(nskb, MAX_TCP_HEADER);
 
-       niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+       niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
 
        skb_reset_transport_header(nskb);
        nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -231,15 +230,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
 static bool
-synproxy_recv_client_ack(const struct synproxy_net *snet,
+synproxy_recv_client_ack(struct net *net,
                         const struct sk_buff *skb, const struct tcphdr *th,
                         struct synproxy_options *opts, u32 recv_seq)
 {
+       struct synproxy_net *snet = synproxy_pernet(net);
        int mss;
 
        mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -255,7 +255,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
        if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
                synproxy_check_timestamp_cookie(opts);
 
-       synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+       synproxy_send_server_syn(net, skb, th, opts, recv_seq);
        return true;
 }
 
@@ -263,7 +263,8 @@ static unsigned int
 synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_synproxy_info *info = par->targinfo;
-       struct synproxy_net *snet = synproxy_pernet(par->net);
+       struct net *net = par->net;
+       struct synproxy_net *snet = synproxy_pernet(net);
        struct synproxy_options opts = {};
        struct tcphdr *th, _th;
 
@@ -292,12 +293,12 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
                                          XT_SYNPROXY_OPT_SACK_PERM |
                                          XT_SYNPROXY_OPT_ECN);
 
-               synproxy_send_client_synack(snet, skb, th, &opts);
+               synproxy_send_client_synack(net, skb, th, &opts);
                return NF_DROP;
 
        } else if (th->ack && !(th->fin || th->rst || th->syn)) {
                /* ACK from client */
-               synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+               synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
                return NF_DROP;
        }
 
@@ -308,7 +309,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(nhs->net);
+       struct net *net = nhs->net;
+       struct synproxy_net *snet = synproxy_pernet(net);
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
@@ -365,7 +367,7 @@ static unsigned int ipv4_synproxy_hook(void *priv,
                         * therefore we need to add 1 to make the SYN sequence
                         * number match the one of first SYN.
                         */
-                       if (synproxy_recv_client_ack(snet, skb, th, &opts,
+                       if (synproxy_recv_client_ack(net, skb, th, &opts,
                                                     ntohl(th->seq) + 1))
                                this_cpu_inc(snet->stats->cookie_retrans);
 
@@ -391,12 +393,12 @@ static unsigned int ipv4_synproxy_hook(void *priv,
                                  XT_SYNPROXY_OPT_SACK_PERM);
 
                swap(opts.tsval, opts.tsecr);
-               synproxy_send_server_ack(snet, state, skb, th, &opts);
+               synproxy_send_server_ack(net, state, skb, th, &opts);
 
                nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
 
                swap(opts.tsval, opts.tsecr);
-               synproxy_send_client_ack(snet, skb, th, &opts);
+               synproxy_send_client_ack(net, skb, th, &opts);
 
                consume_skb(skb);
                return NF_STOLEN;
index 84f9baf..86b67b7 100644 (file)
@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
 
 /* All zeroes == unconditional rule. */
 /* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_entry *e)
 {
        static const struct ip6t_ip6 uncond;
 
-       return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
+       return e->target_offset == sizeof(struct ip6t_entry) &&
+              memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
 }
 
 static inline const struct xt_entry_target *
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
        } else if (s == e) {
                (*rulenum)++;
 
-               if (s->target_offset == sizeof(struct ip6t_entry) &&
+               if (unconditional(s) &&
                    strcmp(t->target.u.kernel.target->name,
                           XT_STANDARD_TARGET) == 0 &&
-                   t->verdict < 0 &&
-                   unconditional(&s->ipv6)) {
+                   t->verdict < 0) {
                        /* Tail of chains: STANDARD target (return/policy) */
                        *comment = *chainname == hookname
                                ? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -488,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
                        e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
                        /* Unconditional return/END. */
-                       if ((e->target_offset == sizeof(struct ip6t_entry) &&
+                       if ((unconditional(e) &&
                             (strcmp(t->target.u.user.name,
                                     XT_STANDARD_TARGET) == 0) &&
-                            t->verdict < 0 &&
-                            unconditional(&e->ipv6)) || visited) {
+                            t->verdict < 0) || visited) {
                                unsigned int oldpos, size;
 
                                if ((strcmp(t->target.u.user.name,
@@ -581,14 +580,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
 }
 
 static int
-check_entry(const struct ip6t_entry *e, const char *name)
+check_entry(const struct ip6t_entry *e)
 {
        const struct xt_entry_target *t;
 
-       if (!ip6_checkentry(&e->ipv6)) {
-               duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+       if (!ip6_checkentry(&e->ipv6))
                return -EINVAL;
-       }
 
        if (e->target_offset + sizeof(struct xt_entry_target) >
            e->next_offset)
@@ -679,10 +676,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
        struct xt_mtchk_param mtpar;
        struct xt_entry_match *ematch;
 
-       ret = check_entry(e, name);
-       if (ret)
-               return ret;
-
        e->counters.pcnt = xt_percpu_counter_alloc();
        if (IS_ERR_VALUE(e->counters.pcnt))
                return -ENOMEM;
@@ -733,7 +726,7 @@ static bool check_underflow(const struct ip6t_entry *e)
        const struct xt_entry_target *t;
        unsigned int verdict;
 
-       if (!unconditional(&e->ipv6))
+       if (!unconditional(e))
                return false;
        t = ip6t_get_target_c(e);
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -753,9 +746,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
                           unsigned int valid_hooks)
 {
        unsigned int h;
+       int err;
 
        if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
-           (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
+           (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
+           (unsigned char *)e + e->next_offset > limit) {
                duprintf("Bad offset %p\n", e);
                return -EINVAL;
        }
@@ -767,6 +762,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
                return -EINVAL;
        }
 
+       err = check_entry(e);
+       if (err)
+               return err;
+
        /* Check hooks & underflows */
        for (h = 0; h < NF_INET_NUMHOOKS; h++) {
                if (!(valid_hooks & (1 << h)))
@@ -775,9 +774,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h]) {
                        if (!check_underflow(e)) {
-                               pr_err("Underflows must be unconditional and "
-                                      "use the STANDARD target with "
-                                      "ACCEPT/DROP\n");
+                               pr_debug("Underflows must be unconditional and "
+                                        "use the STANDARD target with "
+                                        "ACCEPT/DROP\n");
                                return -EINVAL;
                        }
                        newinfo->underflow[h] = underflows[h];
@@ -1169,6 +1168,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }
+       get.name[sizeof(get.name) - 1] = '\0';
 
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR_OR_NULL(t)) {
@@ -1505,7 +1505,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
 
        duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
-           (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
+           (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
+           (unsigned char *)e + e->next_offset > limit) {
                duprintf("Bad offset %p, limit = %p\n", e, limit);
                return -EINVAL;
        }
@@ -1518,7 +1519,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
        }
 
        /* For purposes of check_entry casting the compat entry is fine */
-       ret = check_entry((struct ip6t_entry *)e, name);
+       ret = check_entry((struct ip6t_entry *)e);
        if (ret)
                return ret;
 
@@ -1944,6 +1945,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }
+       get.name[sizeof(get.name) - 1] = '\0';
 
        xt_compat_lock(AF_INET6);
        t = xt_find_table_lock(net, AF_INET6, get.name);
index fd25e44..8125931 100644 (file)
@@ -843,8 +843,8 @@ start_lookup:
                flush_stack(stack, count, skb, count - 1);
        } else {
                if (!inner_flushed)
-                       UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-                                        proto == IPPROTO_UDPLITE);
+                       UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+                                         proto == IPPROTO_UDPLITE);
                consume_skb(skb);
        }
        return 0;
index b0bc475..2e8e7e5 100644 (file)
@@ -95,7 +95,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
        if (!nested)
                goto nla_put_failure;
        if (mtype_do_head(skb, map) ||
-           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
                goto nla_put_failure;
        if (unlikely(ip_set_put_flags(skb, set)))
index 7e6568c..a748b0c 100644 (file)
@@ -497,6 +497,26 @@ __ip_set_put(struct ip_set *set)
        write_unlock_bh(&ip_set_ref_lock);
 }
 
+/* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
+ * a separate reference counter
+ */
+static inline void
+__ip_set_get_netlink(struct ip_set *set)
+{
+       write_lock_bh(&ip_set_ref_lock);
+       set->ref_netlink++;
+       write_unlock_bh(&ip_set_ref_lock);
+}
+
+static inline void
+__ip_set_put_netlink(struct ip_set *set)
+{
+       write_lock_bh(&ip_set_ref_lock);
+       BUG_ON(set->ref_netlink == 0);
+       set->ref_netlink--;
+       write_unlock_bh(&ip_set_ref_lock);
+}
+
 /* Add, del and test set entries from kernel.
  *
  * The set behind the index must exist and must be referenced
@@ -1002,7 +1022,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < inst->ip_set_max; i++) {
                        s = ip_set(inst, i);
-                       if (s && s->ref) {
+                       if (s && (s->ref || s->ref_netlink)) {
                                ret = -IPSET_ERR_BUSY;
                                goto out;
                        }
@@ -1024,7 +1044,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
                if (!s) {
                        ret = -ENOENT;
                        goto out;
-               } else if (s->ref) {
+               } else if (s->ref || s->ref_netlink) {
                        ret = -IPSET_ERR_BUSY;
                        goto out;
                }
@@ -1171,6 +1191,9 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
              from->family == to->family))
                return -IPSET_ERR_TYPE_MISMATCH;
 
+       if (from->ref_netlink || to->ref_netlink)
+               return -EBUSY;
+
        strncpy(from_name, from->name, IPSET_MAXNAMELEN);
        strncpy(from->name, to->name, IPSET_MAXNAMELEN);
        strncpy(to->name, from_name, IPSET_MAXNAMELEN);
@@ -1206,7 +1229,7 @@ ip_set_dump_done(struct netlink_callback *cb)
                if (set->variant->uref)
                        set->variant->uref(set, cb, false);
                pr_debug("release set %s\n", set->name);
-               __ip_set_put_byindex(inst, index);
+               __ip_set_put_netlink(set);
        }
        return 0;
 }
@@ -1328,7 +1351,7 @@ dump_last:
                if (!cb->args[IPSET_CB_ARG0]) {
                        /* Start listing: make sure set won't be destroyed */
                        pr_debug("reference set\n");
-                       set->ref++;
+                       set->ref_netlink++;
                }
                write_unlock_bh(&ip_set_ref_lock);
                nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
@@ -1396,7 +1419,7 @@ release_refcount:
                if (set->variant->uref)
                        set->variant->uref(set, cb, false);
                pr_debug("release set %s\n", set->name);
-               __ip_set_put_byindex(inst, index);
+               __ip_set_put_netlink(set);
                cb->args[IPSET_CB_ARG0] = 0;
        }
 out:
index e5336ab..d32fd6b 100644 (file)
@@ -1082,7 +1082,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
        if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
                goto nla_put_failure;
 #endif
-       if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+       if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
                goto nla_put_failure;
        if (unlikely(ip_set_put_flags(skb, set)))
index 24c6c19..a2a89e4 100644 (file)
@@ -458,7 +458,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
        if (!nested)
                goto nla_put_failure;
        if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
-           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
                          htonl(sizeof(*map) + n * set->dsize)))
                goto nla_put_failure;
index 7542999..cb5b630 100644 (file)
@@ -582,7 +582,12 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
        if (err < 0) {
-               queue->queue_user_dropped++;
+               if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+                       failopen = 1;
+                       err = 0;
+               } else {
+                       queue->queue_user_dropped++;
+               }
                goto err_out_unlock;
        }
 
index 234a733..ce94729 100644 (file)
@@ -7,7 +7,9 @@ config OPENVSWITCH
        depends on INET
        depends on !NF_CONNTRACK || \
                   (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
-                                    (!NF_NAT || NF_NAT)))
+                                    (!NF_NAT || NF_NAT) && \
+                                    (!NF_NAT_IPV4 || NF_NAT_IPV4) && \
+                                    (!NF_NAT_IPV6 || NF_NAT_IPV6)))
        select LIBCRC32C
        select MPLS
        select NET_MPLS_GSO
index dc5eb29..1b9d286 100644 (file)
@@ -535,14 +535,15 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
-               if (skb->protocol == htons(ETH_P_IP) &&
+               if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+                   skb->protocol == htons(ETH_P_IP) &&
                    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           hooknum))
                                err = NF_DROP;
                        goto push;
-#if IS_ENABLED(CONFIG_NF_NAT_IPV6)
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+                          skb->protocol == htons(ETH_P_IPV6)) {
                        __be16 frag_off;
                        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                        int hdrlen = ipv6_skip_exthdr(skb,
@@ -557,7 +558,6 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                                        err = NF_DROP;
                                goto push;
                        }
-#endif
                }
                /* Non-ICMP, fall thru to initialize if needed. */
        case IP_CT_NEW:
@@ -664,11 +664,12 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
 
        /* Determine NAT type.
         * Check if the NAT type can be deduced from the tracked connection.
-        * Make sure expected traffic is NATted only when committing.
+        * Make sure new expected connections (IP_CT_RELATED) are NATted only
+        * when committing.
         */
        if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
            ct->status & IPS_NAT_MASK &&
-           (!(ct->status & IPS_EXPECTED_BIT) || info->commit)) {
+           (ctinfo != IP_CT_RELATED || info->commit)) {
                /* NAT an established or related connection like before. */
                if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
                        /* This is the REPLY direction for a connection
@@ -968,7 +969,8 @@ static int parse_nat(const struct nlattr *attr,
                        break;
 
                case OVS_NAT_ATTR_IP_MIN:
-                       nla_memcpy(&info->range.min_addr, a, nla_len(a));
+                       nla_memcpy(&info->range.min_addr, a,
+                                  sizeof(info->range.min_addr));
                        info->range.flags |= NF_NAT_RANGE_MAP_IPS;
                        break;
 
@@ -1238,7 +1240,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
        }
 
        if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
-               if (info->family == NFPROTO_IPV4) {
+               if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+                   info->family == NFPROTO_IPV4) {
                        if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
                                            info->range.min_addr.ip) ||
                            (info->range.max_addr.ip
@@ -1246,8 +1249,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
                             (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
                                              info->range.max_addr.ip))))
                                return false;
-#if IS_ENABLED(CONFIG_NF_NAT_IPV6)
-               } else if (info->family == NFPROTO_IPV6) {
+               } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+                          info->family == NFPROTO_IPV6) {
                        if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
                                             &info->range.min_addr.in6) ||
                            (memcmp(&info->range.max_addr.in6,
@@ -1256,7 +1259,6 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
                             (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
                                               &info->range.max_addr.in6))))
                                return false;
-#endif
                } else {
                        return false;
                }
index 736c004..9774535 100644 (file)
@@ -401,7 +401,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        sk = chunk->skb->sk;
 
        /* Allocate the new skb.  */
-       nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
+       nskb = alloc_skb(packet->size + MAX_HEADER, gfp);
        if (!nskb)
                goto nomem;
 
@@ -523,8 +523,8 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
         */
        if (auth)
                sctp_auth_calculate_hmac(asoc, nskb,
-                                       (struct sctp_auth_chunk *)auth,
-                                       GFP_ATOMIC);
+                                        (struct sctp_auth_chunk *)auth,
+                                        gfp);
 
        /* 2) Calculate the Adler-32 checksum of the whole packet,
         *    including the SCTP common header and all the
index 8b5833c..2b9b98f 100644 (file)
@@ -1079,7 +1079,7 @@ nla_put_failure:
  *     @filter_dev: filter device
  *     @idx:
  *
- *     Delete FDB entry from switch device.
+ *     Dump FDB entries from switch device.
  */
 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev,
index ad7f5b3..1c4ad47 100644 (file)
@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
                skb_dst_force(skb);
+               dev_hold(skb->dev);
 
                nexthdr = x->type->input(x, skb);
 
                if (nexthdr == -EINPROGRESS)
                        return 0;
 resume:
+               dev_put(skb->dev);
+
                spin_lock(&x->lock);
                if (nexthdr <= 0) {
                        if (nexthdr == -EBADMSG) {
index aa1b15c..6469bed 100644 (file)
@@ -1019,8 +1019,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
                njiff += timer->sticks - priv->correction;
                priv->correction = 0;
        }
-       priv->last_expires = priv->tlist.expires = njiff;
-       add_timer(&priv->tlist);
+       priv->last_expires = njiff;
+       mod_timer(&priv->tlist, njiff);
        return 0;
 }
 
@@ -1502,17 +1502,13 @@ static int snd_timer_user_ginfo(struct file *file,
        return err;
 }
 
-static int snd_timer_user_gparams(struct file *file,
-                                 struct snd_timer_gparams __user *_gparams)
+static int timer_set_gparams(struct snd_timer_gparams *gparams)
 {
-       struct snd_timer_gparams gparams;
        struct snd_timer *t;
        int err;
 
-       if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
-               return -EFAULT;
        mutex_lock(&register_mutex);
-       t = snd_timer_find(&gparams.tid);
+       t = snd_timer_find(&gparams->tid);
        if (!t) {
                err = -ENODEV;
                goto _error;
@@ -1525,12 +1521,22 @@ static int snd_timer_user_gparams(struct file *file,
                err = -ENOSYS;
                goto _error;
        }
-       err = t->hw.set_period(t, gparams.period_num, gparams.period_den);
+       err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
 _error:
        mutex_unlock(&register_mutex);
        return err;
 }
 
+static int snd_timer_user_gparams(struct file *file,
+                                 struct snd_timer_gparams __user *_gparams)
+{
+       struct snd_timer_gparams gparams;
+
+       if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
+               return -EFAULT;
+       return timer_set_gparams(&gparams);
+}
+
 static int snd_timer_user_gstatus(struct file *file,
                                  struct snd_timer_gstatus __user *_gstatus)
 {
index 2e90822..6a437eb 100644 (file)
 
 #include <linux/compat.h>
 
+/*
+ * ILP32 and LP64 use different sizes for the 'long' type, and storage
+ * alignment also differs between architectures. The '__packed' qualifier
+ * is used here so that the size of this structure is a multiple of 4 and
+ * fits any architecture with 32-bit storage alignment.
+ */
+struct snd_timer_gparams32 {
+       struct snd_timer_id tid;
+       u32 period_num;
+       u32 period_den;
+       unsigned char reserved[32];
+} __packed;
+
 struct snd_timer_info32 {
        u32 flags;
        s32 card;
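
A minimal standalone sketch of the size argument made in the comment above, assuming struct snd_timer_id is the five-int layout from the ALSA uapi headers (20 bytes); the *_sketch structures below are illustrative stand-ins, not kernel code:

#include <stdint.h>

/* Stand-in for struct snd_timer_id: five ints, 20 bytes. */
struct snd_timer_id_sketch {
	int dev_class, dev_sclass, card, device, subdevice;
};

/* Same layout as the packed compat struct added above. */
struct snd_timer_gparams32_sketch {
	struct snd_timer_id_sketch tid;
	uint32_t period_num;
	uint32_t period_den;
	unsigned char reserved[32];
} __attribute__((packed));

/* 20 + 4 + 4 + 32 = 60 bytes: a multiple of 4, the same under ILP32 and LP64. */
_Static_assert(sizeof(struct snd_timer_gparams32_sketch) == 60,
	       "compat layout must match what 32-bit userland passes");

The native struct snd_timer_gparams uses 'unsigned long' for the two period fields, so an LP64 kernel sees a larger layout (typically 72 bytes), which is why the dedicated 32-bit handler added in the next hunk copies the fields individually.
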
@@ -32,6 +45,19 @@ struct snd_timer_info32 {
        unsigned char reserved[64];
 };
 
+static int snd_timer_user_gparams_compat(struct file *file,
+                                       struct snd_timer_gparams32 __user *user)
+{
+       struct snd_timer_gparams gparams;
+
+       if (copy_from_user(&gparams.tid, &user->tid, sizeof(gparams.tid)) ||
+           get_user(gparams.period_num, &user->period_num) ||
+           get_user(gparams.period_den, &user->period_den))
+               return -EFAULT;
+
+       return timer_set_gparams(&gparams);
+}
+
 static int snd_timer_user_info_compat(struct file *file,
                                      struct snd_timer_info32 __user *_info)
 {
@@ -99,6 +125,7 @@ static int snd_timer_user_status_compat(struct file *file,
  */
 
 enum {
+       SNDRV_TIMER_IOCTL_GPARAMS32 = _IOW('T', 0x04, struct snd_timer_gparams32),
        SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
        SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
 #ifdef CONFIG_X86_X32
@@ -114,7 +141,6 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
        case SNDRV_TIMER_IOCTL_PVERSION:
        case SNDRV_TIMER_IOCTL_TREAD:
        case SNDRV_TIMER_IOCTL_GINFO:
-       case SNDRV_TIMER_IOCTL_GPARAMS:
        case SNDRV_TIMER_IOCTL_GSTATUS:
        case SNDRV_TIMER_IOCTL_SELECT:
        case SNDRV_TIMER_IOCTL_PARAMS:
@@ -128,6 +154,8 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
        case SNDRV_TIMER_IOCTL_PAUSE_OLD:
        case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
                return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+       case SNDRV_TIMER_IOCTL_GPARAMS32:
+               return snd_timer_user_gparams_compat(file, argp);
        case SNDRV_TIMER_IOCTL_INFO32:
                return snd_timer_user_info_compat(file, argp);
        case SNDRV_TIMER_IOCTL_STATUS32:
index 845d5e5..ec4db3a 100644 (file)
@@ -446,18 +446,12 @@ end:
 
 void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
 {
-       struct reg_params tx_params, rx_params;
-
-       snd_dice_transaction_clear_enable(dice);
+       unsigned int i;
 
-       if (get_register_params(dice, &tx_params, &rx_params) == 0) {
-               stop_streams(dice, AMDTP_IN_STREAM, &tx_params);
-               stop_streams(dice, AMDTP_OUT_STREAM, &rx_params);
+       for (i = 0; i < MAX_STREAMS; i++) {
+               destroy_stream(dice, AMDTP_IN_STREAM, i);
+               destroy_stream(dice, AMDTP_OUT_STREAM, i);
        }
-
-       release_resources(dice);
-
-       dice->substreams_counter = 0;
 }
 
 void snd_dice_stream_update_duplex(struct snd_dice *dice)
index 2624cfe..b680be0 100644 (file)
@@ -2361,6 +2361,10 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaae8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaae0),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaaf0),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
        /* VIA GFX VT7122/VX900 */
index 4f5ca0b..fefe83f 100644 (file)
@@ -4759,6 +4759,7 @@ enum {
        ALC255_FIXUP_DELL_SPK_NOISE,
        ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC280_FIXUP_HP_HEADSET_MIC,
+       ALC221_FIXUP_HP_FRONT_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5401,6 +5402,13 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MIC,
        },
+       [ALC221_FIXUP_HP_FRONT_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x02a19020 }, /* Front Mic */
+                       { }
+               },
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5506,6 +5514,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6406,6 +6415,7 @@ enum {
        ALC668_FIXUP_AUTO_MUTE,
        ALC668_FIXUP_DELL_DISABLE_AAMIX,
        ALC668_FIXUP_DELL_XPS13,
+       ALC662_FIXUP_ASUS_Nx50,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6646,6 +6656,12 @@ static const struct hda_fixup alc662_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_bass_chmap,
        },
+       [ALC662_FIXUP_ASUS_Nx50] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_auto_mute_via_amp,
+               .chained = true,
+               .chain_id = ALC662_FIXUP_BASS_1A
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6668,8 +6684,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
-       SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
+       SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
        SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+       SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
        SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
index fb62bce..6178bb5 100644 (file)
@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
                usb_audio_err(chip, "cannot memdup\n");
                return -ENOMEM;
        }
+       INIT_LIST_HEAD(&fp->list);
        if (fp->nr_rates > MAX_NR_RATES) {
                kfree(fp);
                return -EINVAL;
@@ -193,6 +194,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        return 0;
 
  error:
+       list_del(&fp->list); /* unlink to avoid a double free */
        kfree(fp);
        kfree(rate_table);
        return err;
@@ -469,6 +471,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
        fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
        fp->datainterval = 0;
        fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+       INIT_LIST_HEAD(&fp->list);
 
        switch (fp->maxpacksize) {
        case 0x120:
@@ -492,6 +495,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
                ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
        err = snd_usb_add_audio_stream(chip, stream, fp);
        if (err < 0) {
+               list_del(&fp->list); /* unlink to avoid a double free */
                kfree(fp);
                return err;
        }
index 51258a1..6fe7f21 100644 (file)
@@ -316,7 +316,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
 /*
  * add this endpoint to the chip instance.
  * if a stream with the same endpoint already exists, append to it.
- * if not, create a new pcm stream.
+ * if not, create a new pcm stream. note that fp is added to the substream
+ * fmt_list and will be freed when the chip instance is released; do not
+ * free fp or remove it from the substream fmt_list, to avoid a double free.
  */
 int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                             int stream,
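
The ownership rule spelled out in the comment above is what the INIT_LIST_HEAD()/list_del() additions in these hunks enforce on the error paths: once fp is linked into the fmt_list it belongs to the chip instance, so an error path must unlink it before freeing it. A minimal kernel-style sketch of that pattern, with hypothetical names (fmt_entry, attach_entry, add_fmt_entry) standing in for the ALSA structures and for snd_usb_add_audio_stream():

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct fmt_entry {
	struct list_head list;
	/* ... format description fields ... */
};

/* Stand-in for snd_usb_add_audio_stream(): may fail before linking fp. */
static int attach_entry(struct list_head *fmt_list, struct fmt_entry *fp)
{
	if (!fmt_list)
		return -EINVAL;			/* failed before linking */
	list_add_tail(&fp->list, fmt_list);	/* the list now owns fp */
	return 0;
}

static int add_fmt_entry(struct list_head *fmt_list, struct fmt_entry *fp)
{
	int err;

	/* Makes the list_del() below safe even if fp was never linked. */
	INIT_LIST_HEAD(&fp->list);

	err = attach_entry(fmt_list, fp);
	if (err < 0) {
		list_del(&fp->list);	/* unlink first ... */
		kfree(fp);		/* ... then free, so the later list
					 * teardown cannot free fp again */
		return err;
	}
	/* Success: do not free fp here; it is released with the list owner. */
	return 0;
}

This mirrors the order used in the quirks.c and stream.c hunks: list_del(&fp->list) always precedes kfree(fp) on the error paths, and the INIT_LIST_HEAD() calls make that safe even when snd_usb_add_audio_stream() failed before adding fp.
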
@@ -677,6 +679,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
                                        * (fp->maxpacksize & 0x7ff);
                fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
                fp->clock = clock;
+               INIT_LIST_HEAD(&fp->list);
 
                /* some quirks for attributes here */
 
@@ -725,6 +728,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
                dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
                err = snd_usb_add_audio_stream(chip, stream, fp);
                if (err < 0) {
+                       list_del(&fp->list); /* unlink to avoid a double free */
                        kfree(fp->rate_table);
                        kfree(fp->chmap);
                        kfree(fp);